text
stringlengths 2
100k
| meta
dict |
---|---|
/*
* JBoss, Home of Professional Open Source
* Copyright 2011, Red Hat, Inc. and individual contributors
* by the @authors tag. See the copyright.txt in the distribution for a
* full listing of individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.restcomm.media.client.mgcp.parser.params;
import java.text.ParseException;
import org.restcomm.media.client.mgcp.parser.StringFunctions;
import jain.protocol.ip.mgcp.message.parms.EncryptionMethod;
public class EncryptionMethodHandler
{
private static final byte[] CLEAR_VALUE=new byte[] {
StringFunctions.LOW_C_BYTE,StringFunctions.LOW_L_BYTE,StringFunctions.LOW_E_BYTE,
StringFunctions.LOW_A_BYTE,StringFunctions.LOW_R_BYTE,StringFunctions.COLON_BYTE
};
private static final byte[] BASE64_VALUE=new byte[] {
StringFunctions.LOW_B_BYTE,StringFunctions.LOW_A_BYTE,StringFunctions.LOW_S_BYTE,
StringFunctions.LOW_E_BYTE,StringFunctions.SIX_BYTE,StringFunctions.FOUR_BYTE,StringFunctions.COLON_BYTE
};
private static final byte[] URI_VALUE=new byte[] {
StringFunctions.LOW_U_BYTE,StringFunctions.LOW_R_BYTE,StringFunctions.LOW_I_BYTE,
StringFunctions.COLON_BYTE
};
public static EncryptionMethod decode(byte[] value,int offset,int length) throws ParseException
{
if(length>=URI_VALUE.length && value[offset + URI_VALUE.length-1]==StringFunctions.COLON_BYTE)
{
for(int i=0;i<URI_VALUE.length-1;i++)
if(value[offset+i]!=URI_VALUE[i] && value[offset+i]!=(byte)(URI_VALUE[i]+StringFunctions.CASE_STEP))
throw new ParseException("Invalid value for EncryptionData: " + new String(value,offset,length), 0);
return new EncryptionMethod(EncryptionMethod.URI, new String(value,offset+URI_VALUE.length,length-URI_VALUE.length));
}
else if(length>=CLEAR_VALUE.length && value[offset + CLEAR_VALUE.length-1]==StringFunctions.COLON_BYTE)
{
for(int i=0;i<CLEAR_VALUE.length-1;i++)
if(value[offset+i]!=CLEAR_VALUE[i] && value[offset+i]!=(byte)(CLEAR_VALUE[i]+StringFunctions.CASE_STEP))
throw new ParseException("Invalid value for EncryptionData: " + new String(value,offset,length), 0);
return new EncryptionMethod(EncryptionMethod.CLEAR, new String(value,offset+CLEAR_VALUE.length,length-CLEAR_VALUE.length));
}
else if(length>=BASE64_VALUE.length && value[offset + BASE64_VALUE.length-1]==StringFunctions.COLON_BYTE)
{
for(int i=0;i<BASE64_VALUE.length-1;i++)
if(value[offset+i]!=BASE64_VALUE[i] && value[offset+i]!=(byte)(BASE64_VALUE[i]+StringFunctions.CASE_STEP))
throw new ParseException("Invalid value for EncryptionData: " + new String(value,offset,length), 0);
return new EncryptionMethod(EncryptionMethod.BASE64, new String(value,offset+BASE64_VALUE.length,length-BASE64_VALUE.length));
}
throw new ParseException("Invalid value for EncryptionData: " + new String(value,offset,length), 0);
}
public static int encode(byte[] destination,int offset,EncryptionMethod encryptionMethod)
{
int usedLength=0;
byte[] key=encryptionMethod.getEncryptionKey().getBytes();
switch (encryptionMethod.getEncryptionMethod()) {
case EncryptionMethod.BASE64:
System.arraycopy(BASE64_VALUE, 0, destination, offset, BASE64_VALUE.length);
usedLength=BASE64_VALUE.length;
System.arraycopy(key, 0, destination, offset+usedLength, key.length);
usedLength+=key.length;
break;
case EncryptionMethod.CLEAR:
System.arraycopy(CLEAR_VALUE, 0, destination, offset, CLEAR_VALUE.length);
usedLength=CLEAR_VALUE.length;
System.arraycopy(key, 0, destination, offset+usedLength, key.length);
usedLength+=key.length;
break;
case EncryptionMethod.URI:
System.arraycopy(URI_VALUE, 0, destination, offset, URI_VALUE.length);
usedLength=URI_VALUE.length;
System.arraycopy(key, 0, destination, offset+usedLength, key.length);
usedLength+=key.length;
break;
}
return usedLength;
}
} | {
"pile_set_name": "Github"
} |
{-# LANGUAGE OverloadedStrings #-}
-- |
-- Module : Test.AWS.SQS
-- Copyright : (c) 2013-2018 Brendan Hay
-- License : Mozilla Public License, v. 2.0.
-- Maintainer : Brendan Hay <[email protected]>
-- Stability : experimental
-- Portability : non-portable (GHC extensions)
--
module Test.AWS.SQS
( tests
, fixtures
) where
import qualified Data.HashMap.Strict as Map
import Data.Monoid
import Data.Text (Text)
import Network.AWS.Lens ((&), (.~), (?~))
import Network.AWS.SQS
import Test.AWS.Gen.SQS
import Test.Tasty
-- | Unit tests for the SQS bindings. None are defined yet.
tests :: [TestTree]
tests = mempty
-- | Golden request/response fixtures exercising the generated SQS
-- serialisation code against canned wire data.
fixtures :: [TestTree]
fixtures =
    -- Requests: each fixture serialises a request value for comparison.
    [ testGroup "request" $
        [ requestSendMessage $
            sendMessage url "This+is+a+test+message"
                & smMessageAttributes .~ Map.fromList
                    [ ("test_attribute_name_1", messageAttributeValue "String"
                        & mavStringValue ?~ "test_attribute_value_1")
                    , ("test_attribute_name_2", messageAttributeValue "String"
                        & mavStringValue ?~ "test_attribute_value_2")
                    ]
        , requestChangeMessageVisibility $
            changeMessageVisibility url handle 60
        , requestChangeMessageVisibilityBatch $
            changeMessageVisibilityBatch url
                & cmvbEntries .~
                    [ changeMessageVisibilityBatchRequestEntry
                        "change_visibility_msg_2" handle
                        & cVisibilityTimeout ?~ 45
                    , changeMessageVisibilityBatchRequestEntry
                        "change_visibility_msg_3" handle
                        & cVisibilityTimeout ?~ 45
                    ]
        ]
    -- Responses: each fixture deserialises canned XML into these values.
    , testGroup "response"
        [ responseGetQueueURL $ getQueueURLResponse 200 url
        , responsePurgeQueue $ purgeQueueResponse
        , responseSendMessage $
            sendMessageResponse 200
                & smrsMessageId ?~ "5fea7756-0ea4-451a-a703-a558b933e274"
                & smrsMD5OfMessageBody ?~ "fafb00f5732ab283681e124bf8747ed1"
                & smrsMD5OfMessageAttributes ?~ "3ae8f24a165a8cedc005670c81a27295"
        , responseReceiveMessage $
            receiveMessageResponse 200 & rmrsMessages .~
                [ message
                    & mMessageId ?~ "5fea7756-0ea4-451a-a703-a558b933e274"
                    & mReceiptHandle ?~ "MbZj6wDWli+JvwwJaBV+3dcjk2YW2vA3+STFFljTM8tJJg6HRG6PYSasuWXPJB+CwLj1FjgXUv1uSj1gUPAWV66FU/WeR4mq2OKpEGYWbnLmpRCJVAyeMjeU5ZBdtcQ+QEauMZc8ZRv37sIW2iJKq3M9MFx1YvV11A2x/KSbkJ0="
                    & mMD5OfBody ?~ "fafb00f5732ab283681e124bf8747ed1"
                    & mBody ?~ "This is a test message"
                    & mMessageAttributes .~ mempty
                    & mAttributes .~ Map.fromList
                        [ (SenderId, "195004372649")
                        , (SentTimestamp, "1238099229000")
                        , (ApproximateReceiveCount, "5")
                        , (ApproximateFirstReceiveTimestamp, "1250700979248")
                        ]
                ]
        ]
    ]
-- | Queue URL shared by the request/response fixtures.
url :: Text
url = "http://sqs.us-east-1.amazonaws.com/123456789012/testQueue/"
-- | Receipt handle used by the visibility-change fixtures.
handle :: Text
handle = "MbZli+JvwwJaBV+3dcjk2W2vA3+STFFljTJg6HYSasuWXPJB/WeR4mq21A2x/KSbkJ0="
| {
"pile_set_name": "Github"
} |
# help.ro.txt - ro GnuPG online help
# Copyright (C) 2007 Free Software Foundation, Inc.
#
# This file is part of GnuPG.
#
# GnuPG is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GnuPG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <https://www.gnu.org/licenses/>.
.gpg.edit_ownertrust.value
Este sarcina d-voastră să atribuiţi o valoare aici; această valoare
nu va fi niciodată exportată pentru o terţă parte. Trebuie să
implementăm reţeaua-de-încredere; aceasta nu are nimic în comun cu
certificatele-de-reţea (create implicit).
.
.gpg.edit_ownertrust.set_ultimate.okay
Pentru a construi Reţeaua-de-Încredere, GnuPG trebuie să ştie care chei
au nivel de încredere suprem - acestea de obicei sunt cheile pentru care
aveţi acces la cheia secretă. Răspundeţi "da" pentru a seta
această cheie cu nivel de încredere suprem
.
.gpg.untrusted_key.override
Dacă doriţi oricum să folosiţi această cheie fără încredere, răspundeţi "da".
.
.gpg.pklist.user_id.enter
Introduceţi ID-ul utilizator al destinatarului mesajului.
.
.gpg.keygen.algo
Selectaţi algoritmul de folosit.
DSA (aka DSS) este Digital Signature Algorithm şi poate fi folosit numai
pentru semnături.
Elgamal este un algoritm numai pentru cifrare.
RSA poate fi folosit pentru semnături sau cifrare.
Prima cheie (primară) trebuie să fie întotdeauna o cheie cu care se poate semna.
.
.gpg.keygen.algo.rsa_se
În general nu este o idee bună să folosiţi aceeaşi cheie şi pentru
semnare şi pentru cifrare. Acest algoritm ar trebui folosit numai
în anumite domenii. Vă rugăm consultaţi mai întâi un expert în domeniu.
.
.gpg.keygen.size
Introduceţi lungimea cheii
.
.gpg.keygen.size.huge.okay
Răspundeţi "da" sau "nu"
.
.gpg.keygen.size.large.okay
Răspundeţi "da" sau "nu"
.
.gpg.keygen.valid
Introduceţi valoarea cerută precum a arătat la prompt.
Este posibil să introduceţi o dată ISO (AAAA-LL-ZZ) dar nu veţi
obţine un răspuns de eroare bun - în loc sistemul încearcă să
interpreteze valoare dată ca un interval.
.
.gpg.keygen.valid.okay
Răspundeţi "da" sau "nu"
.
.gpg.keygen.name
Introduceţi numele deţinătorului cheii
.
.gpg.keygen.email
vă rugăm introduceţi o adresă de email (opţională dar recomandată)
.
.gpg.keygen.comment
Vă rugăm introduceţi un comentariu opţional
.
.gpg.keygen.userid.cmd
N pentru a schimba numele.
C pentru a schimba comentariul.
E pentru a schimba adresa de email.
O pentru a continua cu generarea cheii.
T pentru a termina generarea cheii.
.
.gpg.keygen.sub.okay
Răspundeţi "da" (sau numai "d") dacă sunteţi OK să generaţi subcheia.
.
.gpg.sign_uid.okay
Răspundeţi "da" sau "nu"
.
.gpg.sign_uid.class
Când semnaţi un ID utilizator pe o cheie ar trebui să verificaţi mai întâi
că cheia aparţine persoanei numite în ID-ul utilizator. Este util şi altora
să ştie cât de atent aţi verificat acest lucru.
"0" înseamnă că nu pretindeţi nimic despre cât de atent aţi verificat cheia
"1" înseamnă că credeţi că cheia este a persoanei ce pretinde că este
proprietarul ei, dar n-aţi putut, sau nu aţi verificat deloc cheia.
Aceasta este utilă pentru verificare "persona", unde semnaţi cheia
unui utilizator pseudonim.
"2" înseamnă că aţi făcut o verificare superficială a cheii. De exemplu,
aceasta ar putea însemna că aţi verificat amprenta cheii şi aţi verificat
ID-ul utilizator de pe cheie cu un ID cu poză.
"3" înseamnă că aţi făcut o verificare extensivă a cheii. De exemplu,
aceasta ar putea însemna că aţi verificat amprenta cheii cu proprietarul
cheii în persoană, că aţi verificat folosind un document dificil de
falsificat cu poză (cum ar fi un paşaport) că numele proprietarului cheii
este acelaşi cu numele ID-ului utilizator al cheii şi că aţi verificat
(schimbând emailuri) că adresa de email de pe cheie aparţine proprietarului
cheii.
De notat că exemplele date pentru nivelele 2 şi 3 ceva mai sus sunt *numai*
exemple. La urma urmei, d-voastră decideţi ce înseamnă "superficial" şi
"extensiv" pentru d-voastră când semnaţi alte chei.
Dacă nu ştiţi care este răspunsul, răspundeţi "0".
.
.gpg.change_passwd.empty.okay
Răspundeţi "da" sau "nu"
.
.gpg.keyedit.save.okay
Răspundeţi "da" sau "nu"
.
.gpg.keyedit.cancel.okay
Răspundeţi "da" sau "nu"
.
.gpg.keyedit.sign_all.okay
Răspundeţi "da" dacă doriţi să semnaţi TOATE ID-urile utilizator
.
.gpg.keyedit.remove.uid.okay
Răspundeţi "da" dacă într-adevăr doriţi să ştergeţi acest ID utilizator.
Toate certificatele sunt de asemenea pierdute!
.
.gpg.keyedit.remove.subkey.okay
Răspundeţi "da" dacă este OK să ştergeţi subcheia
.
.gpg.keyedit.delsig.valid
Aceasta este o semnătură validă pe cheie; în mod normal n-ar trebui
să ştergeţi această semnătură pentru că aceasta ar putea fi importantă la stabilirea conexiunii de încredere la cheie sau altă cheie certificată
de această cheie.
.
.gpg.keyedit.delsig.unknown
Această semnătură nu poate fi verificată pentru că nu aveţi cheia
corespunzătoare. Ar trebui să amânaţi ştergerea sa până ştiţi care
cheie a fost folosită pentru că această cheie de semnare ar putea
constitui o conexiune de încredere spre o altă cheie deja certificată.
.
.gpg.keyedit.delsig.invalid
Semnătura nu este validă. Aceasta ar trebui ştearsă de pe inelul
d-voastră de chei.
.
.gpg.keyedit.delsig.selfsig
Aceasta este o semnătură care leagă ID-ul utilizator de cheie.
De obicei nu este o idee bună să ştergeţi o asemenea semnătură.
De fapt, GnuPG ar putea să nu mai poată folosi această cheie.
Aşa că faceţi acest lucru numai dacă această auto-semnătură este
dintr-o oarecare cauză invalidă şi o a doua este disponibilă.
.
.gpg.keyedit.updpref.okay
Schimbaţi toate preferinţele ale tuturor ID-urilor utilizator (sau doar
cele selectate) conform cu lista curentă de preferinţe. Timestamp-urile
tuturor auto-semnăturilor afectate vor fi avansate cu o secundă.
.
.gpg.passphrase.enter
Vă rugăm introduceţi fraza-parolă; aceasta este o propoziţie secretă
.
.gpg.passphrase.repeat
Vă rugăm repetaţi ultima frază-parolă, pentru a fi sigur(ă) ce aţi tastat.
.
.gpg.detached_signature.filename
Daţi numele fişierului la care se aplică semnătura
.
.gpg.openfile.overwrite.okay
Răspundeţi "da" dacă este OK să suprascrieţi fişierul
.
.gpg.openfile.askoutname
Vă rugăm introduceţi un nou nume-fişier. Dacă doar apăsaţi RETURN,
va fi folosit fişierul implicit (arătat în paranteze).
.
.gpg.ask_revocation_reason.code
Ar trebui să specificaţi un motiv pentru certificare. În funcţie de
context aveţi posibilitatea să alegeţi din această listă:
"Cheia a fost compromisă"
Folosiţi această opţiune dacă aveţi un motiv să credeţi că persoane
neautorizate au avut acces la cheia d-voastră secretă.
"Cheia este înlocuită"
Folosiţi această opţiune dacă înlocuiţi cheia cu una nouă.
"Cheia nu mai este folosită"
Folosiţi această opţiune dacă pensionaţi cheia.
"ID-ul utilizator nu mai este valid"
Folosiţi această opţiune dacă ID-ul utilizator nu mai trebuie folosit;
de obicei folosită pentru a marca o adresă de email ca invalidă.
.
.gpg.ask_revocation_reason.text
Dacă doriţi, puteţi introduce un text descriind de ce publicaţi acest
certificat de revocare. Vă rugăm fiţi concis.
O linie goală termină textul.
.
# Local variables:
# mode: fundamental
# coding: utf-8
# End:
| {
"pile_set_name": "Github"
} |
from custom_components.hacs.validate.base import (
ActionValidationBase,
ValidationException,
)
class RepositoryInformationFile(ActionValidationBase):
    """Validate that the repository ships an information file."""

    async def async_check(self):
        """Raise ValidationException when no accepted info file is in the tree."""
        names = [entry.filename.lower() for entry in self.repository.tree]

        # "info"/"info.md" are always accepted; repositories that render
        # their readme may satisfy the check with "readme"/"readme.md" too.
        accepted = ["info", "info.md"]
        if self.repository.data.render_readme:
            accepted += ["readme", "readme.md"]

        if not any(candidate in names for candidate in accepted):
            raise ValidationException("The repository has no information file")
| {
"pile_set_name": "Github"
} |
/* Finnish translation for the jQuery Timepicker Addon */
/* Written by Juga Paazmaya (http://github.com/paazmaya) */
(function ($) {
    // Finnish (fi) locale strings for the jQuery Timepicker Addon.
    var regionalFi = {
        timeOnlyTitle: 'Valitse aika',
        timeText: 'Aika',
        hourText: 'Tunti',
        minuteText: 'Minuutti',
        secondText: 'Sekunti',
        millisecText: 'Millisekunnin',
        microsecText: 'Mikrosekuntia',
        timezoneText: 'Aikavyöhyke',
        currentText: 'Nyt',
        closeText: 'Sulje',
        timeFormat: 'HH:mm',
        amNames: ['ap.', 'AM', 'A'],
        pmNames: ['ip.', 'PM', 'P'],
        isRTL: false
    };

    // Register the locale and make it the active default.
    $.timepicker.regional['fi'] = regionalFi;
    $.timepicker.setDefaults(regionalFi);
})(jQuery);
| {
"pile_set_name": "Github"
} |
# Register "bind" as an external service when the external bind/named
# package is enabled ("y") in the Freetz configuration. Kept as a single
# "test && append" so the line's exit status behaviour is unchanged for
# whatever sources this file.
[ "$EXTERNAL_FREETZ_PACKAGE_BIND_NAMED" == "y" ] && EXTERNAL_SERVICES+=" bind"
| {
"pile_set_name": "Github"
} |
/*
** Copyright (c) 2002-2016, Erik de Castro Lopo <[email protected]>
** All rights reserved.
**
** This code is released under 2-clause BSD license. Please see the
** file at : https://github.com/erikd/libsamplerate/blob/master/COPYING
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "src_config.h"
#include "common.h"
/* Magic value stored in every SINC_FILTER so live sinc state can be
** recognised. */
#define	SINC_MAGIC_MARKER	MAKE_MAGIC (' ', 's', 'i', 'n', 'c', ' ')

/*========================================================================================
*/

#define	MAKE_INCREMENT_T(x) 	((increment_t) (x))

/* Number of fractional bits in the fixed point coefficient-index type,
** and the corresponding scale factors for double <-> fixed conversion. */
#define	SHIFT_BITS				12
#define	FP_ONE					((double) (((increment_t) 1) << SHIFT_BITS))
#define	INV_FP_ONE				(1.0 / FP_ONE)

/*========================================================================================
*/

/* Fixed point type used to step through the coefficient table. */
typedef int32_t	increment_t ;
/* Storage type of the precomputed sinc filter coefficients. */
typedef float	coeff_t ;

#include "fastest_coeffs.h"
#include "mid_qual_coeffs.h"
#include "high_qual_coeffs.h"
/* Private state for the sinc converters. Allocated with a trailing
** flexible array member holding the circular input buffer. */
typedef struct
{	/* Equals SINC_MAGIC_MARKER for valid, initialised filter state. */
	int		sinc_magic_marker ;

	int		channels ;
	long	in_count, in_used ;
	long	out_count, out_gen ;

	/* Half length of the coefficient table and the fixed step through it. */
	int		coeff_half_len, index_inc ;

	double	src_ratio, input_index ;

	coeff_t const	*coeffs ;

	/* Circular buffer book-keeping: current read position, fill end,
	** real end-of-input (-1 until set, see sinc_reset) and total length. */
	int		b_current, b_end, b_real_end, b_len ;

	/* Sure hope noone does more than 128 channels at once. */
	double left_calc [128], right_calc [128] ;

	/* C99 struct flexible array. */
	float	buffer [] ;
} SINC_FILTER ;

/* Per-channel-count process callbacks plus shared helpers. */
static int sinc_multichan_vari_process (SRC_PRIVATE *psrc, SRC_DATA *data) ;
static int sinc_hex_vari_process (SRC_PRIVATE *psrc, SRC_DATA *data) ;
static int sinc_quad_vari_process (SRC_PRIVATE *psrc, SRC_DATA *data) ;
static int sinc_stereo_vari_process (SRC_PRIVATE *psrc, SRC_DATA *data) ;
static int sinc_mono_vari_process (SRC_PRIVATE *psrc, SRC_DATA *data) ;

static int prepare_data (SINC_FILTER *filter, SRC_DATA *data, int half_filter_chan_len) WARN_UNUSED ;

static void sinc_reset (SRC_PRIVATE *psrc) ;
static int sinc_copy (SRC_PRIVATE *from, SRC_PRIVATE *to) ;
static inline increment_t
double_to_fp (double x)
{ return (increment_t) (lrint ((x) * FP_ONE)) ;
} /* double_to_fp */
static inline increment_t
int_to_fp (int x)
{ return (((increment_t) (x)) << SHIFT_BITS) ;
} /* int_to_fp */
static inline int
fp_to_int (increment_t x)
{ return (((x) >> SHIFT_BITS)) ;
} /* fp_to_int */
static inline increment_t
fp_fraction_part (increment_t x)
{ return ((x) & ((((increment_t) 1) << SHIFT_BITS) - 1)) ;
} /* fp_fraction_part */
static inline double
fp_to_double (increment_t x)
{ return fp_fraction_part (x) * INV_FP_ONE ;
} /* fp_to_double */
/*----------------------------------------------------------------------------------------
*/
/* Return the human readable name for a sinc converter enum value,
** or NULL when src_enum is not a sinc converter. */
const char*
sinc_get_name (int src_enum)
{	if (src_enum == SRC_SINC_BEST_QUALITY)
		return "Best Sinc Interpolator" ;

	if (src_enum == SRC_SINC_MEDIUM_QUALITY)
		return "Medium Sinc Interpolator" ;

	if (src_enum == SRC_SINC_FASTEST)
		return "Fastest Sinc Interpolator" ;

	return NULL ;
} /* sinc_get_name */
/* Return the one line description for a sinc converter enum value,
** or NULL when src_enum is not a sinc converter. */
const char*
sinc_get_description (int src_enum)
{	if (src_enum == SRC_SINC_FASTEST)
		return "Band limited sinc interpolation, fastest, 97dB SNR, 80% BW." ;

	if (src_enum == SRC_SINC_MEDIUM_QUALITY)
		return "Band limited sinc interpolation, medium quality, 121dB SNR, 90% BW." ;

	if (src_enum == SRC_SINC_BEST_QUALITY)
		return "Band limited sinc interpolation, best quality, 144dB SNR, 96% BW." ;

	return NULL ;
} /* sinc_get_description */
/* Allocate and attach a SINC_FILTER to psrc, selecting the process
** callbacks and coefficient table for src_enum.
** Returns an SRC_ERR_* code (SRC_ERR_NO_ERROR on success). */
int
sinc_set_converter (SRC_PRIVATE *psrc, int src_enum)
{	SINC_FILTER *filter, temp_filter ;
	increment_t count ;
	uint32_t bits ;

	/* Quick sanity check. */
	if (SHIFT_BITS >= sizeof (increment_t) * 8 - 1)
		return SRC_ERR_SHIFT_BITS ;

	/* Discard any previously attached private state. */
	if (psrc->private_data != NULL)
	{	free (psrc->private_data) ;
		psrc->private_data = NULL ;
		} ;

	memset (&temp_filter, 0, sizeof (temp_filter)) ;

	temp_filter.sinc_magic_marker = SINC_MAGIC_MARKER ;
	temp_filter.channels = psrc->channels ;

	/* Pick the process callback specialised for the channel count;
	** the multichan variant handles everything else up to 128. */
	if (psrc->channels > ARRAY_LEN (temp_filter.left_calc))
		return SRC_ERR_BAD_CHANNEL_COUNT ;
	else if (psrc->channels == 1)
	{	psrc->const_process = sinc_mono_vari_process ;
		psrc->vari_process = sinc_mono_vari_process ;
		}
	else
	if (psrc->channels == 2)
	{	psrc->const_process = sinc_stereo_vari_process ;
		psrc->vari_process = sinc_stereo_vari_process ;
		}
	else
	if (psrc->channels == 4)
	{	psrc->const_process = sinc_quad_vari_process ;
		psrc->vari_process = sinc_quad_vari_process ;
		}
	else
	if (psrc->channels == 6)
	{	psrc->const_process = sinc_hex_vari_process ;
		psrc->vari_process = sinc_hex_vari_process ;
		}
	else
	{	psrc->const_process = sinc_multichan_vari_process ;
		psrc->vari_process = sinc_multichan_vari_process ;
		} ;

	psrc->reset = sinc_reset ;
	psrc->copy = sinc_copy ;

	/* Select the coefficient table matching the requested quality. */
	switch (src_enum)
	{	case SRC_SINC_FASTEST :
				temp_filter.coeffs = fastest_coeffs.coeffs ;
				temp_filter.coeff_half_len = ARRAY_LEN (fastest_coeffs.coeffs) - 2 ;
				temp_filter.index_inc = fastest_coeffs.increment ;
				break ;

		case SRC_SINC_MEDIUM_QUALITY :
				temp_filter.coeffs = slow_mid_qual_coeffs.coeffs ;
				temp_filter.coeff_half_len = ARRAY_LEN (slow_mid_qual_coeffs.coeffs) - 2 ;
				temp_filter.index_inc = slow_mid_qual_coeffs.increment ;
				break ;

		case SRC_SINC_BEST_QUALITY :
				temp_filter.coeffs = slow_high_qual_coeffs.coeffs ;
				temp_filter.coeff_half_len = ARRAY_LEN (slow_high_qual_coeffs.coeffs) - 2 ;
				temp_filter.index_inc = slow_high_qual_coeffs.increment ;
				break ;

		default :
				return SRC_ERR_BAD_CONVERTER ;
		} ;

	/*
	** FIXME : This needs to be looked at more closely to see if there is
	** a better way. Need to look at prepare_data () at the same time.
	*/
	temp_filter.b_len = 3 * (int) lrint ((temp_filter.coeff_half_len + 2.0) / temp_filter.index_inc * SRC_MAX_RATIO + 1) ;
	temp_filter.b_len = MAX (temp_filter.b_len, 4096) ;
	temp_filter.b_len *= temp_filter.channels ;
	temp_filter.b_len += 1 ; // There is a <= check against samples_in_hand requiring a buffer bigger than the calculation above

	/* Allocate the filter plus its flexible-array buffer (with an extra
	** channels-worth of guard space, poisoned in sinc_reset). */
	if ((filter = ZERO_ALLOC (SINC_FILTER, sizeof (SINC_FILTER) + sizeof (filter->buffer [0]) * (temp_filter.b_len + temp_filter.channels))) == NULL)
		return SRC_ERR_MALLOC_FAILED ;

	*filter = temp_filter ;
	memset (&temp_filter, 0xEE, sizeof (temp_filter)) ;

	psrc->private_data = filter ;

	sinc_reset (psrc) ;

	/* Verify that coeff_half_len still fits the fixed point range. */
	count = filter->coeff_half_len ;
	for (bits = 0 ; (MAKE_INCREMENT_T (1) << bits) < count ; bits++)
		count |= (MAKE_INCREMENT_T (1) << bits) ;

	if (bits + SHIFT_BITS - 1 >= (int) (sizeof (increment_t) * 8))
		return SRC_ERR_FILTER_LEN ;

	return SRC_ERR_NO_ERROR ;
} /* sinc_set_converter */
/* Return the sinc filter attached to psrc to its freshly initialised
** state. A no-op when no filter is attached. */
static void
sinc_reset (SRC_PRIVATE *psrc)
{	SINC_FILTER *filter = (SINC_FILTER*) psrc->private_data ;

	if (filter == NULL)
		return ;

	filter->b_current = 0 ;
	filter->b_end = 0 ;
	filter->b_real_end = -1 ;

	filter->src_ratio = 0.0 ;
	filter->input_index = 0.0 ;

	memset (filter->buffer, 0, filter->b_len * sizeof (filter->buffer [0])) ;

	/* Poison the guard region past the buffer so overruns can be detected. */
	memset (filter->buffer + filter->b_len, 0xAA, filter->channels * sizeof (filter->buffer [0])) ;
} /* sinc_reset */
/* Deep-copy the private sinc filter state of one SRC_PRIVATE into
** another. Returns an SRC_ERR_* code. */
static int
sinc_copy (SRC_PRIVATE *from, SRC_PRIVATE *to)
{	SINC_FILTER *src_filter ;
	SINC_FILTER *dest_filter ;
	size_t byte_count ;

	if (from->private_data == NULL)
		return SRC_ERR_NO_PRIVATE ;

	src_filter = (SINC_FILTER*) from->private_data ;
	byte_count = sizeof (SINC_FILTER) + sizeof (src_filter->buffer [0]) * (src_filter->b_len + src_filter->channels) ;

	if ((dest_filter = ZERO_ALLOC (SINC_FILTER, byte_count)) == NULL)
		return SRC_ERR_MALLOC_FAILED ;

	memcpy (dest_filter, src_filter, byte_count) ;
	to->private_data = dest_filter ;

	return SRC_ERR_NO_ERROR ;
} /* sinc_copy */
/*========================================================================================
** Beware all ye who dare pass this point. There be dragons here.
*/
/* Compute one output sample for the mono case.
** increment is the fixed point step through the coefficient table and
** start_filter_index the fixed point offset of the centre-most tap.
** The symmetric filter is applied in two halves around b_current. */
static inline double
calc_output_single (SINC_FILTER *filter, increment_t increment, increment_t start_filter_index)
{	double		fraction, left, right, icoeff ;
	increment_t	filter_index, max_filter_index ;
	int			data_index, coeff_count, indx ;

	/* Convert input parameters into fixed point. */
	max_filter_index = int_to_fp (filter->coeff_half_len) ;

	/* First apply the left half of the filter. */
	filter_index = start_filter_index ;
	coeff_count = (max_filter_index - filter_index) / increment ;
	filter_index = filter_index + coeff_count * increment ;
	data_index = filter->b_current - coeff_count ;

	left = 0.0 ;
	do
	{	if (data_index >= 0) /* Avoid underflow access to filter->buffer. */
		{	fraction = fp_to_double (filter_index) ;
			indx = fp_to_int (filter_index) ;
			/* Linearly interpolate between adjacent coefficient entries. */
			icoeff = filter->coeffs [indx] + fraction * (filter->coeffs [indx + 1] - filter->coeffs [indx]) ;
			left += icoeff * filter->buffer [data_index] ;
			} ;
		filter_index -= increment ;
		data_index = data_index + 1 ;
		}
	while (filter_index >= MAKE_INCREMENT_T (0)) ;

	/* Now apply the right half of the filter. */
	filter_index = increment - start_filter_index ;
	coeff_count = (max_filter_index - filter_index) / increment ;
	filter_index = filter_index + coeff_count * increment ;
	data_index = filter->b_current + 1 + coeff_count ;

	right = 0.0 ;
	do
	{	fraction = fp_to_double (filter_index) ;
		indx = fp_to_int (filter_index) ;
		icoeff = filter->coeffs [indx] + fraction * (filter->coeffs [indx + 1] - filter->coeffs [indx]) ;
		right += icoeff * filter->buffer [data_index] ;

		filter_index -= increment ;
		data_index = data_index - 1 ;
		}
	while (filter_index > MAKE_INCREMENT_T (0)) ;

	return (left + right) ;
} /* calc_output_single */
/* Mono (single channel) variable-ratio processing loop: pulls input
** through the internal circular buffer and emits one interpolated output
** sample per iteration until input or output space is exhausted. */
static int
sinc_mono_vari_process (SRC_PRIVATE *psrc, SRC_DATA *data)
{	SINC_FILTER *filter ;
	double		input_index, src_ratio, count, float_increment, terminate, rem ;
	increment_t	increment, start_filter_index ;
	int			half_filter_chan_len, samples_in_hand ;

	if (psrc->private_data == NULL)
		return SRC_ERR_NO_PRIVATE ;

	filter = (SINC_FILTER*) psrc->private_data ;

	/* If there is not a problem, this will be optimised out. */
	if (sizeof (filter->buffer [0]) != sizeof (data->data_in [0]))
		return SRC_ERR_SIZE_INCOMPATIBILITY ;

	filter->in_count = data->input_frames * filter->channels ;
	filter->out_count = data->output_frames * filter->channels ;
	filter->in_used = filter->out_gen = 0 ;

	src_ratio = psrc->last_ratio ;

	if (is_bad_src_ratio (src_ratio))
		return SRC_ERR_BAD_INTERNAL_STATE ;

	/* Check the sample rate ratio wrt the buffer len. */
	count = (filter->coeff_half_len + 2.0) / filter->index_inc ;
	if (MIN (psrc->last_ratio, data->src_ratio) < 1.0)
		count /= MIN (psrc->last_ratio, data->src_ratio) ;

	/* Maximum coefficients on either side of center point. */
	half_filter_chan_len = filter->channels * (int) (lrint (count) + 1) ;

	/* Resume from the fractional read position left by the previous call. */
	input_index = psrc->last_position ;
	rem = fmod_one (input_index) ;

	filter->b_current = (filter->b_current + filter->channels * lrint (input_index - rem)) % filter->b_len ;
	input_index = rem ;

	terminate = 1.0 / src_ratio + 1e-20 ;

	/* Main processing loop. */
	while (filter->out_gen < filter->out_count)
	{
		/* Need to reload buffer? */
		samples_in_hand = (filter->b_end - filter->b_current + filter->b_len) % filter->b_len ;

		if (samples_in_hand <= half_filter_chan_len)
		{	if ((psrc->error = prepare_data (filter, data, half_filter_chan_len)) != 0)
				return psrc->error ;

			samples_in_hand = (filter->b_end - filter->b_current + filter->b_len) % filter->b_len ;
			if (samples_in_hand <= half_filter_chan_len)
				break ;
			} ;

		/* This is the termination condition. */
		/* NOTE(review): the stereo path uses ">=" for this comparison while
		** this one uses ">" -- confirm whether that is intentional. */
		if (filter->b_real_end >= 0)
		{	if (filter->b_current + input_index + terminate > filter->b_real_end)
				break ;
			} ;

		/* Ramp the ratio linearly from last_ratio towards data->src_ratio. */
		if (filter->out_count > 0 && fabs (psrc->last_ratio - data->src_ratio) > 1e-10)
			src_ratio = psrc->last_ratio + filter->out_gen * (data->src_ratio - psrc->last_ratio) / filter->out_count ;

		float_increment = filter->index_inc * (src_ratio < 1.0 ? src_ratio : 1.0) ;
		increment = double_to_fp (float_increment) ;

		start_filter_index = double_to_fp (input_index * float_increment) ;

		data->data_out [filter->out_gen] = (float) ((float_increment / filter->index_inc) *
										calc_output_single (filter, increment, start_filter_index)) ;
		filter->out_gen ++ ;

		/* Figure out the next index. */
		input_index += 1.0 / src_ratio ;
		rem = fmod_one (input_index) ;

		filter->b_current = (filter->b_current + filter->channels * lrint (input_index - rem)) % filter->b_len ;
		input_index = rem ;
		} ;

	psrc->last_position = input_index ;

	/* Save current ratio rather than target ratio. */
	psrc->last_ratio = src_ratio ;

	data->input_frames_used = filter->in_used / filter->channels ;
	data->output_frames_gen = filter->out_gen / filter->channels ;

	return SRC_ERR_NO_ERROR ;
} /* sinc_mono_vari_process */
/* Compute one output frame (two samples) for the stereo case and store
** the scaled result in output [0..1]. Same two-half filter application
** as calc_output_single, with interleaved stereo buffer accesses. */
static inline void
calc_output_stereo (SINC_FILTER *filter, increment_t increment, increment_t start_filter_index, double scale, float * output)
{	double		fraction, left [2], right [2], icoeff ;
	increment_t	filter_index, max_filter_index ;
	int			data_index, coeff_count, indx ;

	/* Convert input parameters into fixed point. */
	max_filter_index = int_to_fp (filter->coeff_half_len) ;

	/* First apply the left half of the filter. */
	filter_index = start_filter_index ;
	coeff_count = (max_filter_index - filter_index) / increment ;
	filter_index = filter_index + coeff_count * increment ;
	data_index = filter->b_current - filter->channels * coeff_count ;

	left [0] = left [1] = 0.0 ;
	do
	{	if (data_index >= 0) /* Avoid underflow access to filter->buffer. */
		{	fraction = fp_to_double (filter_index) ;
			indx = fp_to_int (filter_index) ;
			/* Linearly interpolate between adjacent coefficient entries. */
			icoeff = filter->coeffs [indx] + fraction * (filter->coeffs [indx + 1] - filter->coeffs [indx]) ;
			left [0] += icoeff * filter->buffer [data_index] ;
			left [1] += icoeff * filter->buffer [data_index + 1] ;
			} ;
		filter_index -= increment ;
		data_index = data_index + 2 ;
		}
	while (filter_index >= MAKE_INCREMENT_T (0)) ;

	/* Now apply the right half of the filter. */
	filter_index = increment - start_filter_index ;
	coeff_count = (max_filter_index - filter_index) / increment ;
	filter_index = filter_index + coeff_count * increment ;
	data_index = filter->b_current + filter->channels * (1 + coeff_count) ;

	right [0] = right [1] = 0.0 ;
	do
	{	fraction = fp_to_double (filter_index) ;
		indx = fp_to_int (filter_index) ;
		icoeff = filter->coeffs [indx] + fraction * (filter->coeffs [indx + 1] - filter->coeffs [indx]) ;
		right [0] += icoeff * filter->buffer [data_index] ;
		right [1] += icoeff * filter->buffer [data_index + 1] ;

		filter_index -= increment ;
		data_index = data_index - 2 ;
		}
	while (filter_index > MAKE_INCREMENT_T (0)) ;

	output [0] = scale * (left [0] + right [0]) ;
	output [1] = scale * (left [1] + right [1]) ;
} /* calc_output_stereo */
/* Stereo (two channel) variable-ratio processing loop; mirrors
** sinc_mono_vari_process but produces one interleaved frame (two output
** samples) per iteration via calc_output_stereo. */
static int
sinc_stereo_vari_process (SRC_PRIVATE *psrc, SRC_DATA *data)
{	SINC_FILTER *filter ;
	double		input_index, src_ratio, count, float_increment, terminate, rem ;
	increment_t	increment, start_filter_index ;
	int			half_filter_chan_len, samples_in_hand ;

	if (psrc->private_data == NULL)
		return SRC_ERR_NO_PRIVATE ;

	filter = (SINC_FILTER*) psrc->private_data ;

	/* If there is not a problem, this will be optimised out. */
	if (sizeof (filter->buffer [0]) != sizeof (data->data_in [0]))
		return SRC_ERR_SIZE_INCOMPATIBILITY ;

	filter->in_count = data->input_frames * filter->channels ;
	filter->out_count = data->output_frames * filter->channels ;
	filter->in_used = filter->out_gen = 0 ;

	src_ratio = psrc->last_ratio ;

	if (is_bad_src_ratio (src_ratio))
		return SRC_ERR_BAD_INTERNAL_STATE ;

	/* Check the sample rate ratio wrt the buffer len. */
	count = (filter->coeff_half_len + 2.0) / filter->index_inc ;
	if (MIN (psrc->last_ratio, data->src_ratio) < 1.0)
		count /= MIN (psrc->last_ratio, data->src_ratio) ;

	/* Maximum coefficients on either side of center point. */
	half_filter_chan_len = filter->channels * (int) (lrint (count) + 1) ;

	/* Resume from the fractional read position left by the previous call. */
	input_index = psrc->last_position ;
	rem = fmod_one (input_index) ;

	filter->b_current = (filter->b_current + filter->channels * lrint (input_index - rem)) % filter->b_len ;
	input_index = rem ;

	terminate = 1.0 / src_ratio + 1e-20 ;

	/* Main processing loop. */
	while (filter->out_gen < filter->out_count)
	{
		/* Need to reload buffer? */
		samples_in_hand = (filter->b_end - filter->b_current + filter->b_len) % filter->b_len ;

		if (samples_in_hand <= half_filter_chan_len)
		{	if ((psrc->error = prepare_data (filter, data, half_filter_chan_len)) != 0)
				return psrc->error ;

			samples_in_hand = (filter->b_end - filter->b_current + filter->b_len) % filter->b_len ;
			if (samples_in_hand <= half_filter_chan_len)
				break ;
			} ;

		/* This is the termination condition. */
		if (filter->b_real_end >= 0)
		{	if (filter->b_current + input_index + terminate >= filter->b_real_end)
				break ;
			} ;

		/* Ramp the ratio linearly from last_ratio towards data->src_ratio. */
		if (filter->out_count > 0 && fabs (psrc->last_ratio - data->src_ratio) > 1e-10)
			src_ratio = psrc->last_ratio + filter->out_gen * (data->src_ratio - psrc->last_ratio) / filter->out_count ;

		float_increment = filter->index_inc * (src_ratio < 1.0 ? src_ratio : 1.0) ;
		increment = double_to_fp (float_increment) ;

		start_filter_index = double_to_fp (input_index * float_increment) ;

		calc_output_stereo (filter, increment, start_filter_index, float_increment / filter->index_inc, data->data_out + filter->out_gen) ;
		filter->out_gen += 2 ;

		/* Figure out the next index. */
		input_index += 1.0 / src_ratio ;
		rem = fmod_one (input_index) ;

		filter->b_current = (filter->b_current + filter->channels * lrint (input_index - rem)) % filter->b_len ;
		input_index = rem ;
		} ;

	psrc->last_position = input_index ;

	/* Save current ratio rather than target ratio. */
	psrc->last_ratio = src_ratio ;

	data->input_frames_used = filter->in_used / filter->channels ;
	data->output_frames_gen = filter->out_gen / filter->channels ;

	return SRC_ERR_NO_ERROR ;
} /* sinc_stereo_vari_process */
/* Compute one 4-channel (quad) output frame.
**
** Convolves the windowed-sinc coefficient table against the circular input
** buffer, walking outward from the centre point in both directions.  The
** filter coefficients are addressed in fixed point (increment_t); the value
** actually applied (icoeff) is linearly interpolated between the two
** adjacent table entries.  `scale` normalises for the coefficient spacing
** and is applied once at the end.  Writes 4 floats to `output`.
*/
static inline void
calc_output_quad (SINC_FILTER *filter, increment_t increment, increment_t start_filter_index, double scale, float * output)
{ double fraction, left [4], right [4], icoeff ;
increment_t filter_index, max_filter_index ;
int data_index, coeff_count, indx ;
/* Convert input parameters into fixed point. */
max_filter_index = int_to_fp (filter->coeff_half_len) ;
/* First apply the left half of the filter. */
filter_index = start_filter_index ;
coeff_count = (max_filter_index - filter_index) / increment ;
filter_index = filter_index + coeff_count * increment ;
data_index = filter->b_current - filter->channels * coeff_count ;
left [0] = left [1] = left [2] = left [3] = 0.0 ;
do
{ if (data_index >= 0) /* Avoid underflow access to filter->buffer. */
{ fraction = fp_to_double (filter_index) ;
indx = fp_to_int (filter_index) ;
/* Linear interpolation between adjacent coefficient table entries. */
icoeff = filter->coeffs [indx] + fraction * (filter->coeffs [indx + 1] - filter->coeffs [indx]) ;
left [0] += icoeff * filter->buffer [data_index] ;
left [1] += icoeff * filter->buffer [data_index + 1] ;
left [2] += icoeff * filter->buffer [data_index + 2] ;
left [3] += icoeff * filter->buffer [data_index + 3] ;
} ;
filter_index -= increment ;
data_index = data_index + 4 ;
}
while (filter_index >= MAKE_INCREMENT_T (0)) ;
/* Now apply the right half of the filter. */
filter_index = increment - start_filter_index ;
coeff_count = (max_filter_index - filter_index) / increment ;
filter_index = filter_index + coeff_count * increment ;
data_index = filter->b_current + filter->channels * (1 + coeff_count) ;
right [0] = right [1] = right [2] = right [3] = 0.0 ;
do
{ fraction = fp_to_double (filter_index) ;
indx = fp_to_int (filter_index) ;
icoeff = filter->coeffs [indx] + fraction * (filter->coeffs [indx + 1] - filter->coeffs [indx]) ;
right [0] += icoeff * filter->buffer [data_index] ;
right [1] += icoeff * filter->buffer [data_index + 1] ;
right [2] += icoeff * filter->buffer [data_index + 2] ;
right [3] += icoeff * filter->buffer [data_index + 3] ;
filter_index -= increment ;
data_index = data_index - 4 ;
}
while (filter_index > MAKE_INCREMENT_T (0)) ;
/* Combine both halves and apply the normalisation factor. */
output [0] = scale * (left [0] + right [0]) ;
output [1] = scale * (left [1] + right [1]) ;
output [2] = scale * (left [2] + right [2]) ;
output [3] = scale * (left [3] + right [3]) ;
} /* calc_output_quad */
/* Variable-ratio sinc resampling for exactly 4 interleaved channels.
**
** Pulls frames from data->data_in (via prepare_data) through the circular
** buffer and emits resampled frames into data->data_out, linearly ramping
** the conversion ratio from psrc->last_ratio to data->src_ratio across the
** output block.  Returns an SRC_ERR_* code; updates in/out frame counts
** and saves position/ratio state for the next call.
*/
static int
sinc_quad_vari_process (SRC_PRIVATE *psrc, SRC_DATA *data)
{ SINC_FILTER *filter ;
double input_index, src_ratio, count, float_increment, terminate, rem ;
increment_t increment, start_filter_index ;
int half_filter_chan_len, samples_in_hand ;
if (psrc->private_data == NULL)
return SRC_ERR_NO_PRIVATE ;
filter = (SINC_FILTER*) psrc->private_data ;
/* If there is not a problem, this will be optimised out. */
if (sizeof (filter->buffer [0]) != sizeof (data->data_in [0]))
return SRC_ERR_SIZE_INCOMPATIBILITY ;
filter->in_count = data->input_frames * filter->channels ;
filter->out_count = data->output_frames * filter->channels ;
filter->in_used = filter->out_gen = 0 ;
src_ratio = psrc->last_ratio ;
if (is_bad_src_ratio (src_ratio))
return SRC_ERR_BAD_INTERNAL_STATE ;
/* Check the sample rate ratio wrt the buffer len. */
count = (filter->coeff_half_len + 2.0) / filter->index_inc ;
if (MIN (psrc->last_ratio, data->src_ratio) < 1.0)
count /= MIN (psrc->last_ratio, data->src_ratio) ;
/* Maximum coefficients on either side of center point. */
half_filter_chan_len = filter->channels * (int) (lrint (count) + 1) ;
input_index = psrc->last_position ;
rem = fmod_one (input_index) ;
/* Advance the circular-buffer read pointer by the whole-frame part. */
filter->b_current = (filter->b_current + filter->channels * lrint (input_index - rem)) % filter->b_len ;
input_index = rem ;
terminate = 1.0 / src_ratio + 1e-20 ;
/* Main processing loop. */
while (filter->out_gen < filter->out_count)
{
/* Need to reload buffer? */
samples_in_hand = (filter->b_end - filter->b_current + filter->b_len) % filter->b_len ;
if (samples_in_hand <= half_filter_chan_len)
{ if ((psrc->error = prepare_data (filter, data, half_filter_chan_len)) != 0)
return psrc->error ;
samples_in_hand = (filter->b_end - filter->b_current + filter->b_len) % filter->b_len ;
if (samples_in_hand <= half_filter_chan_len)
break ;
} ;
/* This is the termination condition. */
if (filter->b_real_end >= 0)
{ if (filter->b_current + input_index + terminate >= filter->b_real_end)
break ;
} ;
/* Ramp the ratio linearly across the output block when it is changing. */
if (filter->out_count > 0 && fabs (psrc->last_ratio - data->src_ratio) > 1e-10)
src_ratio = psrc->last_ratio + filter->out_gen * (data->src_ratio - psrc->last_ratio) / filter->out_count ;
float_increment = filter->index_inc * (src_ratio < 1.0 ? src_ratio : 1.0) ;
increment = double_to_fp (float_increment) ;
start_filter_index = double_to_fp (input_index * float_increment) ;
calc_output_quad (filter, increment, start_filter_index, float_increment / filter->index_inc, data->data_out + filter->out_gen) ;
filter->out_gen += 4 ;
/* Figure out the next index. */
input_index += 1.0 / src_ratio ;
rem = fmod_one (input_index) ;
filter->b_current = (filter->b_current + filter->channels * lrint (input_index - rem)) % filter->b_len ;
input_index = rem ;
} ;
psrc->last_position = input_index ;
/* Save current ratio rather than target ratio. */
psrc->last_ratio = src_ratio ;
data->input_frames_used = filter->in_used / filter->channels ;
data->output_frames_gen = filter->out_gen / filter->channels ;
return SRC_ERR_NO_ERROR ;
} /* sinc_quad_vari_process */
/* Compute one 6-channel (hex) output frame.
**
** Same algorithm as calc_output_quad but unrolled for six interleaved
** channels: convolve the fixed-point-indexed sinc coefficient table
** (linearly interpolated between adjacent entries) against the circular
** input buffer, left half then right half, then scale.  Writes 6 floats
** to `output`.
*/
static inline void
calc_output_hex (SINC_FILTER *filter, increment_t increment, increment_t start_filter_index, double scale, float * output)
{ double fraction, left [6], right [6], icoeff ;
increment_t filter_index, max_filter_index ;
int data_index, coeff_count, indx ;
/* Convert input parameters into fixed point. */
max_filter_index = int_to_fp (filter->coeff_half_len) ;
/* First apply the left half of the filter. */
filter_index = start_filter_index ;
coeff_count = (max_filter_index - filter_index) / increment ;
filter_index = filter_index + coeff_count * increment ;
data_index = filter->b_current - filter->channels * coeff_count ;
left [0] = left [1] = left [2] = left [3] = left [4] = left [5] = 0.0 ;
do
{ if (data_index >= 0) /* Avoid underflow access to filter->buffer. */
{ fraction = fp_to_double (filter_index) ;
indx = fp_to_int (filter_index) ;
icoeff = filter->coeffs [indx] + fraction * (filter->coeffs [indx + 1] - filter->coeffs [indx]) ;
left [0] += icoeff * filter->buffer [data_index] ;
left [1] += icoeff * filter->buffer [data_index + 1] ;
left [2] += icoeff * filter->buffer [data_index + 2] ;
left [3] += icoeff * filter->buffer [data_index + 3] ;
left [4] += icoeff * filter->buffer [data_index + 4] ;
left [5] += icoeff * filter->buffer [data_index + 5] ;
} ;
filter_index -= increment ;
data_index = data_index + 6 ;
}
while (filter_index >= MAKE_INCREMENT_T (0)) ;
/* Now apply the right half of the filter. */
filter_index = increment - start_filter_index ;
coeff_count = (max_filter_index - filter_index) / increment ;
filter_index = filter_index + coeff_count * increment ;
data_index = filter->b_current + filter->channels * (1 + coeff_count) ;
right [0] = right [1] = right [2] = right [3] = right [4] = right [5] = 0.0 ;
do
{ fraction = fp_to_double (filter_index) ;
indx = fp_to_int (filter_index) ;
icoeff = filter->coeffs [indx] + fraction * (filter->coeffs [indx + 1] - filter->coeffs [indx]) ;
right [0] += icoeff * filter->buffer [data_index] ;
right [1] += icoeff * filter->buffer [data_index + 1] ;
right [2] += icoeff * filter->buffer [data_index + 2] ;
right [3] += icoeff * filter->buffer [data_index + 3] ;
right [4] += icoeff * filter->buffer [data_index + 4] ;
right [5] += icoeff * filter->buffer [data_index + 5] ;
filter_index -= increment ;
data_index = data_index - 6 ;
}
while (filter_index > MAKE_INCREMENT_T (0)) ;
/* Combine both halves and apply the normalisation factor. */
output [0] = scale * (left [0] + right [0]) ;
output [1] = scale * (left [1] + right [1]) ;
output [2] = scale * (left [2] + right [2]) ;
output [3] = scale * (left [3] + right [3]) ;
output [4] = scale * (left [4] + right [4]) ;
output [5] = scale * (left [5] + right [5]) ;
} /* calc_output_hex */
/* Variable-ratio sinc resampling for exactly 6 interleaved channels.
**
** Identical control flow to sinc_quad_vari_process, but emits 6 samples
** per output frame via calc_output_hex.  Returns an SRC_ERR_* code and
** saves position/ratio state for the next call.
*/
static int
sinc_hex_vari_process (SRC_PRIVATE *psrc, SRC_DATA *data)
{ SINC_FILTER *filter ;
double input_index, src_ratio, count, float_increment, terminate, rem ;
increment_t increment, start_filter_index ;
int half_filter_chan_len, samples_in_hand ;
if (psrc->private_data == NULL)
return SRC_ERR_NO_PRIVATE ;
filter = (SINC_FILTER*) psrc->private_data ;
/* If there is not a problem, this will be optimised out. */
if (sizeof (filter->buffer [0]) != sizeof (data->data_in [0]))
return SRC_ERR_SIZE_INCOMPATIBILITY ;
filter->in_count = data->input_frames * filter->channels ;
filter->out_count = data->output_frames * filter->channels ;
filter->in_used = filter->out_gen = 0 ;
src_ratio = psrc->last_ratio ;
if (is_bad_src_ratio (src_ratio))
return SRC_ERR_BAD_INTERNAL_STATE ;
/* Check the sample rate ratio wrt the buffer len. */
count = (filter->coeff_half_len + 2.0) / filter->index_inc ;
if (MIN (psrc->last_ratio, data->src_ratio) < 1.0)
count /= MIN (psrc->last_ratio, data->src_ratio) ;
/* Maximum coefficients on either side of center point. */
half_filter_chan_len = filter->channels * (int) (lrint (count) + 1) ;
input_index = psrc->last_position ;
rem = fmod_one (input_index) ;
/* Advance the circular-buffer read pointer by the whole-frame part. */
filter->b_current = (filter->b_current + filter->channels * lrint (input_index - rem)) % filter->b_len ;
input_index = rem ;
terminate = 1.0 / src_ratio + 1e-20 ;
/* Main processing loop. */
while (filter->out_gen < filter->out_count)
{
/* Need to reload buffer? */
samples_in_hand = (filter->b_end - filter->b_current + filter->b_len) % filter->b_len ;
if (samples_in_hand <= half_filter_chan_len)
{ if ((psrc->error = prepare_data (filter, data, half_filter_chan_len)) != 0)
return psrc->error ;
samples_in_hand = (filter->b_end - filter->b_current + filter->b_len) % filter->b_len ;
if (samples_in_hand <= half_filter_chan_len)
break ;
} ;
/* This is the termination condition. */
if (filter->b_real_end >= 0)
{ if (filter->b_current + input_index + terminate >= filter->b_real_end)
break ;
} ;
/* Ramp the ratio linearly across the output block when it is changing. */
if (filter->out_count > 0 && fabs (psrc->last_ratio - data->src_ratio) > 1e-10)
src_ratio = psrc->last_ratio + filter->out_gen * (data->src_ratio - psrc->last_ratio) / filter->out_count ;
float_increment = filter->index_inc * (src_ratio < 1.0 ? src_ratio : 1.0) ;
increment = double_to_fp (float_increment) ;
start_filter_index = double_to_fp (input_index * float_increment) ;
calc_output_hex (filter, increment, start_filter_index, float_increment / filter->index_inc, data->data_out + filter->out_gen) ;
filter->out_gen += 6 ;
/* Figure out the next index. */
input_index += 1.0 / src_ratio ;
rem = fmod_one (input_index) ;
filter->b_current = (filter->b_current + filter->channels * lrint (input_index - rem)) % filter->b_len ;
input_index = rem ;
} ;
psrc->last_position = input_index ;
/* Save current ratio rather than target ratio. */
psrc->last_ratio = src_ratio ;
data->input_frames_used = filter->in_used / filter->channels ;
data->output_frames_gen = filter->out_gen / filter->channels ;
return SRC_ERR_NO_ERROR ;
} /* sinc_hex_vari_process */
/* Compute one output frame for an arbitrary channel count.
**
** Same convolution as the fixed-channel variants, but the per-channel
** accumulation is done with a Duff's-device unrolled loop (factor 8) over
** `channels` interleaved samples.  Accumulators live in the
** filter->left_calc / filter->right_calc scratch arrays.  Writes
** `channels` floats to `output`.
*/
static inline void
calc_output_multi (SINC_FILTER *filter, increment_t increment, increment_t start_filter_index, int channels, double scale, float * output)
{ double fraction, icoeff ;
/* The following line is 1999 ISO Standard C. If your compiler complains, get a better compiler. */
double *left, *right ;
increment_t filter_index, max_filter_index ;
int data_index, coeff_count, indx, ch ;
left = filter->left_calc ;
right = filter->right_calc ;
/* Convert input parameters into fixed point. */
max_filter_index = int_to_fp (filter->coeff_half_len) ;
/* First apply the left half of the filter. */
filter_index = start_filter_index ;
coeff_count = (max_filter_index - filter_index) / increment ;
filter_index = filter_index + coeff_count * increment ;
data_index = filter->b_current - channels * coeff_count ;
memset (left, 0, sizeof (left [0]) * channels) ;
do
{ fraction = fp_to_double (filter_index) ;
indx = fp_to_int (filter_index) ;
icoeff = filter->coeffs [indx] + fraction * (filter->coeffs [indx + 1] - filter->coeffs [indx]) ;
if (data_index >= 0) /* Avoid underflow access to filter->buffer. */
{ /*
** Duff's Device.
** See : http://en.wikipedia.org/wiki/Duff's_device
*/
ch = channels ;
do
{ switch (ch % 8)
{ default :
ch -- ;
left [ch] += icoeff * filter->buffer [data_index + ch] ;
/* Falls through. */
case 7 :
ch -- ;
left [ch] += icoeff * filter->buffer [data_index + ch] ;
/* Falls through. */
case 6 :
ch -- ;
left [ch] += icoeff * filter->buffer [data_index + ch] ;
/* Falls through. */
case 5 :
ch -- ;
left [ch] += icoeff * filter->buffer [data_index + ch] ;
/* Falls through. */
case 4 :
ch -- ;
left [ch] += icoeff * filter->buffer [data_index + ch] ;
/* Falls through. */
case 3 :
ch -- ;
left [ch] += icoeff * filter->buffer [data_index + ch] ;
/* Falls through. */
case 2 :
ch -- ;
left [ch] += icoeff * filter->buffer [data_index + ch] ;
/* Falls through. */
case 1 :
ch -- ;
left [ch] += icoeff * filter->buffer [data_index + ch] ;
} ;
}
while (ch > 0) ;
} ;
filter_index -= increment ;
data_index = data_index + channels ;
}
while (filter_index >= MAKE_INCREMENT_T (0)) ;
/* Now apply the right half of the filter. */
filter_index = increment - start_filter_index ;
coeff_count = (max_filter_index - filter_index) / increment ;
filter_index = filter_index + coeff_count * increment ;
data_index = filter->b_current + channels * (1 + coeff_count) ;
memset (right, 0, sizeof (right [0]) * channels) ;
do
{ fraction = fp_to_double (filter_index) ;
indx = fp_to_int (filter_index) ;
icoeff = filter->coeffs [indx] + fraction * (filter->coeffs [indx + 1] - filter->coeffs [indx]) ;
/* Unrolled per-channel accumulation, as above. */
ch = channels ;
do
{
switch (ch % 8)
{ default :
ch -- ;
right [ch] += icoeff * filter->buffer [data_index + ch] ;
/* Falls through. */
case 7 :
ch -- ;
right [ch] += icoeff * filter->buffer [data_index + ch] ;
/* Falls through. */
case 6 :
ch -- ;
right [ch] += icoeff * filter->buffer [data_index + ch] ;
/* Falls through. */
case 5 :
ch -- ;
right [ch] += icoeff * filter->buffer [data_index + ch] ;
/* Falls through. */
case 4 :
ch -- ;
right [ch] += icoeff * filter->buffer [data_index + ch] ;
/* Falls through. */
case 3 :
ch -- ;
right [ch] += icoeff * filter->buffer [data_index + ch] ;
/* Falls through. */
case 2 :
ch -- ;
right [ch] += icoeff * filter->buffer [data_index + ch] ;
/* Falls through. */
case 1 :
ch -- ;
right [ch] += icoeff * filter->buffer [data_index + ch] ;
} ;
}
while (ch > 0) ;
filter_index -= increment ;
data_index = data_index - channels ;
}
while (filter_index > MAKE_INCREMENT_T (0)) ;
/* Combine both halves and apply the normalisation factor, again unrolled. */
ch = channels ;
do
{
switch (ch % 8)
{ default :
ch -- ;
output [ch] = scale * (left [ch] + right [ch]) ;
/* Falls through. */
case 7 :
ch -- ;
output [ch] = scale * (left [ch] + right [ch]) ;
/* Falls through. */
case 6 :
ch -- ;
output [ch] = scale * (left [ch] + right [ch]) ;
/* Falls through. */
case 5 :
ch -- ;
output [ch] = scale * (left [ch] + right [ch]) ;
/* Falls through. */
case 4 :
ch -- ;
output [ch] = scale * (left [ch] + right [ch]) ;
/* Falls through. */
case 3 :
ch -- ;
output [ch] = scale * (left [ch] + right [ch]) ;
/* Falls through. */
case 2 :
ch -- ;
output [ch] = scale * (left [ch] + right [ch]) ;
/* Falls through. */
case 1 :
ch -- ;
output [ch] = scale * (left [ch] + right [ch]) ;
} ;
}
while (ch > 0) ;
return ;
} /* calc_output_multi */
/* Variable-ratio sinc resampling for an arbitrary channel count.
**
** Identical control flow to the quad/hex variants, but delegates the
** per-frame convolution to calc_output_multi and advances the output
** cursor by the channel count.
*/
static int
sinc_multichan_vari_process (SRC_PRIVATE *psrc, SRC_DATA *data)
{ SINC_FILTER *filter ;
double input_index, src_ratio, count, float_increment, terminate, rem ;
increment_t increment, start_filter_index ;
int half_filter_chan_len, samples_in_hand ;
if (psrc->private_data == NULL)
return SRC_ERR_NO_PRIVATE ;
filter = (SINC_FILTER*) psrc->private_data ;
/* If there is not a problem, this will be optimised out. */
if (sizeof (filter->buffer [0]) != sizeof (data->data_in [0]))
return SRC_ERR_SIZE_INCOMPATIBILITY ;
filter->in_count = data->input_frames * filter->channels ;
filter->out_count = data->output_frames * filter->channels ;
filter->in_used = filter->out_gen = 0 ;
src_ratio = psrc->last_ratio ;
if (is_bad_src_ratio (src_ratio))
return SRC_ERR_BAD_INTERNAL_STATE ;
/* Check the sample rate ratio wrt the buffer len. */
count = (filter->coeff_half_len + 2.0) / filter->index_inc ;
if (MIN (psrc->last_ratio, data->src_ratio) < 1.0)
count /= MIN (psrc->last_ratio, data->src_ratio) ;
/* Maximum coefficients on either side of center point. */
half_filter_chan_len = filter->channels * (int) (lrint (count) + 1) ;
input_index = psrc->last_position ;
rem = fmod_one (input_index) ;
/* Advance the circular-buffer read pointer by the whole-frame part. */
filter->b_current = (filter->b_current + filter->channels * lrint (input_index - rem)) % filter->b_len ;
input_index = rem ;
terminate = 1.0 / src_ratio + 1e-20 ;
/* Main processing loop. */
while (filter->out_gen < filter->out_count)
{
/* Need to reload buffer? */
samples_in_hand = (filter->b_end - filter->b_current + filter->b_len) % filter->b_len ;
if (samples_in_hand <= half_filter_chan_len)
{ if ((psrc->error = prepare_data (filter, data, half_filter_chan_len)) != 0)
return psrc->error ;
samples_in_hand = (filter->b_end - filter->b_current + filter->b_len) % filter->b_len ;
if (samples_in_hand <= half_filter_chan_len)
break ;
} ;
/* This is the termination condition. */
if (filter->b_real_end >= 0)
{ if (filter->b_current + input_index + terminate >= filter->b_real_end)
break ;
} ;
/* Ramp the ratio linearly across the output block when it is changing. */
if (filter->out_count > 0 && fabs (psrc->last_ratio - data->src_ratio) > 1e-10)
src_ratio = psrc->last_ratio + filter->out_gen * (data->src_ratio - psrc->last_ratio) / filter->out_count ;
float_increment = filter->index_inc * (src_ratio < 1.0 ? src_ratio : 1.0) ;
increment = double_to_fp (float_increment) ;
start_filter_index = double_to_fp (input_index * float_increment) ;
calc_output_multi (filter, increment, start_filter_index, filter->channels, float_increment / filter->index_inc, data->data_out + filter->out_gen) ;
/* NOTE(review): the sibling variants advance by filter->channels here;
** this relies on psrc->channels always equalling filter->channels —
** confirm against where the filter is initialised. */
filter->out_gen += psrc->channels ;
/* Figure out the next index. */
input_index += 1.0 / src_ratio ;
rem = fmod_one (input_index) ;
filter->b_current = (filter->b_current + filter->channels * lrint (input_index - rem)) % filter->b_len ;
input_index = rem ;
} ;
psrc->last_position = input_index ;
/* Save current ratio rather than target ratio. */
psrc->last_ratio = src_ratio ;
data->input_frames_used = filter->in_used / filter->channels ;
data->output_frames_gen = filter->out_gen / filter->channels ;
return SRC_ERR_NO_ERROR ;
} /* sinc_multichan_vari_process */
/*----------------------------------------------------------------------------------------
*/
/* Refill the filter's circular input buffer from data->data_in.
**
** Three cases: (1) first call (b_current == 0) — reserve a zeroed guard
** region of half_filter_chan_len at the start; (2) room remains at the end
** of the buffer — append there; (3) otherwise — slide the live region
** (plus its guard) back to the start of the buffer before appending.
** When the final input block has been consumed (end_of_input), the buffer
** is zero-padded past b_real_end so the filter tail can flush.
** Returns 0 on success or SRC_ERR_SINC_PREPARE_DATA_BAD_LEN.
*/
static int
prepare_data (SINC_FILTER *filter, SRC_DATA *data, int half_filter_chan_len)
{ int len = 0 ;
if (filter->b_real_end >= 0)
return 0 ; /* Should be terminating. Just return. */
if (data->data_in == NULL)
return 0 ;
if (filter->b_current == 0)
{ /* Initial state. Set up zeros at the start of the buffer and
** then load new data after that.
*/
len = filter->b_len - 2 * half_filter_chan_len ;
filter->b_current = filter->b_end = half_filter_chan_len ;
}
else if (filter->b_end + half_filter_chan_len + filter->channels < filter->b_len)
{ /* Load data at current end position. */
len = MAX (filter->b_len - filter->b_current - half_filter_chan_len, 0) ;
}
else
{ /* Move data at end of buffer back to the start of the buffer. */
len = filter->b_end - filter->b_current ;
memmove (filter->buffer, filter->buffer + filter->b_current - half_filter_chan_len,
(half_filter_chan_len + len) * sizeof (filter->buffer [0])) ;
filter->b_current = half_filter_chan_len ;
filter->b_end = filter->b_current + len ;
/* Now load data at current end of buffer. */
len = MAX (filter->b_len - filter->b_current - half_filter_chan_len, 0) ;
} ;
/* Clamp to the input actually remaining and to whole frames. */
len = MIN ((int) (filter->in_count - filter->in_used), len) ;
len -= (len % filter->channels) ;
if (len < 0 || filter->b_end + len > filter->b_len)
return SRC_ERR_SINC_PREPARE_DATA_BAD_LEN ;
memcpy (filter->buffer + filter->b_end, data->data_in + filter->in_used,
len * sizeof (filter->buffer [0])) ;
filter->b_end += len ;
filter->in_used += len ;
if (filter->in_used == filter->in_count &&
filter->b_end - filter->b_current < 2 * half_filter_chan_len && data->end_of_input)
{ /* Handle the case where all data in the current buffer has been
** consumed and this is the last buffer.
*/
if (filter->b_len - filter->b_end < half_filter_chan_len + 5)
{ /* If necessary, move data down to the start of the buffer. */
len = filter->b_end - filter->b_current ;
memmove (filter->buffer, filter->buffer + filter->b_current - half_filter_chan_len,
(half_filter_chan_len + len) * sizeof (filter->buffer [0])) ;
filter->b_current = half_filter_chan_len ;
filter->b_end = filter->b_current + len ;
} ;
/* Mark the true end of data, then zero-pad so the filter can flush. */
filter->b_real_end = filter->b_end ;
len = half_filter_chan_len + 5 ;
if (len < 0 || filter->b_end + len > filter->b_len)
len = filter->b_len - filter->b_end ;
memset (filter->buffer + filter->b_end, 0, len * sizeof (filter->buffer [0])) ;
filter->b_end += len ;
} ;
return 0 ;
} /* prepare_data */
| {
"pile_set_name": "Github"
} |
/**
* Node.js wrapper for "notify-send".
*/
var os = require('os');
var which = require('which');
var utils = require('../lib/utils');
var EventEmitter = require('events').EventEmitter;
var util = require('util');
var notifier = 'notify-send';
var hasNotifier = void 0;
module.exports = NotifySend;
/**
 * NotifySend notifier constructor (EventEmitter subclass).
 *
 * Clones the supplied options up front so later mutation by the caller
 * cannot affect this instance, and guards against invocation without `new`.
 *
 * @param {Object} [options] notifier options (e.g. suppressOsdCheck).
 * @constructor
 */
function NotifySend(options) {
options = utils.clone(options || {});
if (!(this instanceof NotifySend)) {
/* Called as a plain function: re-invoke as a constructor. */
return new NotifySend(options);
}
this.options = options;
EventEmitter.call(this);
}
util.inherits(NotifySend, EventEmitter);
function noop() {}
/**
 * Send a desktop notification through the `notify-send` binary.
 *
 * Validates the callback and options, rejects unsupported platforms, and
 * lazily probes (and caches, in the module-level `hasNotifier`) whether
 * `notify-send` is available before dispatching via doNotification().
 *
 * @param {Object|string} options message options, or the message itself.
 * @param {Function} [callback] invoked with (err) on failure or when done.
 * @returns {NotifySend} this, for chaining.
 * @throws {TypeError} when `callback` is given but is not a function.
 */
NotifySend.prototype.notify = function(options, callback) {
  var opts = utils.clone(options || {});
  var cb = callback || noop;

  if (typeof cb !== 'function') {
    throw new TypeError(
      'The second argument must be a function callback. You have passed ' +
        typeof cb
    );
  }

  /* A bare string is shorthand for { message: string }. */
  if (typeof opts === 'string') {
    opts = { title: 'node-notifier', message: opts };
  }

  if (!opts.message) {
    cb(new Error('Message is required.'));
    return this;
  }

  var platform = os.type();
  if (platform !== 'Linux' && !platform.match(/BSD$/)) {
    cb(new Error('Only supported on Linux and *BSD systems'));
    return this;
  }

  /* A previous probe already determined notify-send is missing. */
  if (hasNotifier === false) {
    cb(new Error('notify-send must be installed on the system.'));
    return this;
  }

  /* Known-present, or the caller asked us to skip the probe entirely. */
  if (hasNotifier || !!this.options.suppressOsdCheck) {
    doNotification(opts, cb);
    return this;
  }

  /* First use: probe for the binary, then send. */
  try {
    hasNotifier = !!which.sync(notifier);
    doNotification(opts, cb);
  } catch (err) {
    hasNotifier = false;
    return cb(err);
  }
  return this;
};
/* notify-send command-line flags that options may map onto. */
var allowedArguments = ['urgency', 'expire-time', 'icon', 'category', 'hint'];

/**
 * Translate options into a notify-send argument list and spawn the binary.
 *
 * The title and message become positional arguments; every remaining
 * (whitelisted) option becomes a `-key value` flag.
 *
 * @param {Object} options already-validated notification options.
 * @param {Function} callback passed through to utils.command.
 */
function doNotification(options, callback) {
  var mapped = utils.mapToNotifySend(options);
  mapped.title = mapped.title || 'Node Notification:';

  var positional = [mapped.title, mapped.message];
  delete mapped.title;
  delete mapped.message;

  var argsList = utils.constructArgumentList(mapped, {
    initial: positional,
    keyExtra: '-',
    allowedArguments: allowedArguments
  });

  utils.command(notifier, argsList, callback);
}
| {
"pile_set_name": "Github"
} |
sha256:6c10d2d197fe81c199966fd0976d7a259510e54da8e1583b1c08a1e66b5d8e42
| {
"pile_set_name": "Github"
} |
package cli
import (
"fmt"
"io"
"os"
"strings"
)
// OsExiter is the function used when the app exits. If not set defaults to os.Exit.
var OsExiter = os.Exit
// ErrWriter is used to write errors to the user. This can be anything
// implementing the io.Writer interface and defaults to os.Stderr.
var ErrWriter io.Writer = os.Stderr
// MultiError is an error that wraps multiple errors.
type MultiError struct {
Errors []error
}
// NewMultiError creates a new MultiError. Pass in one or more errors.
func NewMultiError(err ...error) MultiError {
return MultiError{Errors: err}
}
// Error implements the error interface.
func (m MultiError) Error() string {
errs := make([]string, len(m.Errors))
for i, err := range m.Errors {
errs[i] = err.Error()
}
return strings.Join(errs, "\n")
}
// ErrorFormatter is implemented by errors that support fmt verb-aware
// formatting (e.g. "%+v"); HandleExitCoder uses it to pick a print style.
type ErrorFormatter interface {
Format(s fmt.State, verb rune)
}
// ExitCoder is the interface checked by `App` and `Command` for a custom exit
// code
type ExitCoder interface {
error
ExitCode() int
}
// ExitError pairs an error message with a process exit status. It fulfills
// both the builtin `error` interface and ExitCoder.
type ExitError struct {
	exitCode int
	message  interface{}
}

// NewExitError constructs an *ExitError carrying the given message and
// exit code.
func NewExitError(message interface{}, exitCode int) *ExitError {
	ee := &ExitError{}
	ee.message = message
	ee.exitCode = exitCode
	return ee
}

// Error renders the wrapped message as a string, fulfilling `error`.
func (ee *ExitError) Error() string {
	return fmt.Sprintf("%v", ee.message)
}

// ExitCode reports the stored exit status, fulfilling ExitCoder.
func (ee *ExitError) ExitCode() int {
	return ee.exitCode
}
// HandleExitCoder inspects err and, when it implements ExitCoder, prints it
// to ErrWriter (only if its message is non-empty, using "%+v" when the error
// also implements ErrorFormatter) and terminates via OsExiter with the
// error's exit code. A MultiError is handled member-by-member, exiting with
// the last reported code. A nil error, or any other error type, is ignored.
func HandleExitCoder(err error) {
	if err == nil {
		return
	}
	switch e := err.(type) {
	case ExitCoder:
		if err.Error() != "" {
			if _, formatted := e.(ErrorFormatter); formatted {
				fmt.Fprintf(ErrWriter, "%+v\n", err)
			} else {
				fmt.Fprintln(ErrWriter, err)
			}
		}
		OsExiter(e.ExitCode())
	case MultiError:
		OsExiter(handleMultiError(e))
	}
}
// handleMultiError prints every non-multi member of multiErr to ErrWriter
// and returns the exit code to use: the code of the last ExitCoder seen, or
// 1 when none of the wrapped errors provides one. Nested MultiErrors are
// processed recursively.
func handleMultiError(multiErr MultiError) int {
	exitCode := 1
	for _, wrapped := range multiErr.Errors {
		if nested, isMulti := wrapped.(MultiError); isMulti {
			exitCode = handleMultiError(nested)
			continue
		}
		fmt.Fprintln(ErrWriter, wrapped)
		if coder, ok := wrapped.(ExitCoder); ok {
			exitCode = coder.ExitCode()
		}
	}
	return exitCode
}
| {
"pile_set_name": "Github"
} |
// Copyright 2020 The Kubernetes Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package test
import (
"bufio"
"bytes"
"context"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"sort"
"strings"
"testing"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/portforward"
"k8s.io/client-go/transport/spdy"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/prometheus/common/expfmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/transport"
metricsclientset "k8s.io/metrics/pkg/client/clientset/versioned"
)
// TestMetricsServer is the `go test` entry point: it wires Ginkgo's fail
// handler into Gomega and runs the "[MetricsServer]" spec suite below.
func TestMetricsServer(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "[MetricsServer]")
}
// End-to-end specs for a deployed metrics-server: verify the metrics API
// serves pod and node metrics, and that the server's own /metrics endpoint
// (reached via port-forward) exposes the expected Prometheus series.
// Client construction happens at suite-build time; failures there panic
// because Gomega is not yet usable outside a spec node.
var _ = Describe("MetricsServer", func() {
restConfig, err := getRestConfig()
if err != nil {
panic(err)
}
client, err := clientset.NewForConfig(restConfig)
if err != nil {
panic(err)
}
mclient, err := metricsclientset.NewForConfig(restConfig)
if err != nil {
panic(err)
}
It("exposes metrics from at least one pod in cluster", func() {
podMetrics, err := mclient.MetricsV1beta1().PodMetricses(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred(), "Failed to list pod metrics")
Expect(podMetrics.Items).NotTo(BeEmpty(), "Need at least one pod to verify if MetricsServer works")
})
It("exposes metrics about all nodes in cluster", func() {
nodeList, err := client.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
if err != nil {
panic(err)
}
Expect(nodeList.Items).NotTo(BeEmpty(), "Need at least one node to verify if MetricsServer works")
for _, node := range nodeList.Items {
_, err := mclient.MetricsV1beta1().NodeMetricses().Get(context.Background(), node.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "Metrics for node %s are not available", node.Name)
}
})
It("exposes prometheus metrics", func() {
podList, err := client.CoreV1().Pods(metav1.NamespaceSystem).List(context.Background(), metav1.ListOptions{LabelSelector: "k8s-app=metrics-server"})
Expect(err).NotTo(HaveOccurred(), "Failed to find Metrics Server pod")
Expect(podList.Items).NotTo(BeEmpty(), "Metrics Server pod was not found")
Expect(podList.Items).To(HaveLen(1), "Expect to only have one Metrics Server pod")
msPod := podList.Items[0]
// Scrape the pod's own /metrics through a local port-forward tunnel.
resp, err := proxyRequestToPod(restConfig, msPod.Namespace, msPod.Name, "https", 4443, "/metrics")
Expect(err).NotTo(HaveOccurred(), "Failed to get Metrics Server /metrics endpoint")
metrics, err := parseMetricNames(resp)
Expect(err).NotTo(HaveOccurred(), "Failed to parse Metrics Server metrics")
// Sort for a deterministic comparison against the expected list.
sort.Strings(metrics)
Expect(metrics).To(Equal([]string{
"apiserver_audit_event_total",
"apiserver_audit_requests_rejected_total",
"apiserver_client_certificate_expiration_seconds",
"apiserver_current_inflight_requests",
"apiserver_envelope_encryption_dek_cache_fill_percent",
"apiserver_flowcontrol_read_vs_write_request_count_samples",
"apiserver_flowcontrol_read_vs_write_request_count_watermarks",
"apiserver_request_duration_seconds",
"apiserver_request_total",
"apiserver_response_sizes",
"apiserver_storage_data_key_generation_duration_seconds",
"apiserver_storage_data_key_generation_failures_total",
"apiserver_storage_envelope_transformation_cache_misses_total",
"apiserver_tls_handshake_errors_total",
"authenticated_user_requests",
"authentication_attempts",
"authentication_duration_seconds",
"go_gc_duration_seconds",
"go_goroutines",
"go_info",
"go_memstats_alloc_bytes",
"go_memstats_alloc_bytes_total",
"go_memstats_buck_hash_sys_bytes",
"go_memstats_frees_total",
"go_memstats_gc_cpu_fraction",
"go_memstats_gc_sys_bytes",
"go_memstats_heap_alloc_bytes",
"go_memstats_heap_idle_bytes",
"go_memstats_heap_inuse_bytes",
"go_memstats_heap_objects",
"go_memstats_heap_released_bytes",
"go_memstats_heap_sys_bytes",
"go_memstats_last_gc_time_seconds",
"go_memstats_lookups_total",
"go_memstats_mallocs_total",
"go_memstats_mcache_inuse_bytes",
"go_memstats_mcache_sys_bytes",
"go_memstats_mspan_inuse_bytes",
"go_memstats_mspan_sys_bytes",
"go_memstats_next_gc_bytes",
"go_memstats_other_sys_bytes",
"go_memstats_stack_inuse_bytes",
"go_memstats_stack_sys_bytes",
"go_memstats_sys_bytes",
"go_threads",
"metrics_server_api_metric_freshness_seconds",
"metrics_server_kubelet_last_request_time_seconds",
"metrics_server_kubelet_request_duration_seconds",
"metrics_server_kubelet_request_total",
"metrics_server_manager_tick_duration_seconds",
"metrics_server_storage_points",
"process_cpu_seconds_total",
"process_max_fds",
"process_open_fds",
"process_resident_memory_bytes",
"process_start_time_seconds",
"process_virtual_memory_bytes",
"process_virtual_memory_max_bytes",
}), "Unexpected metrics")
})
})
// getRestConfig loads the default kubeconfig (using the standard loading
// rules) and turns it into a *rest.Config for building Kubernetes clients.
func getRestConfig() (*rest.Config, error) {
	rawConfig, err := clientcmd.NewDefaultClientConfigLoadingRules().Load()
	if err != nil {
		return nil, err
	}
	overrides := &clientcmd.ConfigOverrides{}
	return clientcmd.NewDefaultClientConfig(*rawConfig, overrides).ClientConfig()
}
// parseMetricNames parses a Prometheus text-format exposition payload and
// returns the names of the metric families it contains, in map (i.e.
// unspecified) order.
//
// Fix: the original called Expect(err) after err had already been checked
// and returned on — dead code, and a misuse of Gomega outside a spec node.
func parseMetricNames(data []byte) ([]string, error) {
	var parser expfmt.TextParser
	mfs, err := parser.TextToMetricFamilies(bytes.NewReader(data))
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, len(mfs))
	for key := range mfs {
		names = append(names, key)
	}
	return names, nil
}
// proxyRequestToPod port-forwards to the given pod and performs an HTTP GET
// against the forwarded local port, returning the raw response body.
func proxyRequestToPod(config *rest.Config, namespace, podname, scheme string, port int, path string) ([]byte, error) {
	// cancel is always non-nil (setupForwarding returns noop on failure),
	// so deferring it before the error check is safe.
	cancel, err := setupForwarding(config, namespace, podname)
	defer cancel()
	if err != nil {
		return nil, err
	}
	reqUrl := url.URL{Scheme: scheme, Path: path, Host: fmt.Sprintf("127.0.0.1:%d", port)}
	resp, err := sendRequest(config, reqUrl.String())
	if err != nil {
		// FIX: the original deferred resp.Body.Close() BEFORE this check;
		// when sendRequest fails resp is nil, and the deferred close would
		// panic with a nil-pointer dereference. Check the error first.
		return nil, err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	return body, nil
}
// setupForwarding starts a SPDY port-forward (local 4443 -> pod 4443) to the
// named pod and blocks until the forwarder is ready. The returned cancel
// function stops the forwarder; it is always non-nil (noop on failure).
func setupForwarding(config *rest.Config, namespace, podname string) (cancel func(), err error) {
	// FIX: the original used strings.TrimLeft(config.Host, "https://").
	// TrimLeft treats its second argument as a SET of characters, not a
	// prefix, so it would also strip leading 'h', 't', 'p', 's', ':' or '/'
	// characters belonging to the host itself. TrimPrefix removes exactly
	// the scheme prefix and nothing else.
	hostIP := strings.TrimPrefix(config.Host, "https://")
	hostIP = strings.TrimPrefix(hostIP, "http://")
	trans, upgrader, err := spdy.RoundTripperFor(config)
	if err != nil {
		return noop, err
	}
	dialer := spdy.NewDialer(upgrader, &http.Client{Transport: trans}, http.MethodPost, &url.URL{Scheme: "https", Path: fmt.Sprintf("/api/v1/namespaces/%s/pods/%s/portforward", namespace, podname), Host: hostIP})
	// Forwarder output is captured in buffers rather than written to stdout.
	var berr, bout bytes.Buffer
	buffErr := bufio.NewWriter(&berr)
	buffOut := bufio.NewWriter(&bout)
	stopCh := make(chan struct{})
	readyCh := make(chan struct{})
	fw, err := portforward.New(dialer, []string{"4443:4443"}, stopCh, readyCh, buffOut, buffErr)
	if err != nil {
		return noop, err
	}
	go func() {
		// ForwardPorts blocks until stopCh is closed or an error occurs;
		// print its result so forwarding failures are at least visible.
		fmt.Print(fw.ForwardPorts())
	}()
	// Wait until the forwarder has bound the local port before returning.
	<-readyCh
	return func() {
		stopCh <- struct{}{}
	}, nil
}
// sendRequest performs an HTTP GET against url using a transport derived from
// the rest.Config, with TLS verification disabled (test-only: the forwarded
// endpoint presents a certificate we do not validate).
func sendRequest(config *rest.Config, url string) (*http.Response, error) {
	transportConfig, err := config.TransportConfig()
	if err != nil {
		return nil, err
	}
	// Skip server-certificate verification and drop any configured CA data.
	transportConfig.TLS.Insecure = true
	transportConfig.TLS.CAData = []byte{}
	roundTripper, err := transport.New(transportConfig)
	if err != nil {
		return nil, err
	}
	httpClient := &http.Client{Transport: roundTripper}
	return httpClient.Get(url)
}
// noop is a no-op cancel function, returned by setupForwarding when it fails
// before any real cleanup exists, so callers can unconditionally defer cancel().
func noop() {}
| {
"pile_set_name": "Github"
} |
#X-Generator: crowdin.com
# DOM XSS Active Scan rule
#
# This file defines the default (English) variants of all of the internationalised messages
domxss.name=Cross Site Scripting (DOM Based)
domxss.desc=DOM XSS Active Scan Rule
domxss.skipped.reason.browsererror=failed to start or connect to the browser
| {
"pile_set_name": "Github"
} |
namespace Metrics
{
    /// <summary>
    /// Indicates a metric's ability to be reset. Resetting a metric clears all currently collected data.
    /// </summary>
    public interface ResetableMetric : Utils.IHideObjectMembers
    {
        /// <summary>
        /// Clear all currently collected data for this metric.
        /// </summary>
        void Reset();
    }
}
| {
"pile_set_name": "Github"
} |
{
  mkDerivation, extra-cmake-modules,
  kcoreaddons, kconfig, kcrash, kdbusaddons, ki18n, kiconthemes, knotifications,
  kwidgetsaddons, kwindowsystem, polkit-qt
}:
# PolicyKit authentication agent for KDE Plasma (presumably; inferred from the
# package name and KDE framework inputs -- confirm against upstream).
mkDerivation {
  name = "polkit-kde-agent";
  # Build-time only: CMake helper modules for KDE projects.
  nativeBuildInputs = [ extra-cmake-modules ];
  # Runtime/link dependencies, propagated to reverse dependencies.
  propagatedBuildInputs = [
    kdbusaddons kwidgetsaddons kcoreaddons kcrash kconfig ki18n kiconthemes
    knotifications kwindowsystem polkit-qt
  ];
  # Split development headers/CMake files into a separate "dev" output.
  outputs = [ "out" "dev" ];
}
| {
"pile_set_name": "Github"
} |
//
// The MIT License (MIT)
//
// Copyright (c) 2016 Tag Games Limited
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
#include <ChilliSource/Rendering/Base/FrameAllocatorQueue.h>
namespace ChilliSource
{
    namespace
    {
        // Number of allocators kept in rotation (assumes up to three frames
        // in flight at once -- TODO confirm against the render pipeline).
        constexpr u32 k_numAllocators = 3;
        // Page size in bytes for each PagedLinearAllocator (1 MiB).
        constexpr u32 k_allocatorPageSize = 1024 * 1024;
    }
    //------------------------------------------------------------------------------
    // Creates the fixed pool of paged linear allocators and enqueues each one
    // as available. m_queue holds raw pointers; ownership stays with
    // m_allocators.
    FrameAllocatorQueue::FrameAllocatorQueue() noexcept
    {
        for (u32 i = 0; i < k_numAllocators; ++i)
        {
            PagedLinearAllocatorUPtr allocator(new PagedLinearAllocator(k_allocatorPageSize));
            m_queue.push_back(allocator.get());
            m_allocators.push_back(std::move(allocator));
        }
    }
    //------------------------------------------------------------------------------
    // Removes and returns the allocator at the front of the queue, blocking
    // on the condition variable while the queue is empty.
    IAllocator* FrameAllocatorQueue::Pop() noexcept
    {
        std::unique_lock<std::mutex> lock(m_mutex);
        // Loop (not a single wait) to tolerate spurious wake-ups.
        while (m_queue.empty())
        {
            m_condition.wait(lock);
        }
        auto front = m_queue.front();
        m_queue.pop_front();
        return front;
    }
    //------------------------------------------------------------------------------
    // Returns, without removing, the allocator at the front of the queue,
    // blocking while the queue is empty.
    IAllocator* FrameAllocatorQueue::Front() noexcept
    {
        std::unique_lock<std::mutex> lock(m_mutex);
        // Loop (not a single wait) to tolerate spurious wake-ups.
        while (m_queue.empty())
        {
            m_condition.wait(lock);
        }
        auto front = m_queue.front();
        return front;
    }
    //------------------------------------------------------------------------------
    // Returns an allocator to the queue. The allocator must be owned by this
    // queue and must not already be queued; it is reset before being made
    // available again, and one waiter is notified.
    void FrameAllocatorQueue::Push(IAllocator* allocator) noexcept
    {
        std::unique_lock<std::mutex> lock(m_mutex);
#if CS_ENABLE_DEBUG
        // Debug-only guard: pushing the same allocator twice would hand it
        // out to two consumers simultaneously.
        for (IAllocator* queuedAllocators : m_queue)
        {
            CS_ASSERT(queuedAllocators != allocator, "Allocator is already in queue, cannot be pushed twice!");
        }
#endif
        for (auto& pagedLinearAllocator : m_allocators)
        {
            if (pagedLinearAllocator.get() == allocator)
            {
                // Reclaim all pages before the allocator can be reused.
                pagedLinearAllocator->Reset();
                m_queue.push_back(allocator);
                m_condition.notify_one();
                return;
            }
        }
        CS_LOG_FATAL("Cannot push an allocator that is not owned by this queue");
    }
}
| {
"pile_set_name": "Github"
} |
/* Provide relocatable packages.
Copyright (C) 2003, 2005, 2008-2011 Free Software Foundation, Inc.
Written by Bruno Haible <[email protected]>, 2003.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU Library General Public License as published
by the Free Software Foundation; either version 2, or (at your option)
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
USA. */
#ifndef _RELOCATABLE_H
#define _RELOCATABLE_H
#ifdef __cplusplus
extern "C" {
#endif
/* This can be enabled through the configure --enable-relocatable option. */
#if ENABLE_RELOCATABLE
/* When building a DLL, we must export some functions. Note that because
this is a private .h file, we don't need to use __declspec(dllimport)
in any case. */
#if HAVE_VISIBILITY && BUILDING_DLL
# define RELOCATABLE_DLL_EXPORTED __attribute__((__visibility__("default")))
#elif defined _MSC_VER && BUILDING_DLL
# define RELOCATABLE_DLL_EXPORTED __declspec(dllexport)
#else
# define RELOCATABLE_DLL_EXPORTED
#endif
/* Sets the original and the current installation prefix of the package.
Relocation simply replaces a pathname starting with the original prefix
by the corresponding pathname with the current prefix instead. Both
prefixes should be directory names without trailing slash (i.e. use ""
instead of "/"). */
extern RELOCATABLE_DLL_EXPORTED void
set_relocation_prefix (const char *orig_prefix,
const char *curr_prefix);
/* Returns the pathname, relocated according to the current installation
directory.
The returned string is either PATHNAME unmodified or a freshly allocated
string that you can free with free() after casting it to 'char *'. */
extern const char * relocate (const char *pathname);
/* Memory management: relocate() potentially allocates memory, because it has
to construct a fresh pathname. If this is a problem because your program
calls relocate() frequently, think about caching the result. Or free the
return value if it was different from the argument pathname. */
/* Convenience function:
Computes the current installation prefix, based on the original
installation prefix, the original installation directory of a particular
file, and the current pathname of this file.
Returns it, freshly allocated. Returns NULL upon failure. */
extern char * compute_curr_prefix (const char *orig_installprefix,
const char *orig_installdir,
const char *curr_pathname);
#else
/* By default, we use the hardwired pathnames. */
#define relocate(pathname) (pathname)
#endif
#ifdef __cplusplus
}
#endif
#endif /* _RELOCATABLE_H */
| {
"pile_set_name": "Github"
} |
//========================================================================
//
// SplashT1FontFile.h
//
//========================================================================
//========================================================================
//
// Modified under the Poppler project - http://poppler.freedesktop.org
//
// All changes made under the Poppler project to this file are licensed
// under GPL version 2 or later
//
// Copyright (C) 2006 Takashi Iwai <[email protected]>
//
// To see a description of the changes please see the Changelog file that
// came with your tarball or type make ChangeLog if you are building from git
//
//========================================================================
#ifndef SPLASHT1FONTFILE_H
#define SPLASHT1FONTFILE_H
#if HAVE_T1LIB_H
#ifdef USE_GCC_PRAGMAS
#pragma interface
#endif
#include "SplashFontFile.h"
class SplashT1FontEngine;
//------------------------------------------------------------------------
// SplashT1FontFile
//------------------------------------------------------------------------
// A Type 1 font file rendered through t1lib.
class SplashT1FontFile: public SplashFontFile {
public:
  // Load a Type 1 font from <src> with glyph encoding <encA>.
  // Presumably returns NULL on failure -- confirm in the implementation.
  static SplashFontFile *loadType1Font(SplashT1FontEngine *engineA,
				       SplashFontFileID *idA,
				       SplashFontSrc *src,
				       char **encA);
  virtual ~SplashT1FontFile();
  // Create a new SplashT1Font, i.e., a scaled instance of this font
  // file.
  virtual SplashFont *makeFont(SplashCoord *mat,
			       SplashCoord *textMat);
private:
  // Private: instances are created only via loadType1Font().
  SplashT1FontFile(SplashT1FontEngine *engineA,
		   SplashFontFileID *idA,
		   SplashFontSrc *src,
		   int t1libIDA, char **encA, char *encStrA);
  SplashT1FontEngine *engine;
  int t1libID;			// t1lib font ID
  char **enc;			// glyph encoding array passed at load time
  char *encStr;			// backing storage for enc
  friend class SplashT1Font;
};
#endif // HAVE_T1LIB_H
#endif
| {
"pile_set_name": "Github"
} |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.zookeeper.test;
import static org.junit.jupiter.api.Assertions.assertEquals;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs.Ids;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
/**
 * Tests the getChildren variant that also fills in the parent znode's Stat,
 * verifying both the returned child list and the Stat fields.
 */
public class GetChildren2Test extends ClientBase {
    private ZooKeeper zk;
    @BeforeEach
    @Override
    public void setUp() throws Exception {
        super.setUp();
        // One fresh client per test; closed again in tearDown().
        zk = createClient();
    }
    @AfterEach
    @Override
    public void tearDown() throws Exception {
        super.tearDown();
        zk.close();
    }
    /**
     * Creates a persistent parent with a single ephemeral child and checks
     * the Stat returned by getChildren for both znodes.
     */
    @Test
    public void testChild() throws IOException, KeeperException, InterruptedException {
        String name = "/foo";
        zk.create(name, name.getBytes(), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
        String childname = name + "/bar";
        zk.create(childname, childname.getBytes(), Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
        Stat stat = new Stat();
        List<String> s = zk.getChildren(name, false, stat);
        // The parent's data was never modified after creation...
        assertEquals(stat.getCzxid(), stat.getMzxid());
        // ...and the child create was the next transaction, so pzxid is
        // expected to be exactly czxid + 1.
        assertEquals(stat.getCzxid() + 1, stat.getPzxid());
        assertEquals(stat.getCtime(), stat.getMtime());
        // One child was created, so the children version bumped once.
        assertEquals(1, stat.getCversion());
        assertEquals(0, stat.getVersion());
        assertEquals(0, stat.getAversion());
        // Persistent node: no ephemeral owner.
        assertEquals(0, stat.getEphemeralOwner());
        assertEquals(name.length(), stat.getDataLength());
        assertEquals(1, stat.getNumChildren());
        assertEquals(s.size(), stat.getNumChildren());
        s = zk.getChildren(childname, false, stat);
        assertEquals(stat.getCzxid(), stat.getMzxid());
        // A leaf never had children, so pzxid still equals czxid.
        assertEquals(stat.getCzxid(), stat.getPzxid());
        assertEquals(stat.getCtime(), stat.getMtime());
        assertEquals(0, stat.getCversion());
        assertEquals(0, stat.getVersion());
        assertEquals(0, stat.getAversion());
        // Ephemeral node: owned by this test client's session.
        assertEquals(zk.getSessionId(), stat.getEphemeralOwner());
        assertEquals(childname.length(), stat.getDataLength());
        assertEquals(0, stat.getNumChildren());
        assertEquals(s.size(), stat.getNumChildren());
    }
    /**
     * Creates ten ephemeral children one at a time, checking after each
     * create that the parent's Stat (pzxid, cversion, numChildren) advances
     * in lock step, then verifies the full child list at the end.
     */
    @Test
    public void testChildren() throws IOException, KeeperException, InterruptedException {
        String name = "/foo";
        zk.create(name, name.getBytes(), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
        List<String> children = new ArrayList<String>();
        List<String> children_s = new ArrayList<String>();
        for (int i = 0; i < 10; i++) {
            String childname = name + "/bar" + i;
            String childname_s = "bar" + i;
            children.add(childname);
            children_s.add(childname_s);
        }
        for (int i = 0; i < children.size(); i++) {
            String childname = children.get(i);
            zk.create(childname, childname.getBytes(), Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
            Stat stat = new Stat();
            List<String> s = zk.getChildren(name, false, stat);
            assertEquals(stat.getCzxid(), stat.getMzxid());
            // Each create is one more transaction past the parent's create.
            assertEquals(stat.getCzxid() + i + 1, stat.getPzxid());
            assertEquals(stat.getCtime(), stat.getMtime());
            assertEquals(i + 1, stat.getCversion());
            assertEquals(0, stat.getVersion());
            assertEquals(0, stat.getAversion());
            assertEquals(0, stat.getEphemeralOwner());
            assertEquals(name.length(), stat.getDataLength());
            assertEquals(i + 1, stat.getNumChildren());
            assertEquals(s.size(), stat.getNumChildren());
        }
        List<String> p = zk.getChildren(name, false, null);
        List<String> c_a = children_s;
        List<String> c_b = p;
        // Child ordering is not guaranteed; sort both sides before comparing.
        Collections.sort(c_a);
        Collections.sort(c_b);
        assertEquals(c_a.size(), 10);
        assertEquals(c_a, c_b);
    }
}
| {
"pile_set_name": "Github"
} |
[FORMAT "WCOFF"]
[INSTRSET "i486p"]
[BITS 32]
[FILE "api007.nas"]
GLOBAL _api_boxfilwin
[SECTION .text]
_api_boxfilwin: ; void api_boxfilwin(int win, int x0, int y0, int x1, int y1, int col);
; Draws a filled box in window `win` via the OS API trap INT 0x40; EDX=7
; appears to select the boxfilwin function (inferred from this file's name --
; confirm against the OS's INT 0x40 function table).
; EDI/ESI/EBP/EBX are callee-saved, so they are preserved around the call.
; Stack offsets below are +16 from the usual positions because of the four
; PUSHes above.
PUSH EDI
PUSH ESI
PUSH EBP
PUSH EBX
MOV EDX,7 ; API function number
MOV EBX,[ESP+20] ; win
MOV EAX,[ESP+24] ; x0
MOV ECX,[ESP+28] ; y0
MOV ESI,[ESP+32] ; x1
MOV EDI,[ESP+36] ; y1
MOV EBP,[ESP+40] ; col
INT 0x40 ; invoke the OS API
POP EBX
POP EBP
POP ESI
POP EDI
RET
| {
"pile_set_name": "Github"
} |
from __future__ import unicode_literals
from warnings import catch_warnings
from django.contrib.auth.models import User
from django.utils import six, timezone
from djblets.testing.decorators import add_fixtures
from kgb import SpyAgency
from reviewboard.changedescs.models import ChangeDescription
from reviewboard.diffviewer.models import DiffSet
from reviewboard.reviews.errors import PublishError
from reviewboard.reviews.models import (Comment, ReviewRequest,
ReviewRequestDraft)
from reviewboard.reviews.signals import (review_request_reopened,
review_request_reopening)
from reviewboard.scmtools.core import ChangeSet
from reviewboard.testing import TestCase
class ReviewRequestTests(SpyAgency, TestCase):
"""Tests for reviewboard.reviews.models.ReviewRequest."""
fixtures = ['test_users']
@add_fixtures(['test_scmtools'])
def test_can_add_default_reviewers_with_no_repository(self):
"""Testing ReviewRequest.can_add_default_reviewers with no repository
"""
review_request = self.create_review_request()
with self.assertNumQueries(0):
self.assertFalse(review_request.can_add_default_reviewers())
@add_fixtures(['test_scmtools'])
def test_can_add_default_reviewers_with_no_diffs(self):
"""Testing ReviewRequest.can_add_default_reviewers with no existing
diffs
"""
review_request = self.create_review_request(create_repository=True)
with self.assertNumQueries(1):
self.assertTrue(review_request.can_add_default_reviewers())
@add_fixtures(['test_scmtools'])
def test_can_add_default_reviewers_with_diffs(self):
"""Testing ReviewRequest.can_add_default_reviewers with existing diffs
"""
review_request = self.create_review_request(create_repository=True)
self.create_diffset(review_request)
with self.assertNumQueries(1):
self.assertFalse(review_request.can_add_default_reviewers())
    def test_get_close_info_returns_correct_information(self):
        """Testing ReviewRequest.get_close_info returns all necessary
        information
        """
        review_request = self.create_review_request(publish=True)
        review_request.close(close_type=ReviewRequest.SUBMITTED,
                             description='test123', rich_text=True)
        close_info = review_request.get_close_info()
        # The close timestamp must be present and lie in the past.
        self.assertIn('timestamp', close_info)
        self.assertLess(close_info['timestamp'], timezone.now())
        # The description and rich-text flag passed to close() are echoed
        # back verbatim.
        self.assertIn('close_description', close_info)
        self.assertEqual(close_info['close_description'], 'test123')
        self.assertIn('is_rich_text', close_info)
        self.assertTrue(close_info['is_rich_text'])
def test_get_close_info_timestamp_not_updated_by_reviews(self):
"""Testing ReviewRequest.get_close_info timestamp unnaffected by
subsequent reviews on review requests.
"""
review_request = self.create_review_request(publish=True)
review_request.close(close_type=ReviewRequest.SUBMITTED,
description='test123', rich_text=True)
past_close_info = review_request.get_close_info()
future = past_close_info['timestamp'] + timezone.timedelta(days=10)
review = self.create_review(review_request, publish=True,
timestamp=future)
close_info = review_request.get_close_info()
difference = review.timestamp.date() - close_info['timestamp'].date()
self.assertEqual(difference.days, 10)
self.assertEqual(past_close_info['timestamp'], close_info['timestamp'])
def test_public_with_discard_reopen_submitted(self):
"""Testing ReviewRequest.public when discarded, reopened, submitted"""
user = User.objects.get(username='grumpy')
review_request = self.create_review_request(publish=True,
target_people=[user])
self.assertTrue(review_request.public)
review_request.close(ReviewRequest.DISCARDED)
self.assertTrue(review_request.public)
review_request.reopen()
self.assertFalse(review_request.public)
review_request.publish(review_request.submitter)
review_request.close(ReviewRequest.SUBMITTED)
self.assertTrue(review_request.public)
def test_close_removes_commit_id(self):
"""Testing ReviewRequest.close with discarded removes commit ID"""
review_request = self.create_review_request(publish=True,
commit_id='123')
self.assertEqual(review_request.commit_id, '123')
review_request.close(ReviewRequest.DISCARDED)
self.assertIsNone(review_request.commit_id)
    def test_reopen_from_discarded(self):
        """Testing ReviewRequest.reopen from discarded review request"""
        review_request = self.create_review_request(publish=True)
        self.assertTrue(review_request.public)
        review_request.close(ReviewRequest.DISCARDED)
        # Spy on both signals so their emission arguments can be asserted on
        # at the end of the test.
        self.spy_on(review_request_reopened.send)
        self.spy_on(review_request_reopening.send)
        review_request.reopen(user=review_request.submitter)
        # Reopening a discarded request returns it to an unpublished draft
        # state (unlike a submitted request, which stays public -- see
        # test_reopen_from_submitted).
        self.assertFalse(review_request.public)
        self.assertEqual(review_request.status, ReviewRequest.PENDING_REVIEW)
        # The status transition is recorded on the draft's change
        # description.
        draft = review_request.get_draft()
        changedesc = draft.changedesc
        self.assertEqual(changedesc.fields_changed['status']['old'][0],
                         ReviewRequest.DISCARDED)
        self.assertEqual(changedesc.fields_changed['status']['new'][0],
                         ReviewRequest.PENDING_REVIEW)
        # Test that the signals were emitted correctly.
        self.assertTrue(review_request_reopening.send.spy.last_called_with(
            sender=ReviewRequest,
            user=review_request.submitter,
            review_request=review_request))
        self.assertTrue(review_request_reopened.send.spy.last_called_with(
            sender=ReviewRequest,
            user=review_request.submitter,
            review_request=review_request,
            old_status=ReviewRequest.DISCARDED,
            old_public=True))
def test_reopen_from_submitted(self):
"""Testing ReviewRequest.reopen from submitted review request"""
review_request = self.create_review_request(publish=True)
self.assertTrue(review_request.public)
review_request.close(ReviewRequest.SUBMITTED)
self.spy_on(review_request_reopened.send)
self.spy_on(review_request_reopening.send)
review_request.reopen(user=review_request.submitter)
self.assertTrue(review_request.public)
self.assertEqual(review_request.status, ReviewRequest.PENDING_REVIEW)
changedesc = review_request.changedescs.latest()
self.assertEqual(changedesc.fields_changed['status']['old'][0],
ReviewRequest.SUBMITTED)
self.assertEqual(changedesc.fields_changed['status']['new'][0],
ReviewRequest.PENDING_REVIEW)
self.assertTrue(review_request_reopening.send.spy.last_called_with(
sender=ReviewRequest,
user=review_request.submitter,
review_request=review_request))
self.assertTrue(review_request_reopened.send.spy.last_called_with(
sender=ReviewRequest,
user=review_request.submitter,
review_request=review_request,
old_status=ReviewRequest.SUBMITTED,
old_public=True))
def test_changenum_against_changenum_and_commit_id(self):
"""Testing create ReviewRequest with changenum against both changenum
and commit_id
"""
changenum = 123
review_request = self.create_review_request(publish=True,
changenum=changenum)
review_request = ReviewRequest.objects.get(pk=review_request.id)
self.assertEqual(review_request.changenum, changenum)
self.assertIsNone(review_request.commit_id)
@add_fixtures(['test_scmtools'])
def test_changeset_update_commit_id(self):
"""Testing ReviewRequest.changeset_is_pending update commit ID
behavior
"""
current_commit_id = '123'
new_commit_id = '124'
review_request = self.create_review_request(
publish=True,
commit_id=current_commit_id,
create_repository=True)
draft = ReviewRequestDraft.create(review_request)
self.assertEqual(review_request.commit_id, current_commit_id)
self.assertEqual(draft.commit_id, current_commit_id)
def _get_fake_changeset(scmtool, commit_id, allow_empty=True):
self.assertEqual(commit_id, current_commit_id)
changeset = ChangeSet()
changeset.pending = False
changeset.changenum = int(new_commit_id)
return changeset
scmtool = review_request.repository.get_scmtool()
scmtool.supports_pending_changesets = True
self.spy_on(scmtool.get_changeset,
call_fake=_get_fake_changeset)
self.spy_on(review_request.repository.get_scmtool,
call_fake=lambda x: scmtool)
is_pending, new_commit_id = \
review_request.changeset_is_pending(current_commit_id)
self.assertEqual(is_pending, False)
self.assertEqual(new_commit_id, new_commit_id)
review_request = ReviewRequest.objects.get(pk=review_request.pk)
self.assertEqual(review_request.commit_id, new_commit_id)
draft = review_request.get_draft()
self.assertEqual(draft.commit_id, new_commit_id)
def test_unicode_summary_and_str(self):
"""Testing ReviewRequest.__str__ with unicode summaries."""
review_request = self.create_review_request(
summary='\u203e\u203e', publish=True)
self.assertEqual(six.text_type(review_request), '\u203e\u203e')
def test_discard_unpublished_private(self):
"""Testing ReviewRequest.close with private requests on discard
to ensure changes from draft are copied over
"""
review_request = self.create_review_request(
publish=False,
public=False)
self.assertFalse(review_request.public)
self.assertNotEqual(review_request.status, ReviewRequest.DISCARDED)
draft = ReviewRequestDraft.create(review_request)
summary = 'Test summary'
description = 'Test description'
testing_done = 'Test testing done'
draft.summary = summary
draft.description = description
draft.testing_done = testing_done
draft.save()
review_request.close(ReviewRequest.DISCARDED)
self.assertEqual(review_request.summary, summary)
self.assertEqual(review_request.description, description)
self.assertEqual(review_request.testing_done, testing_done)
def test_discard_unpublished_public(self):
"""Testing ReviewRequest.close with public requests on discard
to ensure changes from draft are not copied over
"""
review_request = self.create_review_request(
publish=False,
public=True)
self.assertTrue(review_request.public)
self.assertNotEqual(review_request.status, ReviewRequest.DISCARDED)
draft = ReviewRequestDraft.create(review_request)
summary = 'Test summary'
description = 'Test description'
testing_done = 'Test testing done'
draft.summary = summary
draft.description = description
draft.testing_done = testing_done
draft.save()
review_request.close(ReviewRequest.DISCARDED)
self.assertNotEqual(review_request.summary, summary)
self.assertNotEqual(review_request.description, description)
self.assertNotEqual(review_request.testing_done, testing_done)
def test_publish_changedesc_none(self):
"""Testing ReviewRequest.publish on a new request to ensure there are
no change descriptions
"""
review_request = self.create_review_request(publish=True)
review_request.publish(review_request.submitter)
with self.assertRaises(ChangeDescription.DoesNotExist):
review_request.changedescs.filter(public=True).latest()
def test_submit_nonpublic(self):
"""Testing ReviewRequest.close with non-public requests to ensure state
transitions to SUBMITTED from non-public review request is not allowed
"""
review_request = self.create_review_request(public=False)
with self.assertRaises(PublishError):
review_request.close(ReviewRequest.SUBMITTED)
def test_submit_public(self):
"""Testing ReviewRequest.close with public requests to ensure
public requests can be transferred to SUBMITTED
"""
review_request = self.create_review_request(public=True)
review_request.close(ReviewRequest.SUBMITTED)
def test_determine_user_for_review_request(self):
"""Testing ChangeDescription.get_user for change descriptions for
review requests
"""
review_request = self.create_review_request(publish=True)
doc = review_request.submitter
grumpy = User.objects.get(username='grumpy')
change1 = ChangeDescription()
change1.record_field_change('foo', ['bar'], ['baz'])
change1.save()
review_request.changedescs.add(change1)
change2 = ChangeDescription()
change2.record_field_change('submitter', doc, grumpy, 'username')
change2.save()
review_request.changedescs.add(change2)
change3 = ChangeDescription()
change3.record_field_change('foo', ['bar'], ['baz'])
change3.save()
review_request.changedescs.add(change3)
change4 = ChangeDescription()
change4.record_field_change('submitter', grumpy, doc, 'username')
change4.save()
review_request.changedescs.add(change4)
self.assertIsNone(change1.user)
self.assertIsNone(change2.user)
self.assertIsNone(change3.user)
self.assertIsNone(change4.user)
self.assertEqual(change1.get_user(review_request), doc)
self.assertEqual(change2.get_user(review_request), doc)
self.assertEqual(change3.get_user(review_request), grumpy)
self.assertEqual(change4.get_user(review_request), grumpy)
self.assertEqual(change1.user, doc)
self.assertEqual(change2.user, doc)
self.assertEqual(change3.user, grumpy)
self.assertEqual(change4.user, grumpy)
@add_fixtures(['test_scmtools'])
def test_last_updated(self):
"""Testing ReviewRequest.last_updated stays in sync with
Review.timestamp when a review is published
"""
review_request = self.create_review_request(create_repository=True,
publish=True)
diffset = self.create_diffset(review_request)
filediff = self.create_filediff(diffset)
review1 = self.create_review(review_request, publish=True)
self.assertEqual(review_request.last_updated, review1.timestamp)
review2 = self.create_review(review_request, publish=True)
self.assertEqual(review_request.last_updated, review2.timestamp)
# Create a diff review.
diff_review = self.create_review(review_request)
self.create_diff_comment(diff_review, filediff)
diff_review.publish()
self.assertEqual(review_request.last_updated, diff_review.timestamp)
@add_fixtures(['test_scmtools'])
def test_create_with_history_and_commit_id(self):
"""Testing ReviewRequest.objects.create when create_with_history=True
and create_from_commit_id=True
"""
user = User.objects.get(username='doc')
repository = self.create_repository()
msg = ('create_from_commit_id and create_with_history cannot both be '
'set to True.')
with self.assertRaisesMessage(ValueError, msg):
ReviewRequest.objects.create(repository=repository,
user=user,
commit_id='0' * 40,
create_from_commit_id=True,
create_with_history=True)
@add_fixtures(['test_scmtools'])
def test_created_with_history_cannot_change_when_true(self):
"""Testing ReviewRequest.created_with_history cannot change after
creation when False
"""
user = User.objects.get(username='doc')
repository = self.create_repository()
review_request = ReviewRequest.objects.create(repository=repository,
user=user)
self.assertFalse(review_request.created_with_history)
msg = ('created_with_history cannot be changed once the review '
'request has been created.')
with self.assertRaisesMessage(ValueError, msg):
review_request.created_with_history = True
@add_fixtures(['test_scmtools'])
def test_created_with_history_cannot_change_when_false(self):
"""Testing ReviewRequest.created_with_history cannot change after
creation when True
"""
user = User.objects.get(username='doc')
repository = self.create_repository()
review_request = ReviewRequest.objects.create(repository=repository,
user=user,
create_with_history=True)
self.assertTrue(review_request.created_with_history)
msg = ('created_with_history cannot be changed once the review '
'request has been created.')
with self.assertRaisesMessage(ValueError, msg):
review_request.created_with_history = False
def test_review_participants_with_reviews(self):
"""Testing ReviewRequest.review_participants with reviews"""
user1 = User.objects.create_user(username='aaa',
email='[email protected]')
user2 = User.objects.create_user(username='bbb',
email='[email protected]')
user3 = User.objects.create_user(username='ccc',
email='[email protected]')
user4 = User.objects.create_user(username='ddd',
email='[email protected]')
review_request = self.create_review_request(publish=True)
review1 = self.create_review(review_request,
user=user1,
publish=True)
self.create_reply(review1, user=user2, public=True)
self.create_reply(review1, user=user1, public=True)
review2 = self.create_review(review_request,
user=user3,
publish=True)
self.create_reply(review2, user=user4, public=False)
self.create_reply(review2, user=user3, public=True)
self.create_reply(review2, user=user2, public=True)
self.create_review(review_request, user=user4)
with self.assertNumQueries(2):
self.assertEqual(review_request.review_participants,
{user1, user2, user3})
def test_review_participants_with_no_reviews(self):
"""Testing ReviewRequest.review_participants with no reviews"""
review_request = self.create_review_request(publish=True)
with self.assertNumQueries(1):
self.assertEqual(review_request.review_participants, set())
def test_is_accessible_by_with_draft_and_owner(self):
"""Testing ReviewRequest.is_accessible_by with draft and owner"""
review_request = self.create_review_request()
self.assertTrue(review_request.is_accessible_by(review_request.owner))
def test_is_accessible_by_with_draft_and_non_owner(self):
"""Testing ReviewRequest.is_accessible_by with draft and non-owner"""
user = self.create_user()
review_request = self.create_review_request()
self.assertFalse(review_request.is_accessible_by(user))
def test_is_accessible_by_with_draft_and_superuser(self):
"""Testing ReviewRequest.is_accessible_by with draft and superuser"""
user = self.create_user(is_superuser=True)
review_request = self.create_review_request()
self.assertTrue(review_request.is_accessible_by(user))
@add_fixtures(['test_scmtools'])
def test_is_accessible_by_with_private_repo_no_member(self):
    """Testing ReviewRequest.is_accessible_by with private repository
    and user not a member
    """
    user = self.create_user()

    repository = self.create_repository(public=False)
    review_request = self.create_review_request(repository=repository,
                                                publish=True)

    # A private repository hides its review requests from non-members.
    self.assertFalse(review_request.is_accessible_by(user))
@add_fixtures(['test_scmtools'])
def test_is_accessible_by_with_private_repo_member(self):
    """Testing ReviewRequest.is_accessible_by with private repository
    and user is a member
    """
    user = self.create_user()

    # Grant the user direct membership on the private repository.
    repository = self.create_repository(public=False)
    repository.users.add(user)

    review_request = self.create_review_request(repository=repository,
                                                publish=True)
    self.assertTrue(review_request.is_accessible_by(user))
@add_fixtures(['test_scmtools'])
def test_is_accessible_by_with_private_repo_member_by_group(self):
    """Testing ReviewRequest.is_accessible_by with private repository
    and user is a member by group
    """
    user = self.create_user()

    # Membership is granted transitively: user -> group -> repository.
    group = self.create_review_group(invite_only=True)
    group.users.add(user)

    repository = self.create_repository(public=False)
    repository.review_groups.add(group)

    review_request = self.create_review_request(repository=repository,
                                                publish=True)
    self.assertTrue(review_request.is_accessible_by(user))
def test_is_accessible_by_with_invite_only_group_and_not_member(self):
    """Testing ReviewRequest.is_accessible_by with invite-only group and
    user is not a member
    """
    user = self.create_user()
    group = self.create_review_group(invite_only=True)

    # Targeting an invite-only group restricts visibility to members.
    review_request = self.create_review_request(publish=True)
    review_request.target_groups.add(group)

    self.assertFalse(review_request.is_accessible_by(user))
def test_is_accessible_by_with_invite_only_group_and_member(self):
    """Testing ReviewRequest.is_accessible_by with invite-only group and
    user is a member
    """
    user = self.create_user()

    group = self.create_review_group(invite_only=True)
    group.users.add(user)

    review_request = self.create_review_request(publish=True)
    review_request.target_groups.add(group)

    # Group membership restores access to the restricted review request.
    self.assertTrue(review_request.is_accessible_by(user))
class GetLastActivityInfoTests(TestCase):
    """Unit tests for ReviewRequest.get_last_activity_info"""

    fixtures = ['test_scmtools', 'test_users']

    def setUp(self):
        super(GetLastActivityInfoTests, self).setUp()

        # Every test works on a published review request with a backing
        # repository, targeted at the "doc" user from the fixtures.
        doc = User.objects.get(username='doc')
        self.review_request = self.create_review_request(
            create_repository=True,
            publish=True,
            target_people=[doc])

    def test_get_last_activity_info(self):
        """Testing ReviewRequest.get_last_activity_info"""
        # With no further activity, the review request itself is the
        # most recently updated object.
        self.assertEqual(
            self.review_request.get_last_activity_info(),
            {
                'changedesc': None,
                'timestamp': self.review_request.last_updated,
                'updated_object': self.review_request,
            })

    def test_get_last_activity_info_draft(self):
        """Testing ReviewRequest.get_last_activity_info after updating the
        draft
        """
        draft = ReviewRequestDraft.create(self.review_request)
        draft.summary = 'A new summary appears'
        draft.save()

        # Unpublished draft changes must not count as activity.
        self.assertEqual(
            self.review_request.get_last_activity_info(),
            {
                'changedesc': None,
                'timestamp': self.review_request.last_updated,
                'updated_object': self.review_request,
            })

    def test_get_last_activity_info_update(self):
        """Testing ReviewRequest.get_last_activity_info after an update"""
        draft = ReviewRequestDraft.create(self.review_request)
        draft.summary = 'A new summary appears'
        draft.save()

        # Re-fetch so the publish below operates on fresh state.
        self.review_request = ReviewRequest.objects.get(
            pk=self.review_request.pk)
        self.review_request.publish(user=self.review_request.submitter)

        # Publishing the draft creates a ChangeDescription, which then
        # defines the latest activity.
        changedesc = self.review_request.changedescs.latest()

        self.assertEqual(
            self.review_request.get_last_activity_info(),
            {
                'changedesc': changedesc,
                'timestamp': changedesc.timestamp,
                'updated_object': self.review_request,
            })

    def test_get_last_activity_info_diff_update(self):
        """Testing ReviewRequest.get_last_activity_info after a diff update"""
        diffset = self.create_diffset(review_request=self.review_request,
                                      draft=True)
        self.review_request.publish(user=self.review_request.submitter)

        # Reload to pick up the timestamp assigned during publish.
        diffset = DiffSet.objects.get(pk=diffset.pk)

        self.assertEqual(
            self.review_request.get_last_activity_info(),
            {
                'changedesc': self.review_request.changedescs.latest(),
                'timestamp': diffset.timestamp,
                'updated_object': diffset,
            })

    def test_get_last_activity_info_review(self):
        """Testing ReviewRequest.get_last_activity_info after a review"""
        review = self.create_review(review_request=self.review_request,
                                    publish=True)

        self.assertEqual(
            self.review_request.get_last_activity_info(),
            {
                'changedesc': None,
                'timestamp': review.timestamp,
                'updated_object': review,
            })

    def test_get_last_activity_info_review_reply(self):
        """Testing ReviewRequest.get_last_activity_info after a review and
        a reply
        """
        review = self.create_review(review_request=self.review_request,
                                    publish=True)
        reply = self.create_reply(review=review, publish=True)

        # The reply is newer than the review, so it wins.
        self.assertEqual(
            self.review_request.get_last_activity_info(),
            {
                'changedesc': None,
                'timestamp': reply.timestamp,
                'updated_object': reply,
            })

    def test_get_last_activity_info_update_and_review(self):
        """Testing ReviewRequest.get_last_activity_info after an update and a
        review
        """
        draft = ReviewRequestDraft.create(self.review_request)
        draft.summary = 'A new summary appears'
        draft.save()

        # NOTE(review): unlike test_get_last_activity_info_update, the
        # review request is deliberately not re-fetched here, and the
        # expected result has changedesc=None. Confirm this stale-state
        # publish path is the intended scenario.
        # self.review_request = ReviewRequest.objects.get(
        #     pk=self.review_request.pk)
        self.review_request.publish(user=self.review_request.submitter)

        review = self.create_review(review_request=self.review_request,
                                    publish=True)

        self.assertEqual(
            self.review_request.get_last_activity_info(),
            {
                'changedesc': None,
                'timestamp': review.timestamp,
                'updated_object': review,
            })
class IssueCounterTests(TestCase):
    """Unit tests for review request issue counters."""

    fixtures = ['test_users']

    def setUp(self):
        super(IssueCounterTests, self).setUp()

        self.review_request = self.create_review_request(publish=True)
        self.assertEqual(self.review_request.issue_open_count, 0)
        self.assertEqual(self.review_request.issue_resolved_count, 0)
        self.assertEqual(self.review_request.issue_dropped_count, 0)
        self.assertEqual(self.review_request.issue_verifying_count, 0)

        # Null out the counters so every test starts from an
        # uninitialized state and exercises lazy re-initialization.
        self._reset_counts()

    @add_fixtures(['test_scmtools'])
    def test_init_with_diff_comments(self):
        """Testing ReviewRequest issue counter initialization from diff
        comments
        """
        self.review_request.repository = self.create_repository()

        diffset = self.create_diffset(self.review_request)
        filediff = self.create_filediff(diffset)

        self._test_issue_counts(
            lambda review, issue_opened: self.create_diff_comment(
                review, filediff, issue_opened=issue_opened))

    def test_init_with_file_attachment_comments(self):
        """Testing ReviewRequest issue counter initialization from file
        attachment comments
        """
        file_attachment = self.create_file_attachment(self.review_request)

        self._test_issue_counts(
            lambda review, issue_opened: self.create_file_attachment_comment(
                review, file_attachment, issue_opened=issue_opened))

    def test_init_with_general_comments(self):
        """Testing ReviewRequest issue counter initialization from general
        comments
        """
        self._test_issue_counts(
            lambda review, issue_opened: self.create_general_comment(
                review, issue_opened=issue_opened))

    def test_init_with_screenshot_comments(self):
        """Testing ReviewRequest issue counter initialization from screenshot
        comments
        """
        screenshot = self.create_screenshot(self.review_request)

        self._test_issue_counts(
            lambda review, issue_opened: self.create_screenshot_comment(
                review, screenshot, issue_opened=issue_opened))

    @add_fixtures(['test_scmtools'])
    def test_init_with_mix(self):
        """Testing ReviewRequest issue counter initialization from multiple
        types of comments at once
        """
        # The initial implementation for issue status counting broke when
        # there were multiple types of comments on a review (such as diff
        # comments and file attachment comments). There would be an
        # artificially large number of issues reported.
        #
        # That's been fixed, and this test is ensuring that it doesn't
        # regress.
        self.review_request.repository = self.create_repository()
        diffset = self.create_diffset(self.review_request)
        filediff = self.create_filediff(diffset)
        file_attachment = self.create_file_attachment(self.review_request)
        screenshot = self.create_screenshot(self.review_request)

        review = self.create_review(self.review_request)

        # One open file attachment comment
        self.create_file_attachment_comment(review, file_attachment,
                                            issue_opened=True)

        # Two diff comments
        self.create_diff_comment(review, filediff, issue_opened=True)
        self.create_diff_comment(review, filediff, issue_opened=True)

        # Four screenshot comments
        self.create_screenshot_comment(review, screenshot, issue_opened=True)
        self.create_screenshot_comment(review, screenshot, issue_opened=True)
        self.create_screenshot_comment(review, screenshot, issue_opened=True)
        self.create_screenshot_comment(review, screenshot, issue_opened=True)

        # Three open general comments
        self.create_general_comment(review, issue_opened=True)
        self.create_general_comment(review, issue_opened=True)
        self.create_general_comment(review, issue_opened=True)

        # The issue counts should end up being 0, since they'll initialize
        # during load and the review is still unpublished.
        self._reload_object(clear_counters=True)
        self.assertEqual(self.review_request.issue_open_count, 0)
        self.assertEqual(self.review_request.issue_resolved_count, 0)
        self.assertEqual(self.review_request.issue_dropped_count, 0)
        self.assertEqual(self.review_request.issue_verifying_count, 0)

        # Now publish. We should have 10 open issues, by way of incrementing
        # during publish.
        review.publish()

        self._reload_object()
        self.assertEqual(self.review_request.issue_open_count, 10)
        self.assertEqual(self.review_request.issue_dropped_count, 0)
        self.assertEqual(self.review_request.issue_resolved_count, 0)
        self.assertEqual(self.review_request.issue_verifying_count, 0)

        # Make sure we get the same number back when initializing counters.
        self._reload_object(clear_counters=True)
        self.assertEqual(self.review_request.issue_open_count, 10)
        self.assertEqual(self.review_request.issue_dropped_count, 0)
        self.assertEqual(self.review_request.issue_resolved_count, 0)
        self.assertEqual(self.review_request.issue_verifying_count, 0)

    def test_init_file_attachment_comment_with_replies(self):
        """Testing ReviewRequest file attachment comment issue counter
        initialization and replies
        """
        file_attachment = self.create_file_attachment(self.review_request)

        review = self.create_review(self.review_request)
        comment = self.create_file_attachment_comment(review, file_attachment,
                                                      issue_opened=True)
        review.publish()

        # Replies to an issue-opening comment must not be counted as
        # additional issues.
        reply = self.create_reply(review)
        self.create_file_attachment_comment(reply, file_attachment,
                                            reply_to=comment,
                                            issue_opened=True)
        reply.publish()

        self._reload_object(clear_counters=True)
        self.assertEqual(self.review_request.issue_open_count, 1)
        self.assertEqual(self.review_request.issue_resolved_count, 0)
        self.assertEqual(self.review_request.issue_dropped_count, 0)

    def test_init_general_comment_with_replies(self):
        """Testing ReviewRequest general comment issue counter initialization
        and replies
        """
        review = self.create_review(self.review_request)
        comment = self.create_general_comment(review, issue_opened=True)
        review.publish()

        # As above: a reply referencing the comment is not a new issue.
        reply = self.create_reply(review)
        self.create_general_comment(reply, reply_to=comment,
                                    issue_opened=True)
        reply.publish()

        self._reload_object(clear_counters=True)
        self.assertEqual(self.review_request.issue_open_count, 1)
        self.assertEqual(self.review_request.issue_resolved_count, 0)
        self.assertEqual(self.review_request.issue_dropped_count, 0)

    def test_save_reply_comment_to_file_attachment_comment(self):
        """Testing ReviewRequest file attachment comment issue counter and
        saving reply comments
        """
        file_attachment = self.create_file_attachment(self.review_request)

        review = self.create_review(self.review_request)
        comment = self.create_file_attachment_comment(review, file_attachment,
                                                      issue_opened=True)
        review.publish()

        self._reload_object(clear_counters=True)
        self.assertEqual(self.review_request.issue_open_count, 1)
        self.assertEqual(self.review_request.issue_resolved_count, 0)
        self.assertEqual(self.review_request.issue_dropped_count, 0)

        reply = self.create_reply(review)
        reply_comment = self.create_file_attachment_comment(
            reply, file_attachment,
            reply_to=comment,
            issue_opened=True)
        reply.publish()

        self._reload_object()
        self.assertEqual(self.review_request.issue_open_count, 1)
        self.assertEqual(self.review_request.issue_resolved_count, 0)
        self.assertEqual(self.review_request.issue_dropped_count, 0)

        # Re-saving the reply comment must not bump the counters again.
        reply_comment.save()
        self._reload_object()
        self.assertEqual(self.review_request.issue_open_count, 1)
        self.assertEqual(self.review_request.issue_resolved_count, 0)
        self.assertEqual(self.review_request.issue_dropped_count, 0)

    def test_save_reply_comment_to_general_comment(self):
        """Testing ReviewRequest general comment issue counter and saving
        reply comments.
        """
        review = self.create_review(self.review_request)
        comment = self.create_general_comment(review, issue_opened=True)
        review.publish()

        self._reload_object(clear_counters=True)
        self.assertEqual(self.review_request.issue_open_count, 1)
        self.assertEqual(self.review_request.issue_resolved_count, 0)
        self.assertEqual(self.review_request.issue_dropped_count, 0)

        reply = self.create_reply(review)
        reply_comment = self.create_general_comment(
            reply, reply_to=comment, issue_opened=True)
        reply.publish()

        self._reload_object()
        self.assertEqual(self.review_request.issue_open_count, 1)
        self.assertEqual(self.review_request.issue_resolved_count, 0)
        self.assertEqual(self.review_request.issue_dropped_count, 0)

        # Re-saving the reply comment must not bump the counters again.
        reply_comment.save()
        self._reload_object()
        self.assertEqual(self.review_request.issue_open_count, 1)
        self.assertEqual(self.review_request.issue_resolved_count, 0)
        self.assertEqual(self.review_request.issue_dropped_count, 0)

    def _test_issue_counts(self, create_comment_func):
        """Run the shared issue counter scenario for one comment type.

        ``create_comment_func(review, issue_opened)`` must create and
        return a comment of the type under test on the given review.
        """
        review = self.create_review(self.review_request)

        # One comment without an issue opened.
        create_comment_func(review, issue_opened=False)

        # One comment without an issue opened, which will have its
        # status set to a valid status, while closed.
        closed_with_status_comment = \
            create_comment_func(review, issue_opened=False)

        # Three comments with an issue opened.
        for i in range(3):
            create_comment_func(review, issue_opened=True)

        # Two comments that will have their issues dropped.
        dropped_comments = [
            create_comment_func(review, issue_opened=True)
            for i in range(2)
        ]

        # One comment that will have its issue resolved.
        resolved_comments = [
            create_comment_func(review, issue_opened=True)
        ]

        # One comment will be in Verifying Dropped mode.
        verify_dropped_comments = [
            create_comment_func(review, issue_opened=True)
        ]

        # Two comments will be in Verifying Resolved mode.
        verify_resolved_comments = [
            create_comment_func(review, issue_opened=True)
            for i in range(2)
        ]

        # The issue counts should end up being 0, since they'll initialize
        # during load and the review is still unpublished.
        self._reload_object(clear_counters=True)
        self.assertEqual(self.review_request.issue_open_count, 0)
        self.assertEqual(self.review_request.issue_resolved_count, 0)
        self.assertEqual(self.review_request.issue_dropped_count, 0)
        self.assertEqual(self.review_request.issue_verifying_count, 0)

        # Now publish. We should have 9 open issues (3 + 2 + 1 + 1 + 2,
        # from the issue_opened=True comments above), by way of
        # incrementing during publish.
        review.publish()

        self._reload_object()
        self.assertEqual(self.review_request.issue_open_count, 9)
        self.assertEqual(self.review_request.issue_dropped_count, 0)
        self.assertEqual(self.review_request.issue_resolved_count, 0)
        self.assertEqual(self.review_request.issue_verifying_count, 0)

        # Make sure we get the same number back when initializing counters.
        self._reload_object(clear_counters=True)
        self.assertEqual(self.review_request.issue_open_count, 9)
        self.assertEqual(self.review_request.issue_dropped_count, 0)
        self.assertEqual(self.review_request.issue_resolved_count, 0)
        self.assertEqual(self.review_request.issue_verifying_count, 0)

        # Set the issue statuses.
        for comment in dropped_comments:
            comment.issue_status = Comment.DROPPED
            comment.save()

        for comment in resolved_comments:
            comment.issue_status = Comment.RESOLVED
            comment.save()

        for comment in verify_resolved_comments:
            comment.issue_status = Comment.VERIFYING_RESOLVED
            comment.save()

        for comment in verify_dropped_comments:
            comment.issue_status = Comment.VERIFYING_DROPPED
            comment.save()

        closed_with_status_comment.issue_status = Comment.OPEN
        closed_with_status_comment.save()

        self._reload_object()
        self.assertEqual(self.review_request.issue_open_count, 3)
        self.assertEqual(self.review_request.issue_dropped_count, 2)
        self.assertEqual(self.review_request.issue_resolved_count, 1)
        self.assertEqual(self.review_request.issue_verifying_count, 3)

        # Make sure we get the same number back when initializing counters.
        self._reload_object(clear_counters=True)
        self.assertEqual(self.review_request.issue_open_count, 3)
        self.assertEqual(self.review_request.issue_dropped_count, 2)
        self.assertEqual(self.review_request.issue_resolved_count, 1)
        self.assertEqual(self.review_request.issue_verifying_count, 3)

    def _reload_object(self, clear_counters=False):
        """Re-fetch self.review_request, asserting the query count.

        If ``clear_counters`` is set, the stored counters are nulled
        first so the fetch triggers lazy counter initialization.
        """
        if clear_counters:
            # 3 queries: One for the review request fetch, one for
            # the issue status load, and one for updating the issue counts.
            expected_query_count = 3
            self._reset_counts()
        else:
            # One query for the review request fetch.
            expected_query_count = 1

        with self.assertNumQueries(expected_query_count):
            self.review_request = \
                ReviewRequest.objects.get(pk=self.review_request.pk)

    def _reset_counts(self):
        """Null out all issue counters so they re-initialize on load."""
        self.review_request.issue_open_count = None
        self.review_request.issue_resolved_count = None
        self.review_request.issue_dropped_count = None
        self.review_request.issue_verifying_count = None
        self.review_request.save()
class ApprovalTests(TestCase):
    """Unit tests for ReviewRequest approval logic."""

    fixtures = ['test_users']

    def setUp(self):
        super(ApprovalTests, self).setUp()

        self.review_request = self.create_review_request(publish=True)

    def test_approval_states_ship_it(self):
        """Testing ReviewRequest default approval logic with Ship It"""
        self.create_review(self.review_request, ship_it=True, publish=True)

        # A Ship It! with no open issues approves the request.
        self.assertTrue(self.review_request.approved)
        self.assertIsNone(self.review_request.approval_failure)

    def test_approval_states_no_ship_its(self):
        """Testing ReviewRequest default approval logic with no Ship-Its"""
        self.create_review(self.review_request, ship_it=False, publish=True)

        self.assertFalse(self.review_request.approved)
        self.assertEqual(self.review_request.approval_failure,
                         'The review request has not been marked "Ship It!"')

    def test_approval_states_open_issues(self):
        """Testing ReviewRequest default approval logic with open issues"""
        review = self.create_review(self.review_request, ship_it=True)
        self.create_general_comment(review, issue_opened=True)
        review.publish()

        # Counters are cached; reload before checking approval.
        self.review_request.reload_issue_open_count()

        self.assertFalse(self.review_request.approved)
        self.assertEqual(self.review_request.approval_failure,
                         'The review request has open issues.')

    def test_approval_states_unverified_issues(self):
        """Testing ReviewRequest default approval logic with unverified issues
        """
        review = self.create_review(self.review_request, ship_it=True)
        comment = self.create_general_comment(review, issue_opened=True)
        review.publish()

        # Move the issue into the verification state; it is no longer
        # "open" but it still blocks approval.
        comment.issue_status = Comment.VERIFYING_RESOLVED
        comment.save()

        self.review_request.reload_issue_open_count()
        self.review_request.reload_issue_verifying_count()

        self.assertFalse(self.review_request.approved)
        self.assertEqual(self.review_request.approval_failure,
                         'The review request has unverified issues.')
| {
"pile_set_name": "Github"
} |
var test = require('tape')
var through = require('../')
// must emit end before close.

test('buffering', function(assert) {
    // Identity through-stream: forwards each chunk, and signals EOF by
    // queueing null from the end callback.
    var ts = through(function (data) {
        this.queue(data)
    }, function () {
        this.queue(null)
    })

    var ended = false, actual = []

    ts.on('data', actual.push.bind(actual))
    ts.on('end', function () {
        ended = true
    })

    // While flowing, writes are delivered synchronously.
    ts.write(1)
    ts.write(2)
    ts.write(3)
    assert.deepEqual(actual, [1, 2, 3])

    // While paused, writes are buffered, not delivered.
    ts.pause()
    ts.write(4)
    ts.write(5)
    ts.write(6)
    assert.deepEqual(actual, [1, 2, 3])

    // Resuming flushes the buffered chunks in order.
    ts.resume()
    assert.deepEqual(actual, [1, 2, 3, 4, 5, 6])

    // end() while paused must defer the 'end' event until resume.
    ts.pause()
    ts.end()
    assert.ok(!ended)
    ts.resume()
    assert.ok(ended)
    assert.end()
})
test('buffering has data in queue, when ends', function (assert) {

    /*
     * If stream ends while paused with data in the queue,
     * stream should still emit end after all data is written
     * on resume.
     */

    // Same identity through-stream as the test above.
    var ts = through(function (data) {
        this.queue(data)
    }, function () {
        this.queue(null)
    })

    var ended = false, actual = []

    ts.on('data', actual.push.bind(actual))
    ts.on('end', function () {
        ended = true
    })

    // Buffer everything, including the end, while paused.
    ts.pause()
    ts.write(1)
    ts.write(2)
    ts.write(3)
    ts.end()
    assert.deepEqual(actual, [], 'no data written yet, still paused')
    assert.ok(!ended, 'end not emitted yet, still paused')

    // A single resume must deliver all buffered data, then 'end'.
    ts.resume()
    assert.deepEqual(actual, [1, 2, 3], 'resumed, all data should be delivered')
    assert.ok(ended, 'end should be emitted once all data was delivered')
    assert.end();
})
| {
"pile_set_name": "Github"
} |
// TODO:
// * Add a timeout for the server connection
var Stream = require("stream").Stream,
utillib = require("util"),
net = require("net"),
tls = require("tls"),
oslib = require("os"),
starttls = require("./starttls").starttls,
xoauth2 = require("xoauth2"),
crypto = require("crypto");
// monkey patch net and tls to support nodejs 0.4
// (createConnection was the pre-0.6 name for connect)
if(!net.connect && net.createConnection){
    net.connect = net.createConnection;
}

if(!tls.connect && tls.createConnection){
    tls.connect = tls.createConnection;
}
// expose to the world
module.exports = function(port, host, options){
var connection = new SMTPClient(port, host, options);
process.nextTick(connection.connect.bind(connection));
return connection;
};
/**
 * <p>Generates a SMTP connection object</p>
 *
 * <p>Optional options object takes the following possible properties:</p>
 * <ul>
 *     <li><b>secureConnection</b> - use SSL</li>
 *     <li><b>name</b> - the name of the client server</li>
 *     <li><b>auth</b> - authentication object <code>{user:"...", pass:"..."}</code>
 *     <li><b>ignoreTLS</b> - ignore server support for STARTTLS</li>
 *     <li><b>debug</b> - output client and server messages to console</li>
 *     <li><b>instanceId</b> - unique instance id for debugging</li>
 * </ul>
 *
 * @constructor
 * @namespace SMTP Client module
 * @param {Number} [port=25] Port number to connect to (465 when secureConnection is set)
 * @param {String} [host="localhost"] Hostname to connect to
 * @param {Object} [options] Option properties
 */
function SMTPClient(port, host, options){
    Stream.call(this);
    this.writable = true;
    this.readable = true;

    this.options = options || {};

    this.port = port || (this.options.secureConnection ? 465 : 25);
    this.host = host || "localhost";

    this.options.secureConnection = !!this.options.secureConnection;
    this.options.auth = this.options.auth || false;
    this.options.maxConnections = this.options.maxConnections || 5;

    if(!this.options.name){
        // default hostname for EHLO/HELO is the machine hostname or [IP]
        var defaultHostname = (oslib.hostname && oslib.hostname()) ||
                              (oslib.getHostname && oslib.getHostname()) ||
                              "";
        if(defaultHostname.indexOf('.')<0){
            // not a FQDN; fall back to a bracketed loopback literal
            defaultHostname = "[127.0.0.1]";
        }
        if(defaultHostname.match(/^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$/)){
            // bare IP addresses must be sent in brackets
            defaultHostname = "["+defaultHostname+"]";
        }

        this.options.name = defaultHostname;
    }

    this._init();
}
utillib.inherits(SMTPClient, Stream);
/**
 * <p>Initializes instance variables</p>
 */
SMTPClient.prototype._init = function(){
    /**
     * Defines if the current connection is secure or not. If not,
     * STARTTLS can be used if available
     * @private
     */
    this._secureMode = false;

    /**
     * Ignore incoming data on TLS negotiation
     * @private
     */
    this._ignoreData = false;

    /**
     * Store incomplete messages coming from the server
     * @private
     */
    this._remainder = "";

    /**
     * If set to true, then this object is no longer active
     * @public
     */
    this.destroyed = false;

    /**
     * The socket connecting to the server
     * @public
     */
    this.socket = false;

    /**
     * Lists supported auth mechanisms
     * @private
     */
    this._supportedAuth = [];

    /**
     * Currently in data transfer state
     * @private
     */
    this._dataMode = false;

    /**
     * Keep track if the client sends a leading \r\n in data mode
     * @private
     */
    this._lastDataBytes = new Buffer(2);

    /**
     * Function to run if a data chunk comes from the server
     * @private
     */
    this._currentAction = false;

    // ignoreTLS skips STARTTLS; secureConnection is already TLS.
    // Either way no upgrade is needed later.
    if(this.options.ignoreTLS || this.options.secureConnection){
        this._secureMode = true;
    }

    /**
     * XOAuth2 token generator if XOAUTH2 auth is used
     * @private
     */
    this._xoauth2 = false;
    // Accept either a preconfigured token generator (has getToken) or a
    // plain settings object to build one from.
    if(typeof this.options.auth.XOAuth2 == "object" && typeof this.options.auth.XOAuth2.getToken == "function"){
        this._xoauth2 = this.options.auth.XOAuth2;
    }else if(typeof this.options.auth.XOAuth2 == "object"){
        if(!this.options.auth.XOAuth2.user && this.options.auth.user){
            this.options.auth.XOAuth2.user = this.options.auth.user;
        }
        this._xoauth2 = xoauth2.createXOAuth2Generator(this.options.auth.XOAuth2);
    }
};
/**
 * <p>Creates a connection to a SMTP server and sets up connection
 * listener</p>
 */
SMTPClient.prototype.connect = function(){

    if(this.options.secureConnection){
        // TLS from the start (SMTPS); callback fires once secured
        this.socket = tls.connect(this.port, this.host, {}, this._onConnect.bind(this));
    }else{
        // plain TCP; may be upgraded later via STARTTLS
        this.socket = net.connect(this.port, this.host);
        this.socket.on("connect", this._onConnect.bind(this));
    }

    this.socket.on("error", this._onError.bind(this));
};
/**
 * <p>Upgrades the connection to TLS</p>
 *
 * <p>Incoming data is ignored for the duration of the TLS handshake;
 * the 'data' listener is re-attached on the new secure socket.</p>
 *
 * @param {Function} callback Callback function to run when the connection
 *        has been secured
 */
SMTPClient.prototype._upgradeConnection = function(callback){
    this._ignoreData = true;
    starttls(this.socket, (function(socket){
        this.socket = socket;
        this._ignoreData = false;
        this._secureMode = true;
        this.socket.on("data", this._onData.bind(this));

        return callback(null, true);
    }).bind(this));
};
/**
 * <p>Connection listener that is run when the connection to
 * the server is opened</p>
 *
 * @event
 */
SMTPClient.prototype._onConnect = function(){
    if("setKeepAlive" in this.socket){
        this.socket.setKeepAlive(true);
    }else if(this.socket.encrypted && "setKeepAlive" in this.socket.encrypted){
        this.socket.encrypted.setKeepAlive(true); // secure connection
    }

    this.socket.on("data", this._onData.bind(this));
    this.socket.on("close", this._onClose.bind(this));
    this.socket.on("end", this._onEnd.bind(this));

    this.socket.setTimeout(3 * 3600 * 1000); // 3 hours

    this.socket.on("timeout", this._onTimeout.bind(this));

    // the first thing the server sends is its greeting banner
    this._currentAction = this._actionGreeting;
};
/**
 * <p>Destroys the client - removes listeners etc.</p>
 *
 * <p>Idempotent: only the first call has any effect.</p>
 */
SMTPClient.prototype._destroy = function(){
    if(this._destroyed)return;
    this._destroyed = true;
    // Keep the documented public flag in sync. _init() declares
    // `this.destroyed`, but this method previously only ever set the
    // private `_destroyed`, so the public flag never became true.
    this.destroyed = true;
    this._ignoreData = true;
    this.emit("end");
    this.removeAllListeners();
};
/**
 * <p>'data' listener for data coming from the server</p>
 *
 * <p>Buffers partial lines and multi-line replies (lines of the form
 * <code>NNN-text</code>) until a complete reply has arrived, then
 * dispatches it to the current action handler.</p>
 *
 * @event
 * @param {Buffer} chunk Data chunk coming from the server
 */
SMTPClient.prototype._onData = function(chunk){
    var str;

    if(this._ignoreData || !chunk || !chunk.length){
        return;
    }

    // Wait until end of line
    if(chunk[chunk.length-1] != 0x0A){
        this._remainder += chunk.toString();
        return;
    }else{
        str = (this._remainder + chunk.toString()).trim();
        this._remainder = "";
    }

    // if this is a multi line reply, wait until the ending
    // (the last line of a reply uses "NNN " instead of "NNN-")
    if(str.match(/(?:^|\n)\d{3}-.+$/)){
        this._remainder = str;
        return;
    }

    if(this.options.debug){
        console.log("SERVER"+(this.options.instanceId?" "+
            this.options.instanceId:"")+":\n└──"+str.replace(/\r?\n/g,"\n "));
    }

    // hand the complete reply to whichever state handler is active
    if(typeof this._currentAction == "function"){
        this._currentAction.call(this, str);
    }
};
/**
 * <p>'error' listener for the socket</p>
 *
 * <p>Optionally renames the error and attaches extra data, then emits
 * it and closes the connection.</p>
 *
 * @event
 * @param {Error} err Error object
 * @param {String} type Error name
 * @param {Object} [data] Extra data attached to the error
 */
SMTPClient.prototype._onError = function(err, type, data){
    var hasCustomName = !!(type && type != "Error");

    if(hasCustomName){
        err.name = type;
    }
    if(data){
        err.data = data;
    }

    // surface the failure to consumers, then tear the connection down
    this.emit("error", err);
    this.close();
};
/**
 * <p>'close' listener for the socket - tears down the client</p>
 *
 * @event
 */
SMTPClient.prototype._onClose = function(){
    this._destroy();
};

/**
 * <p>'end' listener for the socket - tears down the client</p>
 *
 * @event
 */
SMTPClient.prototype._onEnd = function(){
    this._destroy();
};

/**
 * <p>'timeout' listener for the socket - closes the connection</p>
 *
 * @event
 */
SMTPClient.prototype._onTimeout = function(){
    this.close();
};
/**
 * <p>Passes data stream to socket if in data mode</p>
 *
 * <p>Tracks the last two bytes written so that <code>end()</code> can
 * decide whether the terminating <code>CRLF.CRLF</code> sequence needs
 * an extra leading line break.</p>
 *
 * @param {Buffer|String} chunk Chunk of data to be sent to the server
 * @returns {Boolean} Result of socket.write (backpressure indicator)
 */
SMTPClient.prototype.write = function(chunk){
    // works only in data mode
    if(!this._dataMode){
        // this line should never be reached but if it does, then
        // say act like everything's normal.
        return true;
    }

    if(typeof chunk == "string"){
        chunk = new Buffer(chunk, "utf-8");
    }

    // Remember the trailing two bytes of the stream. This was
    // previously `chunk.length > 2`, which skipped exactly-two-byte
    // chunks (e.g. "\r\n"), leaving stale bytes behind and making
    // end() emit a spurious blank line before the terminating dot.
    if(chunk.length >= 2){
        this._lastDataBytes[0] = chunk[chunk.length-2];
        this._lastDataBytes[1] = chunk[chunk.length-1];
    }else if(chunk.length == 1){
        this._lastDataBytes[0] = this._lastDataBytes[1];
        this._lastDataBytes[1] = chunk[0];
    }

    if(this.options.debug){
        console.log("CLIENT (DATA)"+(this.options.instanceId?" "+
            this.options.instanceId:"")+":\n└──"+chunk.toString().trim().replace(/\n/g,"\n "));
    }

    // pass the chunk to the socket
    return this.socket.write(chunk);
};
/**
 * <p>Indicates that a data stream for the socket is ended. Works only
 * in data mode.</p>
 *
 * <p>Terminates the DATA payload with the SMTP end-of-data marker
 * (a dot on its own line), adding line breaks only when the payload
 * did not already end with CRLF.</p>
 *
 * @param {Buffer} [chunk] Chunk of data to be sent to the server
 */
SMTPClient.prototype.end = function(chunk){
    // works only in data mode
    if(!this._dataMode){
        // this line should never be reached but if it does, then
        // say act like everything's normal.
        return true;
    }

    if(chunk && chunk.length){
        this.write(chunk);
    }

    // redirect output from the server to _actionStream
    this._currentAction = this._actionStream;

    // indicate that the stream has ended by sending a single dot on its own line
    // if the client already closed the data with \r\n no need to do it again
    if(this._lastDataBytes[0] == 0x0D && this._lastDataBytes[1] == 0x0A){
        this.socket.write(new Buffer(".\r\n", "utf-8"));
    }else if(this._lastDataBytes[1] == 0x0D){
        // payload ended with a bare \r; complete the line break first
        this.socket.write(new Buffer("\n.\r\n"));
    }else{
        this.socket.write(new Buffer("\r\n.\r\n"));
    }

    // end data mode
    this._dataMode = false;
};
/**
 * <p>Send a command to the server, append \r\n</p>
 *
 * @param {String} str String to be sent to the server
 */
SMTPClient.prototype.sendCommand = function(str){
    if(this.options.debug){
        console.log("CLIENT"+(this.options.instanceId?" "+
            this.options.instanceId:"")+":\n└──"+(str || "").toString().trim().replace(/\n/g,"\n "));
    }
    this.socket.write(new Buffer(str+"\r\n", "utf-8"));
};
/**
 * <p>Sends QUIT and closes the connection once the server replies</p>
 */
SMTPClient.prototype.quit = function(){
    this.sendCommand("QUIT");
    // the server's reply to QUIT is answered by closing the socket
    this._currentAction = this.close;
};
/**
 * <p>Closes the connection to the server</p>
 */
SMTPClient.prototype.close = function(){
    if(this.options.debug){
        console.log("Closing connection to the server");
    }
    // socket.socket is the raw socket wrapped by a TLS pair on older
    // node versions; close it first if present
    if(this.socket && this.socket.socket && this.socket.socket.end && !this.socket.socket.destroyed){
        this.socket.socket.end();
    }
    if(this.socket && this.socket.end && !this.socket.destroyed){
        this.socket.end();
    }
    this._destroy();
};
/**
 * <p>Initiates a new message by submitting envelope data, starting with
 * <code>MAIL FROM:</code> command</p>
 *
 * @param {Object} envelope Envelope object in the form of
 *        <code>{from:"...", to:["..."]}</code>
 */
SMTPClient.prototype.useEnvelope = function(envelope){
    this._envelope = envelope || {};
    this._envelope.from = this._envelope.from || ("anonymous@"+this.options.name);

    // clone the recipients array for latter manipulation
    // (JSON round-trip makes a deep copy so the caller's array is untouched)
    this._envelope.rcptQueue = JSON.parse(JSON.stringify(this._envelope.to || []));
    this._envelope.rcptFailed = [];

    this._currentAction = this._actionMAIL;
    this.sendCommand("MAIL FROM:<"+(this._envelope.from)+">");
};
/**
 * <p>If needed starts the authentication, if not emits 'idle' to
 * indicate that this client is ready to take in an outgoing mail</p>
 *
 * <p>Mechanism selection order: XOAUTH (if a token is configured and the
 * server advertises it), then XOAUTH2, then an explicitly configured
 * <code>authMethod</code>, then the first server-advertised mechanism
 * (defaulting to PLAIN).</p>
 */
SMTPClient.prototype._authenticateUser = function(){

    if(!this.options.auth){
        // no need to authenticate, at least no data given
        this._currentAction = this._actionIdle;
        this.emit("idle"); // ready to take orders
        return;
    }

    var auth;
    if(this.options.auth.XOAuthToken && this._supportedAuth.indexOf("XOAUTH")>=0){
        auth = "XOAUTH";
    }else if(this._xoauth2 && this._supportedAuth.indexOf("XOAUTH2")>=0){
        auth = "XOAUTH2";
    }else if(this.options.authMethod) {
        auth = this.options.authMethod.toUpperCase().trim();
    }else{
        // use first supported
        auth = (this._supportedAuth[0] || "PLAIN").toUpperCase().trim();
    }

    switch(auth){
        case "XOAUTH":
            this._currentAction = this._actionAUTHComplete;

            // token may be a pre-built string or a generator object
            if(typeof this.options.auth.XOAuthToken == "object" &&
              typeof this.options.auth.XOAuthToken.generate == "function"){
                this.options.auth.XOAuthToken.generate((function(err, XOAuthToken){
                    if(err){
                        return this._onError(err, "XOAuthTokenError");
                    }
                    this.sendCommand("AUTH XOAUTH " + XOAuthToken);
                }).bind(this));
            }else{
                this.sendCommand("AUTH XOAUTH " + this.options.auth.XOAuthToken.toString());
            }
            return;
        case "XOAUTH2":
            this._currentAction = this._actionAUTHComplete;
            this._xoauth2.getToken((function(err, token){
                if(err){
                    this._onError(err, "XOAUTH2Error");
                    return;
                }
                this.sendCommand("AUTH XOAUTH2 " + token);
            }).bind(this));
            return;
        case "LOGIN":
            this._currentAction = this._actionAUTH_LOGIN_USER;
            this.sendCommand("AUTH LOGIN");
            return;
        case "PLAIN":
            this._currentAction = this._actionAUTHComplete;
            // NOTE(review): the user name is sent as both the
            // authorization and authentication identity
            // (authzid\0authcid\0password); confirm this is intended
            // rather than an empty authzid.
            this.sendCommand("AUTH PLAIN "+new Buffer(
                    this.options.auth.user+"\u0000"+
                    this.options.auth.user+"\u0000"+
                    this.options.auth.pass,"utf-8").toString("base64"));
            return;
        case "CRAM-MD5":
            this._currentAction = this._actionAUTH_CRAM_MD5;
            this.sendCommand("AUTH CRAM-MD5");
            return;
    }

    // NOTE(review): "UnknowAuthError" (sic) is kept as-is because
    // callers may match on this error name.
    this._onError(new Error("Unknown authentication method - "+auth), "UnknowAuthError");
};
/** ACTIONS **/
/**
* <p>Will be run after the connection is created and the server sends
* a greeting. If the incoming message starts with 220 initiate
* SMTP session by sending EHLO command</p>
*
* @param {String} str Message from the server
*/
SMTPClient.prototype._actionGreeting = function(str){
    // Anything other than a 220 greeting means the server refused the session
    var isServiceReady = str.substr(0, 3) == "220";
    if(!isServiceReady){
        this._onError(new Error("Invalid greeting from server - "+str), false, str);
        return;
    }
    this._currentAction = this._actionEHLO;
    this.sendCommand("EHLO "+this.options.name);
};
/**
* <p>Handles server response for EHLO command. If it yielded in
* error, try HELO instead, otherwise initiate TLS negotiation
* if STARTTLS is supported by the server or move into the
* authentication phase.</p>
*
* @param {String} str Message from the server
*/
SMTPClient.prototype._actionEHLO = function(str){
    if(str.charAt(0) != "2"){
        // Try HELO instead
        this._currentAction = this._actionHELO;
        this.sendCommand("HELO "+this.options.name);
        return;
    }

    // Detect if the server supports STARTTLS. TLS takes priority over
    // authentication: the EHLO is re-issued after the upgrade.
    if(!this._secureMode && str.match(/[ \-]STARTTLS\r?$/mi)){
        this.sendCommand("STARTTLS");
        this._currentAction = this._actionSTARTTLS;
        return;
    }

    // Each matcher below scans the multi-line EHLO reply for an AUTH
    // capability line and records the supported mechanism.

    // Detect if the server supports PLAIN auth
    if(str.match(/AUTH(?:\s+[^\n]*\s+|\s+)PLAIN/i)){
        this._supportedAuth.push("PLAIN");
    }

    // Detect if the server supports LOGIN auth
    if(str.match(/AUTH(?:\s+[^\n]*\s+|\s+)LOGIN/i)){
        this._supportedAuth.push("LOGIN");
    }

    // Detect if the server supports CRAM-MD5 auth
    if(str.match(/AUTH(?:\s+[^\n]*\s+|\s+)CRAM-MD5/i)){
        this._supportedAuth.push("CRAM-MD5");
    }

    // Detect if the server supports XOAUTH auth
    // NOTE(review): this pattern also matches "XOAUTH2", so "XOAUTH" is pushed
    // for servers that only advertise XOAUTH2. Harmless in practice because
    // _authenticateUser only selects XOAUTH when an XOAuthToken is configured,
    // but confirm this is intended.
    if(str.match(/AUTH(?:\s+[^\n]*\s+|\s+)XOAUTH/i)){
        this._supportedAuth.push("XOAUTH");
    }

    // Detect if the server supports XOAUTH2 auth
    if(str.match(/AUTH(?:\s+[^\n]*\s+|\s+)XOAUTH2/i)){
        this._supportedAuth.push("XOAUTH2");
    }

    this._authenticateUser.call(this);
};
/**
* <p>Handles server response for HELO command. If it yielded in
* error, emit 'error', otherwise move into the authentication phase.</p>
*
* @param {String} str Message from the server
*/
SMTPClient.prototype._actionHELO = function(str){
    // A 2xx reply accepts the HELO; anything else is fatal at this point
    if(str.charAt(0) == "2"){
        this._authenticateUser.call(this);
        return;
    }
    this._onError(new Error("Invalid response for EHLO/HELO - "+str), false, str);
};
/**
* <p>Handles server response for STARTTLS command. If there's an error
* try HELO instead, otherwise initiate TLS upgrade. If the upgrade
 * succeeds, restart the EHLO</p>
*
* @param {String} str Message from the server
*/
SMTPClient.prototype._actionSTARTTLS = function(str){
    if(str.charAt(0) != "2"){
        // STARTTLS was rejected — fall back to a plain (unencrypted) HELO session
        this._currentAction = this._actionHELO;
        this.sendCommand("HELO "+this.options.name);
        return;
    }

    this._upgradeConnection((function(err, secured){
        if(err){
            this._onError(new Error("Error initiating TLS - "+(err.message || err)), "TLSError");
            return;
        }

        if(this.options.debug){
            console.log("Connection secured");
        }

        if(secured){
            // restart session: the capability list must be re-fetched with a
            // fresh EHLO once the connection is encrypted
            this._currentAction = this._actionEHLO;
            this.sendCommand("EHLO "+this.options.name);
        }else{
            // upgrade reported no change — proceed with authentication as-is
            this._authenticateUser.call(this);
        }
    }).bind(this));
};
/**
* <p>Handle the response for AUTH LOGIN command. We are expecting
* '334 VXNlcm5hbWU6' (base64 for 'Username:'). Data to be sent as
* response needs to be base64 encoded username.</p>
*
* @param {String} str Message from the server
*/
SMTPClient.prototype._actionAUTH_LOGIN_USER = function(str){
    // The server must prompt with base64("Username:") before we send credentials
    if(str === "334 VXNlcm5hbWU6"){
        this._currentAction = this._actionAUTH_LOGIN_PASS;
        this.sendCommand(new Buffer(this.options.auth.user, "utf-8").toString("base64"));
        return;
    }
    this._onError(new Error("Invalid login sequence while waiting for '334 VXNlcm5hbWU6' - "+str), false, str);
};
/**
* <p>Handle the response for AUTH CRAM-MD5 command. We are expecting
* '334 <challenge string>'. Data to be sent as response needs to be
* base64 decoded challenge string, MD5 hashed using the password as
* a HMAC key, prefixed by the username and a space, and finally all
* base64 encoded again.</p>
*
* @param {String} str Message from the server
*/
SMTPClient.prototype._actionAUTH_CRAM_MD5 = function(str) {
    var challengeMatch = str.match(/^334\s+(.+)$/),
        challengeString = "";

    if (!challengeMatch) {
        this._onError(new Error("Invalid login sequence while waiting for server challenge string - "+str), false, str);
        return;
    } else {
        challengeString = challengeMatch[1];
    }

    // Decode from base64
    // NOTE(review): the challenge is decoded as "ascii" — non-ASCII challenge
    // bytes would be mangled here; presumably servers only send ASCII, confirm.
    var base64decoded = new Buffer(challengeString, 'base64').toString('ascii'),
        hmac_md5 = crypto.createHmac('md5', this.options.auth.pass);

    hmac_md5.update(base64decoded);

    // Response format: base64("<user> <hex(HMAC-MD5(password, challenge))>")
    var hex_hmac = hmac_md5.digest('hex'),
        prepended = this.options.auth.user + " " + hex_hmac;

    this._currentAction = this._actionAUTH_CRAM_MD5_PASS;

    this.sendCommand(new Buffer(prepended).toString("base64"));
};
/**
* <p>Handles the response to CRAM-MD5 authentication, if there's no error,
* the user can be considered logged in. Emit 'idle' and start
* waiting for a message to send</p>
*
* @param {String} str Message from the server
*/
SMTPClient.prototype._actionAUTH_CRAM_MD5_PASS = function(str) {
    // A 235 reply means the server accepted the credentials
    var accepted = /^235\s+/.test(str);
    if (accepted) {
        this._currentAction = this._actionIdle;
        this.emit("idle"); // ready to take orders
        return;
    }
    this._onError(new Error("Invalid login sequence while waiting for '235 go ahead' - "+str), false, str);
};
/**
* <p>Handle the response for AUTH LOGIN command. We are expecting
* '334 UGFzc3dvcmQ6' (base64 for 'Password:'). Data to be sent as
* response needs to be base64 encoded password.</p>
*
* @param {String} str Message from the server
*/
SMTPClient.prototype._actionAUTH_LOGIN_PASS = function(str){
    // The server must prompt with base64("Password:") before we send it
    if(str === "334 UGFzc3dvcmQ6"){
        this._currentAction = this._actionAUTHComplete;
        this.sendCommand(new Buffer(this.options.auth.pass, "utf-8").toString("base64"));
        return;
    }
    this._onError(new Error("Invalid login sequence while waiting for '334 UGFzc3dvcmQ6' - "+str), false, str);
};
/**
* <p>Handles the response for authentication, if there's no error,
* the user can be considered logged in. Emit 'idle' and start
* waiting for a message to send</p>
*
* @param {String} str Message from the server
*/
SMTPClient.prototype._actionAUTHComplete = function(str){
    var response;

    // A 334 after XOAUTH2 carries a base64 JSON error payload
    if(this._xoauth2 && str.substr(0, 3) == "334"){
        try{
            response = str.split(" ");
            response.shift();
            response = JSON.parse(new Buffer(response.join(" "), "base64").toString("utf-8"));

            if((!this._xoauth2.reconnectCount || this._xoauth2.reconnectCount < 2) && ['400','401'].indexOf(response.status)>=0){
                // Invalid token — generate a fresh one and retry (at most twice)
                this._xoauth2.reconnectCount = (this._xoauth2.reconnectCount || 0) + 1;
                this._currentAction = this._actionXOAUTHRetry;
            }else{
                this._xoauth2.reconnectCount = 0;
                this._currentAction = this._actionAUTHComplete;
            }
            // An empty line finishes the challenge exchange
            this.sendCommand(new Buffer(0));
            return;
        }catch(E){}
    }

    // BUGFIX: only reset the retry counter when XOAUTH2 is actually in use;
    // the previous unconditional assignment dereferenced `undefined` and threw
    // a TypeError for every other auth mechanism (PLAIN, LOGIN, CRAM-MD5, ...).
    if(this._xoauth2){
        this._xoauth2.reconnectCount = 0;
    }

    if(str.charAt(0) != "2"){
        this._onError(new Error("Invalid login - "+str), "AuthError", str);
        return;
    }

    this._currentAction = this._actionIdle;
    this.emit("idle"); // ready to take orders
};
SMTPClient.prototype._actionXOAUTHRetry = function(str){
    // Ask the xoauth2 provider for a fresh token, then retry authentication
    var self = this;
    this._xoauth2.generateToken(function(err, token){
        if(err){
            self._onError(err, "XOAUTH2Error");
            return;
        }
        self._currentAction = self._actionAUTHComplete;
        self.sendCommand("AUTH XOAUTH2 " + token);
    });
};
/**
* <p>This function is not expected to run. If it does then there's probably
* an error (timeout etc.)</p>
*
* @param {String} str Message from the server
*/
SMTPClient.prototype._actionIdle = function(str){
    // A 4xx/5xx reply while idle indicates a server-side problem (timeout etc.)
    var replyClass = Number(str.charAt(0));
    if(replyClass > 3){
        this._onError(new Error(str), false, str);
        return;
    }
    // this line should never get called
};
/**
* <p>Handle response for a <code>MAIL FROM:</code> command</p>
*
* @param {String} str Message from the server
*/
SMTPClient.prototype._actionMAIL = function(str){
    if(Number(str.charAt(0)) != "2"){
        this._onError(new Error("Mail from command failed - " + str), "SenderError", str);
        return;
    }
    if(this._envelope.rcptQueue.length){
        // Start submitting recipients one at a time
        this._envelope.curRecipient = this._envelope.rcptQueue.shift();
        this._currentAction = this._actionRCPT;
        this.sendCommand("RCPT TO:<"+this._envelope.curRecipient+">");
    }else{
        this._onError(new Error("Can't send mail - no recipients defined"), "RecipientError");
    }
};
/**
* <p>Handle response for a <code>RCPT TO:</code> command</p>
*
* @param {String} str Message from the server
*/
SMTPClient.prototype._actionRCPT = function(str){
    if(Number(str.charAt(0)) != "2"){
        // this is a soft error — record the failure and keep going
        this._envelope.rcptFailed.push(this._envelope.curRecipient);
    }

    if(!this._envelope.rcptQueue.length){
        // All recipients have been submitted
        if(this._envelope.rcptFailed.length < this._envelope.to.length){
            // At least one recipient was accepted — proceed with the message.
            // NOTE(review): "rcptFailed" is emitted even when the failed list
            // is empty; listeners should check the array length themselves.
            this.emit("rcptFailed", this._envelope.rcptFailed);
            this._currentAction = this._actionDATA;
            this.sendCommand("DATA");
        }else{
            this._onError(new Error("Can't send mail - all recipients were rejected"), "RecipientError");
            return;
        }
    }else{
        // Submit the next recipient
        this._envelope.curRecipient = this._envelope.rcptQueue.shift();
        this._currentAction = this._actionRCPT;
        this.sendCommand("RCPT TO:<"+this._envelope.curRecipient+">");
    }
};
/**
* <p>Handle response for a <code>DATA</code> command</p>
*
* @param {String} str Message from the server
*/
SMTPClient.prototype._actionDATA = function(str){
    // Response should be 354 but according to https://github.com/eleith/emailjs/issues/24
    // some servers might use 250 instead, so accept 2 or 3 as the first digit
    var firstDigit = Number(str.charAt(0));
    if(firstDigit != 2 && firstDigit != 3){
        this._onError(new Error("Data command failed - " + str), false, str);
        return;
    }
    // Emit that connection is set up for streaming
    this._dataMode = true;
    this._currentAction = this._actionIdle;
    this.emit("message");
};
/**
* <p>Handle response for a <code>DATA</code> stream</p>
*
* @param {String} str Message from the server
*/
SMTPClient.prototype._actionStream = function(str){
    // "ready" carries a success flag: a 2xx reply means the message was accepted
    var delivered = Number(str.charAt(0)) == 2;
    this.emit("ready", delivered, str);
    // Waiting for new connections
    this._currentAction = this._actionIdle;
    process.nextTick(this.emit.bind(this, "idle"));
};
| {
"pile_set_name": "Github"
} |
/*
* TeleStax, Open Source Cloud Communications
* Copyright 2011-2013, Telestax Inc and individual contributors
* by the @authors tag.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.restcomm.protocols.ss7.isup.message.parameter;

import org.mobicents.protocols.asn.Tag;

/**
 * Represents the ReturnResult component of a remote operation.
 *
 * The {@code _TAG*} constants describe the ASN.1/BER tags used for this
 * component: the component tag itself, the invoke-ID tag, and the tag of the
 * parameter sequence.
 *
 * @author baranowb
 *
 */
public interface ReturnResult extends RemoteOperation {

    // Component tag: context-specific class, constructed (non-primitive), value 0x02
    int _TAG = 0x02;
    boolean _TAG_PC_PRIMITIVE = false;
    int _TAG_CLASS = Tag.CLASS_CONTEXT_SPECIFIC;

    // Invoke-ID tag: universal class, primitive, value 0x02 (INTEGER in the universal class)
    int _TAG_IID = 0x02;
    boolean _TAG_IID_PC_PRIMITIVE = true;
    int _TAG_IID_CLASS = Tag.CLASS_UNIVERSAL;

    //FIXME: is this correct?
    // NOTE(review): SEQUENCE is a constructed type in BER, yet
    // _TAG_SEQ_PC_PRIMITIVE is true here — this matches the original FIXME
    // above; verify against the encoder before changing.
    int _TAG_SEQ = Tag.SEQUENCE;
    boolean _TAG_SEQ_PC_PRIMITIVE = true;
    int _TAG_SEQ_CLASS = Tag.CLASS_UNIVERSAL;

    // optional
    void setOperationCodes(OperationCode... i);

    OperationCode[] getOperationCodes();

    // optional
    void setParameter(Parameter p);

    Parameter getParameter();
} | {
"pile_set_name": "Github"
} |
'use strict';
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.default = removeEmptyAttributes;
// Source: https://www.w3.org/TR/html4/sgml/dtd.html#events (Generic Attributes)
var safeToRemoveAttrs = new Set(['id', 'class', 'style', 'title', 'lang', 'dir', 'onclick', 'ondblclick', 'onmousedown', 'onmouseup', 'onmouseover', 'onmousemove', 'onmouseout', 'onkeypress', 'onkeydown', 'onkeyup']);
/** Removes empty attributes */
function removeEmptyAttributes(tree) {
tree.walk(function (node) {
if (!node.attrs) {
return node;
}
Object.keys(node.attrs).forEach(function (attrName) {
var attrNameLower = attrName.toLowerCase();
if (!safeToRemoveAttrs.has(attrNameLower)) {
return;
}
var attrValue = node.attrs[attrName];
if (attrValue === '' || (attrValue || '').match(/^\s+$/)) {
delete node.attrs[attrName];
}
});
return node;
});
return tree;
} | {
"pile_set_name": "Github"
} |
sha256:d216b425004fcb6d8047f74e81b30e7ead55f73e73511ca53a329c358786b6c9
| {
"pile_set_name": "Github"
} |
#[doc = "Reader of register CA0R"]
pub type R = crate::R<u32, super::CA0R>;
// Auto-generated (svd2rust) reader support for the TEOCLKF field.
// A raw value of 0 means the timeout clock frequency must be obtained by
// another method; any other raw value is reported as the reserved variant.
#[doc = "Timeout Clock Frequency\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum TEOCLKF_A {
    #[doc = "0: Get information via another method"]
    OTHER = 0,
}
impl From<TEOCLKF_A> for u8 {
    #[inline(always)]
    fn from(variant: TEOCLKF_A) -> Self {
        variant as _
    }
}
#[doc = "Reader of field `TEOCLKF`"]
pub type TEOCLKF_R = crate::R<u8, TEOCLKF_A>;
impl TEOCLKF_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> crate::Variant<u8, TEOCLKF_A> {
        use crate::Variant::*;
        match self.bits {
            0 => Val(TEOCLKF_A::OTHER),
            i => Res(i),
        }
    }
    #[doc = "Checks if the value of the field is `OTHER`"]
    #[inline(always)]
    pub fn is_other(&self) -> bool {
        *self == TEOCLKF_A::OTHER
    }
}
#[doc = "Timeout Clock Unit\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TEOCLKU_A {
#[doc = "0: KHz"]
KHZ = 0,
#[doc = "1: MHz"]
MHZ = 1,
}
impl From<TEOCLKU_A> for bool {
#[inline(always)]
fn from(variant: TEOCLKU_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `TEOCLKU`"]
pub type TEOCLKU_R = crate::R<bool, TEOCLKU_A>;
impl TEOCLKU_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> TEOCLKU_A {
match self.bits {
false => TEOCLKU_A::KHZ,
true => TEOCLKU_A::MHZ,
}
}
#[doc = "Checks if the value of the field is `KHZ`"]
#[inline(always)]
pub fn is_khz(&self) -> bool {
*self == TEOCLKU_A::KHZ
}
#[doc = "Checks if the value of the field is `MHZ`"]
#[inline(always)]
pub fn is_mhz(&self) -> bool {
*self == TEOCLKU_A::MHZ
}
}
#[doc = "Base Clock Frequency\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum BASECLKF_A {
#[doc = "0: Get information via another method"]
OTHER = 0,
}
impl From<BASECLKF_A> for u8 {
#[inline(always)]
fn from(variant: BASECLKF_A) -> Self {
variant as _
}
}
#[doc = "Reader of field `BASECLKF`"]
pub type BASECLKF_R = crate::R<u8, BASECLKF_A>;
impl BASECLKF_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> crate::Variant<u8, BASECLKF_A> {
use crate::Variant::*;
match self.bits {
0 => Val(BASECLKF_A::OTHER),
i => Res(i),
}
}
#[doc = "Checks if the value of the field is `OTHER`"]
#[inline(always)]
pub fn is_other(&self) -> bool {
*self == BASECLKF_A::OTHER
}
}
#[doc = "Max Block Length\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum MAXBLKL_A {
#[doc = "0: 512 bytes"]
_512 = 0,
#[doc = "1: 1024 bytes"]
_1024 = 1,
#[doc = "2: 2048 bytes"]
_2048 = 2,
}
impl From<MAXBLKL_A> for u8 {
#[inline(always)]
fn from(variant: MAXBLKL_A) -> Self {
variant as _
}
}
#[doc = "Reader of field `MAXBLKL`"]
pub type MAXBLKL_R = crate::R<u8, MAXBLKL_A>;
impl MAXBLKL_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> crate::Variant<u8, MAXBLKL_A> {
use crate::Variant::*;
match self.bits {
0 => Val(MAXBLKL_A::_512),
1 => Val(MAXBLKL_A::_1024),
2 => Val(MAXBLKL_A::_2048),
i => Res(i),
}
}
#[doc = "Checks if the value of the field is `_512`"]
#[inline(always)]
pub fn is_512(&self) -> bool {
*self == MAXBLKL_A::_512
}
#[doc = "Checks if the value of the field is `_1024`"]
#[inline(always)]
pub fn is_1024(&self) -> bool {
*self == MAXBLKL_A::_1024
}
#[doc = "Checks if the value of the field is `_2048`"]
#[inline(always)]
pub fn is_2048(&self) -> bool {
*self == MAXBLKL_A::_2048
}
}
#[doc = "8-bit Support for Embedded Device\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ED8SUP_A {
#[doc = "0: 8-bit Bus Width not Supported"]
NO = 0,
#[doc = "1: 8-bit Bus Width Supported"]
YES = 1,
}
impl From<ED8SUP_A> for bool {
#[inline(always)]
fn from(variant: ED8SUP_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `ED8SUP`"]
pub type ED8SUP_R = crate::R<bool, ED8SUP_A>;
impl ED8SUP_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> ED8SUP_A {
match self.bits {
false => ED8SUP_A::NO,
true => ED8SUP_A::YES,
}
}
#[doc = "Checks if the value of the field is `NO`"]
#[inline(always)]
pub fn is_no(&self) -> bool {
*self == ED8SUP_A::NO
}
#[doc = "Checks if the value of the field is `YES`"]
#[inline(always)]
pub fn is_yes(&self) -> bool {
*self == ED8SUP_A::YES
}
}
#[doc = "ADMA2 Support\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ADMA2SUP_A {
#[doc = "0: ADMA2 not Supported"]
NO = 0,
#[doc = "1: ADMA2 Supported"]
YES = 1,
}
impl From<ADMA2SUP_A> for bool {
#[inline(always)]
fn from(variant: ADMA2SUP_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `ADMA2SUP`"]
pub type ADMA2SUP_R = crate::R<bool, ADMA2SUP_A>;
impl ADMA2SUP_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> ADMA2SUP_A {
match self.bits {
false => ADMA2SUP_A::NO,
true => ADMA2SUP_A::YES,
}
}
#[doc = "Checks if the value of the field is `NO`"]
#[inline(always)]
pub fn is_no(&self) -> bool {
*self == ADMA2SUP_A::NO
}
#[doc = "Checks if the value of the field is `YES`"]
#[inline(always)]
pub fn is_yes(&self) -> bool {
*self == ADMA2SUP_A::YES
}
}
#[doc = "High Speed Support\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum HSSUP_A {
#[doc = "0: High Speed not Supported"]
NO = 0,
#[doc = "1: High Speed Supported"]
YES = 1,
}
impl From<HSSUP_A> for bool {
#[inline(always)]
fn from(variant: HSSUP_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `HSSUP`"]
pub type HSSUP_R = crate::R<bool, HSSUP_A>;
impl HSSUP_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> HSSUP_A {
match self.bits {
false => HSSUP_A::NO,
true => HSSUP_A::YES,
}
}
#[doc = "Checks if the value of the field is `NO`"]
#[inline(always)]
pub fn is_no(&self) -> bool {
*self == HSSUP_A::NO
}
#[doc = "Checks if the value of the field is `YES`"]
#[inline(always)]
pub fn is_yes(&self) -> bool {
*self == HSSUP_A::YES
}
}
#[doc = "SDMA Support\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum SDMASUP_A {
#[doc = "0: SDMA not Supported"]
NO = 0,
#[doc = "1: SDMA Supported"]
YES = 1,
}
impl From<SDMASUP_A> for bool {
#[inline(always)]
fn from(variant: SDMASUP_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `SDMASUP`"]
pub type SDMASUP_R = crate::R<bool, SDMASUP_A>;
impl SDMASUP_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> SDMASUP_A {
match self.bits {
false => SDMASUP_A::NO,
true => SDMASUP_A::YES,
}
}
#[doc = "Checks if the value of the field is `NO`"]
#[inline(always)]
pub fn is_no(&self) -> bool {
*self == SDMASUP_A::NO
}
#[doc = "Checks if the value of the field is `YES`"]
#[inline(always)]
pub fn is_yes(&self) -> bool {
*self == SDMASUP_A::YES
}
}
#[doc = "Suspend/Resume Support\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum SRSUP_A {
#[doc = "0: Suspend/Resume not Supported"]
NO = 0,
#[doc = "1: Suspend/Resume Supported"]
YES = 1,
}
impl From<SRSUP_A> for bool {
#[inline(always)]
fn from(variant: SRSUP_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `SRSUP`"]
pub type SRSUP_R = crate::R<bool, SRSUP_A>;
impl SRSUP_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> SRSUP_A {
match self.bits {
false => SRSUP_A::NO,
true => SRSUP_A::YES,
}
}
#[doc = "Checks if the value of the field is `NO`"]
#[inline(always)]
pub fn is_no(&self) -> bool {
*self == SRSUP_A::NO
}
#[doc = "Checks if the value of the field is `YES`"]
#[inline(always)]
pub fn is_yes(&self) -> bool {
*self == SRSUP_A::YES
}
}
#[doc = "Voltage Support 3.3V\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum V33VSUP_A {
#[doc = "0: 3.3V Not Supported"]
NO = 0,
#[doc = "1: 3.3V Supported"]
YES = 1,
}
impl From<V33VSUP_A> for bool {
#[inline(always)]
fn from(variant: V33VSUP_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `V33VSUP`"]
pub type V33VSUP_R = crate::R<bool, V33VSUP_A>;
impl V33VSUP_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> V33VSUP_A {
match self.bits {
false => V33VSUP_A::NO,
true => V33VSUP_A::YES,
}
}
#[doc = "Checks if the value of the field is `NO`"]
#[inline(always)]
pub fn is_no(&self) -> bool {
*self == V33VSUP_A::NO
}
#[doc = "Checks if the value of the field is `YES`"]
#[inline(always)]
pub fn is_yes(&self) -> bool {
*self == V33VSUP_A::YES
}
}
#[doc = "Voltage Support 3.0V\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum V30VSUP_A {
#[doc = "0: 3.0V Not Supported"]
NO = 0,
#[doc = "1: 3.0V Supported"]
YES = 1,
}
impl From<V30VSUP_A> for bool {
#[inline(always)]
fn from(variant: V30VSUP_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `V30VSUP`"]
pub type V30VSUP_R = crate::R<bool, V30VSUP_A>;
impl V30VSUP_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> V30VSUP_A {
match self.bits {
false => V30VSUP_A::NO,
true => V30VSUP_A::YES,
}
}
#[doc = "Checks if the value of the field is `NO`"]
#[inline(always)]
pub fn is_no(&self) -> bool {
*self == V30VSUP_A::NO
}
#[doc = "Checks if the value of the field is `YES`"]
#[inline(always)]
pub fn is_yes(&self) -> bool {
*self == V30VSUP_A::YES
}
}
#[doc = "Voltage Support 1.8V\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum V18VSUP_A {
#[doc = "0: 1.8V Not Supported"]
NO = 0,
#[doc = "1: 1.8V Supported"]
YES = 1,
}
impl From<V18VSUP_A> for bool {
#[inline(always)]
fn from(variant: V18VSUP_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `V18VSUP`"]
pub type V18VSUP_R = crate::R<bool, V18VSUP_A>;
impl V18VSUP_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> V18VSUP_A {
match self.bits {
false => V18VSUP_A::NO,
true => V18VSUP_A::YES,
}
}
#[doc = "Checks if the value of the field is `NO`"]
#[inline(always)]
pub fn is_no(&self) -> bool {
*self == V18VSUP_A::NO
}
#[doc = "Checks if the value of the field is `YES`"]
#[inline(always)]
pub fn is_yes(&self) -> bool {
*self == V18VSUP_A::YES
}
}
#[doc = "64-Bit System Bus Support\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum SB64SUP_A {
#[doc = "0: 32-bit Address Descriptors and System Bus"]
NO = 0,
#[doc = "1: 64-bit Address Descriptors and System Bus"]
YES = 1,
}
impl From<SB64SUP_A> for bool {
#[inline(always)]
fn from(variant: SB64SUP_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `SB64SUP`"]
pub type SB64SUP_R = crate::R<bool, SB64SUP_A>;
impl SB64SUP_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> SB64SUP_A {
match self.bits {
false => SB64SUP_A::NO,
true => SB64SUP_A::YES,
}
}
#[doc = "Checks if the value of the field is `NO`"]
#[inline(always)]
pub fn is_no(&self) -> bool {
*self == SB64SUP_A::NO
}
#[doc = "Checks if the value of the field is `YES`"]
#[inline(always)]
pub fn is_yes(&self) -> bool {
*self == SB64SUP_A::YES
}
}
#[doc = "Asynchronous Interrupt Support\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ASINTSUP_A {
#[doc = "0: Asynchronous Interrupt not Supported"]
NO = 0,
#[doc = "1: Asynchronous Interrupt supported"]
YES = 1,
}
impl From<ASINTSUP_A> for bool {
#[inline(always)]
fn from(variant: ASINTSUP_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `ASINTSUP`"]
pub type ASINTSUP_R = crate::R<bool, ASINTSUP_A>;
impl ASINTSUP_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> ASINTSUP_A {
match self.bits {
false => ASINTSUP_A::NO,
true => ASINTSUP_A::YES,
}
}
#[doc = "Checks if the value of the field is `NO`"]
#[inline(always)]
pub fn is_no(&self) -> bool {
*self == ASINTSUP_A::NO
}
#[doc = "Checks if the value of the field is `YES`"]
#[inline(always)]
pub fn is_yes(&self) -> bool {
*self == ASINTSUP_A::YES
}
}
#[doc = "Slot Type\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum SLTYPE_A {
#[doc = "0: Removable Card Slot"]
REMOVABLE = 0,
#[doc = "1: Embedded Slot for One Device"]
EMBEDDED = 1,
}
impl From<SLTYPE_A> for u8 {
#[inline(always)]
fn from(variant: SLTYPE_A) -> Self {
variant as _
}
}
#[doc = "Reader of field `SLTYPE`"]
pub type SLTYPE_R = crate::R<u8, SLTYPE_A>;
impl SLTYPE_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> crate::Variant<u8, SLTYPE_A> {
use crate::Variant::*;
match self.bits {
0 => Val(SLTYPE_A::REMOVABLE),
1 => Val(SLTYPE_A::EMBEDDED),
i => Res(i),
}
}
#[doc = "Checks if the value of the field is `REMOVABLE`"]
#[inline(always)]
pub fn is_removable(&self) -> bool {
*self == SLTYPE_A::REMOVABLE
}
#[doc = "Checks if the value of the field is `EMBEDDED`"]
#[inline(always)]
pub fn is_embedded(&self) -> bool {
*self == SLTYPE_A::EMBEDDED
}
}
// Auto-generated (svd2rust) field accessors for the CA0R capabilities
// register. Each method extracts one field by shifting and masking the raw
// 32-bit register value.
// NOTE(review): bits 6, 20 and 27 have no accessor — presumably reserved.
impl R {
    #[doc = "Bits 0:5 - Timeout Clock Frequency"]
    #[inline(always)]
    pub fn teoclkf(&self) -> TEOCLKF_R {
        TEOCLKF_R::new((self.bits & 0x3f) as u8)
    }
    #[doc = "Bit 7 - Timeout Clock Unit"]
    #[inline(always)]
    pub fn teoclku(&self) -> TEOCLKU_R {
        TEOCLKU_R::new(((self.bits >> 7) & 0x01) != 0)
    }
    #[doc = "Bits 8:15 - Base Clock Frequency"]
    #[inline(always)]
    pub fn baseclkf(&self) -> BASECLKF_R {
        BASECLKF_R::new(((self.bits >> 8) & 0xff) as u8)
    }
    #[doc = "Bits 16:17 - Max Block Length"]
    #[inline(always)]
    pub fn maxblkl(&self) -> MAXBLKL_R {
        MAXBLKL_R::new(((self.bits >> 16) & 0x03) as u8)
    }
    #[doc = "Bit 18 - 8-bit Support for Embedded Device"]
    #[inline(always)]
    pub fn ed8sup(&self) -> ED8SUP_R {
        ED8SUP_R::new(((self.bits >> 18) & 0x01) != 0)
    }
    #[doc = "Bit 19 - ADMA2 Support"]
    #[inline(always)]
    pub fn adma2sup(&self) -> ADMA2SUP_R {
        ADMA2SUP_R::new(((self.bits >> 19) & 0x01) != 0)
    }
    #[doc = "Bit 21 - High Speed Support"]
    #[inline(always)]
    pub fn hssup(&self) -> HSSUP_R {
        HSSUP_R::new(((self.bits >> 21) & 0x01) != 0)
    }
    #[doc = "Bit 22 - SDMA Support"]
    #[inline(always)]
    pub fn sdmasup(&self) -> SDMASUP_R {
        SDMASUP_R::new(((self.bits >> 22) & 0x01) != 0)
    }
    #[doc = "Bit 23 - Suspend/Resume Support"]
    #[inline(always)]
    pub fn srsup(&self) -> SRSUP_R {
        SRSUP_R::new(((self.bits >> 23) & 0x01) != 0)
    }
    #[doc = "Bit 24 - Voltage Support 3.3V"]
    #[inline(always)]
    pub fn v33vsup(&self) -> V33VSUP_R {
        V33VSUP_R::new(((self.bits >> 24) & 0x01) != 0)
    }
    #[doc = "Bit 25 - Voltage Support 3.0V"]
    #[inline(always)]
    pub fn v30vsup(&self) -> V30VSUP_R {
        V30VSUP_R::new(((self.bits >> 25) & 0x01) != 0)
    }
    #[doc = "Bit 26 - Voltage Support 1.8V"]
    #[inline(always)]
    pub fn v18vsup(&self) -> V18VSUP_R {
        V18VSUP_R::new(((self.bits >> 26) & 0x01) != 0)
    }
    #[doc = "Bit 28 - 64-Bit System Bus Support"]
    #[inline(always)]
    pub fn sb64sup(&self) -> SB64SUP_R {
        SB64SUP_R::new(((self.bits >> 28) & 0x01) != 0)
    }
    #[doc = "Bit 29 - Asynchronous Interrupt Support"]
    #[inline(always)]
    pub fn asintsup(&self) -> ASINTSUP_R {
        ASINTSUP_R::new(((self.bits >> 29) & 0x01) != 0)
    }
    #[doc = "Bits 30:31 - Slot Type"]
    #[inline(always)]
    pub fn sltype(&self) -> SLTYPE_R {
        SLTYPE_R::new(((self.bits >> 30) & 0x03) as u8)
    }
}
| {
"pile_set_name": "Github"
} |
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_MEMORYSTREAM_H_
#define RAPIDJSON_MEMORYSTREAM_H_
#include "stream.h"
#ifdef __clang__
RAPIDJSON_DIAG_PUSH
RAPIDJSON_DIAG_OFF(unreachable-code)
RAPIDJSON_DIAG_OFF(missing-noreturn)
#endif
RAPIDJSON_NAMESPACE_BEGIN
//! Represents an in-memory input byte stream.
/*!
This class is mainly for being wrapped by EncodedInputStream or AutoUTFInputStream.
It is similar to FileReadBuffer but the source is an in-memory buffer instead of a file.
Differences between MemoryStream and StringStream:
1. StringStream has encoding but MemoryStream is a byte stream.
2. MemoryStream needs size of the source buffer and the buffer don't need to be null terminated. StringStream assume null-terminated string as source.
3. MemoryStream supports Peek4() for encoding detection. StringStream is specified with an encoding so it should not have Peek4().
\note implements Stream concept
*/
struct MemoryStream {
    typedef char Ch; // byte

    // Does not copy: the stream only borrows [src, src + size).
    MemoryStream(const Ch *src, size_t size) : src_(src), begin_(src), end_(src + size), size_(size) {}

    // Current byte without consuming it; '\0' once the end is reached.
    Ch Peek() const { return RAPIDJSON_UNLIKELY(src_ == end_) ? '\0' : *src_; }
    // Current byte, consuming it; '\0' once the end is reached.
    Ch Take() { return RAPIDJSON_UNLIKELY(src_ == end_) ? '\0' : *src_++; }
    // Number of bytes consumed so far.
    size_t Tell() const { return static_cast<size_t>(src_ - begin_); }

    // Output interface is unsupported on this read-only stream: each member
    // asserts if called (present only to satisfy the Stream concept).
    Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
    void Put(Ch) { RAPIDJSON_ASSERT(false); }
    void Flush() { RAPIDJSON_ASSERT(false); }
    size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }

    // For encoding detection only.
    // Returns the current read position if at least 4 bytes remain, else 0.
    const Ch* Peek4() const {
        return Tell() + 4 <= size_ ? src_ : 0;
    }

    const Ch* src_; //!< Current read position.
    const Ch* begin_; //!< Original head of the string.
    const Ch* end_; //!< End of stream.
    size_t size_; //!< Size of the stream.
};
RAPIDJSON_NAMESPACE_END
#ifdef __clang__
RAPIDJSON_DIAG_POP
#endif
#endif // RAPIDJSON_MEMORYSTREAM_H_
| {
"pile_set_name": "Github"
} |
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Start an inspector session; only Protocol is used below, but the
// destructuring mirrors the standard test-runner boilerplate.
let {session, contextGroup, Protocol} = InspectorTest.start("Check that exceptionThrown is supported by test runner.")
// Enable the Runtime domain so Runtime.exceptionThrown events are delivered.
Protocol.Runtime.enable();
// Log every uncaught-exception notification the backend sends.
Protocol.Runtime.onExceptionThrown(message => InspectorTest.logMessage(message));
// Case 1: an Error thrown asynchronously from a timer callback.
Protocol.Runtime.evaluate({ expression: "setTimeout(() => { \n  throw new Error() }, 0)" });
// Case 2: setTimeout given an unparsable code string (compile error).
Protocol.Runtime.evaluate({ expression: "setTimeout(\" }\", 0)" });
// Case 3: a non-Error value (a number) thrown asynchronously.
Protocol.Runtime.evaluate({ expression: "setTimeout(() => { \n  throw 239; }, 0)" });
// Drain the pending timer tasks so all three events arrive, then finish.
InspectorTest.waitForPendingTasks().then(InspectorTest.completeTest);
| {
"pile_set_name": "Github"
} |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/xla/tests/literal_test_util.h"
#include <unistd.h>
#include <cmath>
#include <vector>
#include "tensorflow/compiler/xla/index_util.h"
#include "tensorflow/compiler/xla/layout_util.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/test.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/core/lib/core/casts.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace xla {
// Returns success iff `expected` and `actual` have the same structure:
// identical tupleness and, recursively for tuples, identical rank, element
// type, and dimension extents. Layout is NOT compared; see
// AssertEqualShapesAndLayouts for a layout-sensitive check.
/* static */ ::testing::AssertionResult LiteralTestUtil::EqualShapes(
    const Shape& expected, const Shape& actual) {
  // A tuple can never match a non-tuple, so settle tupleness first.
  if (ShapeUtil::IsTuple(expected) != ShapeUtil::IsTuple(actual)) {
    return ::testing::AssertionFailure()
           << "tupleness-mismatch! want: " << ShapeUtil::HumanString(expected)
           << " got: " << ShapeUtil::HumanString(actual);
  }
  if (ShapeUtil::IsTuple(expected)) {
    // Tuples must agree on element count, then elementwise (recursively).
    if (ShapeUtil::TupleElementCount(expected) !=
        ShapeUtil::TupleElementCount(actual)) {
      return ::testing::AssertionFailure()
             << "want tuple element count: "
             << ShapeUtil::TupleElementCount(expected)
             << " got tuple element count: "
             << ShapeUtil::TupleElementCount(actual);
    }
    for (int i = 0; i < expected.tuple_shapes_size(); ++i) {
      ::testing::AssertionResult result =
          EqualShapes(expected.tuple_shapes(i), actual.tuple_shapes(i))
          << "mismatch in tuple index " << i;
      if (!result) {
        return result;  // Report the first mismatching element.
      }
    }
  } else {
    // Arrays: compare rank, element type, then every dimension extent.
    if (ShapeUtil::Rank(expected) != ShapeUtil::Rank(actual)) {
      return ::testing::AssertionFailure()
             << "want rank of: " << ShapeUtil::HumanString(expected)
             << " got rank of: " << ShapeUtil::HumanString(actual);
    }
    if (expected.element_type() != actual.element_type()) {
      return ::testing::AssertionFailure()
             << PrimitiveType_Name(expected.element_type()) << " vs "
             << PrimitiveType_Name(actual.element_type());
    }
    if (expected.dimensions_size() != actual.dimensions_size()) {
      return ::testing::AssertionFailure()
             << "want dimensions_size " << expected.dimensions_size()
             << " got dimensions_size " << actual.dimensions_size();
    }
    for (int i = 0; i < expected.dimensions_size(); ++i) {
      if (expected.dimensions(i) != actual.dimensions(i)) {
        return ::testing::AssertionFailure()
               << "mismatch in dimension #" << i
               << " expected: " << ShapeUtil::HumanString(expected)
               << " actual: " << ShapeUtil::HumanString(actual);
      }
    }
  }
  return ::testing::AssertionSuccess();
}
/* static */ void LiteralTestUtil::AssertEqualShapes(const Shape& expected,
const Shape& actual) {
ASSERT_TRUE(EqualShapes(expected, actual));
}
/* static */ void LiteralTestUtil::AssertEqualShapesAndLayouts(
const Shape& expected, const Shape& actual) {
ASSERT_EQ(expected.ShortDebugString(), actual.ShortDebugString());
}
namespace {
// Return a literal with all arrays of type FromNativeT converted to type
// ToNativeT in the given literal.
template <typename FromNativeT, typename ToNativeT>
std::unique_ptr<Literal> ConvertType(const Literal& literal) {
  // First construct shape of the result: same structure as the input, but
  // with every FromNativeT array subshape re-typed to ToNativeT.
  Shape result_shape(literal.shape());
  ShapeUtil::ForEachMutableSubshape(
      &result_shape, [](Shape* subshape, const ShapeIndex&) {
        if (subshape->element_type() ==
            primitive_util::NativeToPrimitiveType<FromNativeT>()) {
          subshape->set_element_type(
              primitive_util::NativeToPrimitiveType<ToNativeT>());
        }
      });
  auto result = MakeUnique<Literal>(result_shape);
  // Then copy over the data from 'literal' converting FromNativeT values to
  // ToNativeT values as necessary.
  ShapeUtil::ForEachSubshape(
      literal.shape(),
      [&](const Shape& subshape, const ShapeIndex& shape_index) {
        if (ShapeUtil::IsArray(subshape)) {
          if (subshape.element_type() ==
              primitive_util::NativeToPrimitiveType<FromNativeT>()) {
            // Converted arrays: element-by-element static_cast.
            auto src = literal.data<FromNativeT>(shape_index);
            auto dest = result->data<ToNativeT>(shape_index);
            for (int64 i = 0; i < src.size(); ++i) {
              dest[i] = static_cast<ToNativeT>(src[i]);
            }
          } else {
            // Arrays of any other type are copied over unchanged.
            TF_CHECK_OK(result->CopyFrom(literal,
                                         /*dest_shape_index=*/shape_index,
                                         /*src_shape_index=*/shape_index));
          }
        }
      });
  return result;
}
}  // namespace
// Widening conversion: every bfloat16 array in the literal becomes float32.
/* static */ std::unique_ptr<Literal> LiteralTestUtil::ConvertBF16ToF32(
    const Literal& literal) {
  auto converted = ConvertType<bfloat16, float>(literal);
  return converted;
}
// Narrowing conversion: every float32 array in the literal becomes bfloat16.
/* static */ std::unique_ptr<Literal> LiteralTestUtil::ConvertF32ToBF16(
    const Literal& literal) {
  auto converted = ConvertType<float, bfloat16>(literal);
  return converted;
}
namespace {
// Returns this machine's hostname; used to tag temp-file dumps so the
// origin of a failing run can be identified.
//
// Fix: the previous implementation ignored gethostname()'s return value,
// so on failure it could build a string from an uninitialized buffer.
// We now fall back to a fixed placeholder instead.
string Hostname() {
  char hostname[1024];
  if (gethostname(hostname, sizeof hostname) != 0) {
    return string("unknown-host");
  }
  // POSIX does not guarantee null-termination when the name is truncated.
  hostname[sizeof hostname - 1] = 0;
  return string(hostname);
}
// Helper function for comparing a floating point type, FloatT, bitwise equal
// between the left-hand-side and right-hand-side, by bit-casting to UnsignedT
// -- on miscompare, a nice error message is given in the AssertionFailure.
template <typename FloatT, typename UnsignedT>
::testing::AssertionResult CompareFloatsBitwiseEqual(FloatT lhs, FloatT rhs) {
  // Bit-cast so that e.g. -0.0 vs +0.0 and differing NaN payloads
  // miscompare, which ordinary floating-point == would not distinguish.
  auto ulhs = tensorflow::bit_cast<UnsignedT>(lhs);
  auto urhs = tensorflow::bit_cast<UnsignedT>(rhs);
  // Widen to double for printing: the %g/%a conversions below take doubles.
  auto lhs_double = static_cast<double>(lhs);
  auto rhs_double = static_cast<double>(rhs);
  if (ulhs != urhs) {
    return ::testing::AssertionFailure() << tensorflow::strings::Printf(
               "floating values are not bitwise-equal; and equality testing "
               "was requested: %s=%g=%a vs %s=%g=%a",
               tensorflow::strings::StrCat(tensorflow::strings::Hex(ulhs))
                   .c_str(),
               lhs_double, lhs_double,
               tensorflow::strings::StrCat(tensorflow::strings::Hex(urhs))
                   .c_str(),
               rhs_double, rhs_double);
  }
  return ::testing::AssertionSuccess();
}
// Templated comparator that specializes for float equality comparison with the
// bitwise helper above (this is the un-specialized fallback, which mimics the
// default gunit equality failure message).
template <typename NativeT>
::testing::AssertionResult CompareEqual(NativeT lhs, NativeT rhs) {
  if (!(lhs == rhs)) {
    ::testing::Message msg;
    msg << "Expected equality of these values:";
    msg << "\n  " << lhs;
    msg << "\n  " << rhs;
    return ::testing::AssertionFailure() << msg;
  }
  return ::testing::AssertionSuccess();
}
// Specializations for floating types that do bitwise comparisons when equality
// comparison is requested.
template <>
::testing::AssertionResult CompareEqual<bfloat16>(bfloat16 lhs, bfloat16 rhs) {
  // bfloat16 occupies 16 bits.
  return CompareFloatsBitwiseEqual<bfloat16, uint16>(lhs, rhs);
}
template <>
::testing::AssertionResult CompareEqual<float>(float lhs, float rhs) {
  return CompareFloatsBitwiseEqual<float, uint32>(lhs, rhs);
}
template <>
::testing::AssertionResult CompareEqual<double>(double lhs, double rhs) {
  return CompareFloatsBitwiseEqual<double, uint64>(lhs, rhs);
}
template <>
::testing::AssertionResult CompareEqual<complex64>(complex64 lhs,
                                                   complex64 rhs) {
  // Compare real parts first so a failure pinpoints which component
  // (real vs imaginary) was bitwise-unequal.
  auto res = CompareEqual<float>(lhs.real(), rhs.real());
  if (!res) {
    return res;
  }
  return CompareEqual<float>(lhs.imag(), rhs.imag());
}
// A recursive function which iterates through every index of expected and
// actual literal and compares their values elementwise. Returns true if all
// elements are equal.
template <typename NativeT>
bool ExpectLiteralsEqual(const Literal& expected, const Literal& actual,
                         tensorflow::gtl::MutableArraySlice<int64> multi_index,
                         int64 dimension) {
  if (dimension == expected.shape().dimensions_size()) {
    // Base case: multi_index is fully populated; compare a single element.
    NativeT expected_value = expected.Get<NativeT>(multi_index);
    NativeT actual_value = actual.Get<NativeT>(multi_index);
    ::testing::AssertionResult result =
        CompareEqual<NativeT>(expected_value, actual_value);
    return result;  // Defines implicit coersion to bool.
  }
  // Recursive case: iterate this dimension, filling in multi_index.
  // Note: && short-circuits, so once all_match turns false the remaining
  // subtrees are not visited.
  bool all_match = true;
  for (int64 i = 0; i < expected.shape().dimensions(dimension); ++i) {
    multi_index[dimension] = i;
    all_match = all_match && ExpectLiteralsEqual<NativeT>(
                                 expected, actual, multi_index, dimension + 1);
  }
  return all_match;
}
} // namespace
// Nonfatally EXPECTs the two literals to be equal, dumping both literals
// (plus the optional caller-supplied message) into the failure output.
/* static */ void LiteralTestUtil::ExpectEqual(const Literal& expected,
                                               const Literal& actual,
                                               const string& message) {
  EXPECT_TRUE(Equal(expected, actual))
      << "expected:\n"
      << expected.ToString() << "\n\tvs actual:\n"
      << actual.ToString()
      << (message.empty()
              ? ""
              : tensorflow::strings::StrCat("\nmessage: ", message));
}
// Nonfatally expects the two literals to differ in shape or contents.
/* static */ void LiteralTestUtil::ExpectNotEqual(const Literal& expected,
                                                  const Literal& actual) {
  const bool literals_equal = !!Equal(expected, actual);
  EXPECT_FALSE(literals_equal);
}
// Elementwise equality over two literals of any supported primitive type
// (tuples recurse elementwise). Shapes must already be equal or the ASSERT
// inside aborts the test. Floating-point elements compare bitwise via the
// CompareEqual specializations above.
/* static */ ::testing::AssertionResult LiteralTestUtil::Equal(
    const Literal& expected, const Literal& actual) {
  VLOG(1) << "expected:";
  XLA_VLOG_LINES(1, expected.ToString());
  VLOG(1) << "actual:";
  XLA_VLOG_LINES(1, actual.ToString());
  AssertEqualShapes(expected.shape(), actual.shape());
  // Scratch index reused by the recursive elementwise walk.
  std::vector<int64> multi_index(expected.shape().dimensions_size(), 0);
  bool match = false;
  // Dispatch on element type to the correctly-typed comparison.
  switch (expected.shape().element_type()) {
    case PRED:
      match = ExpectLiteralsEqual<bool>(expected, actual, &multi_index, 0);
      break;
    case U8:
      match = ExpectLiteralsEqual<uint8>(expected, actual, &multi_index, 0);
      break;
    case S32:
      match = ExpectLiteralsEqual<int32>(expected, actual, &multi_index, 0);
      break;
    case S64:
      match = ExpectLiteralsEqual<int64>(expected, actual, &multi_index, 0);
      break;
    case U32:
      match = ExpectLiteralsEqual<uint32>(expected, actual, &multi_index, 0);
      break;
    case U64:
      match = ExpectLiteralsEqual<uint64>(expected, actual, &multi_index, 0);
      break;
    case BF16:
      match = ExpectLiteralsEqual<bfloat16>(expected, actual, &multi_index, 0);
      break;
    case F32:
      match = ExpectLiteralsEqual<float>(expected, actual, &multi_index, 0);
      break;
    case F64:
      match = ExpectLiteralsEqual<double>(expected, actual, &multi_index, 0);
      break;
    case C64:
      match = ExpectLiteralsEqual<complex64>(expected, actual, &multi_index, 0);
      break;
    case TUPLE: {
      bool tuple_match = true;
      for (int i = 0; i < ShapeUtil::TupleElementCount(expected.shape()); ++i) {
        // Create LiteralViews of the expected and actual elements.
        auto result = Equal(LiteralView::Create(expected, {i}),
                            LiteralView::Create(actual, {i}));
        // Once false, tuple_match stays false, but every element is still
        // visited so all mismatches get logged.
        tuple_match = tuple_match ? !!result : false;
      }
      match = tuple_match;
      break;
    }
    default:
      LOG(FATAL)
          << "Unsupported primitive type in LiteralTestUtil::ExpectEqual: "
          << PrimitiveType_Name(expected.shape().element_type());
  }
  ::testing::AssertionResult result = ::testing::AssertionSuccess();
  if (!match) {
    result = ::testing::AssertionFailure()
             << "expected: " << expected.ToString()
             << "\nactual: " << actual.ToString();
    VLOG(1) << result.message();
  }
  return result;
}
// Elementwise equality over tuples: nested tuples recurse, leaves use
// Equal. Returns the FIRST failing element's result; later elements are
// still visited so their mismatches are also logged.
/* static */ ::testing::AssertionResult LiteralTestUtil::EqualTuple(
    const Literal& expected, const Literal& actual) {
  VLOG(1) << "expected: " << expected.ToString();
  VLOG(1) << "actual: " << actual.ToString();
  if (!ShapeUtil::IsTuple(expected.shape()) ||
      !ShapeUtil::IsTuple(actual.shape())) {
    return ::testing::AssertionFailure()
           << "tuples expected shape = " << expected.shape().ShortDebugString()
           << " actual shape = " << actual.shape().ShortDebugString();
  }
  AssertEqualShapes(expected.shape(), actual.shape());
  ::testing::AssertionResult err = ::testing::AssertionSuccess();
  for (int64 i = 0; i < ShapeUtil::TupleElementCount(expected.shape()); ++i) {
    SCOPED_TRACE(tensorflow::strings::StrCat(
        "Tuple index ", i, " in ", ShapeUtil::HumanString(expected.shape())));
    // Views avoid copying the tuple elements.
    const auto expected_element = LiteralView::Create(expected, {i});
    const auto actual_element = LiteralView::Create(actual, {i});
    ::testing::AssertionResult res = [&] {
      if (ShapeUtil::IsTuple(expected_element.shape())) {
        return EqualTuple(expected_element, actual_element);
      } else {
        return Equal(expected_element, actual_element);
      }
    }();
    if (!res && err) {
      err = res;  // Keep only the first failure.
    }
  }
  return err;
}
/* static */ void LiteralTestUtil::ExpectEqualTuple(const Literal& expected,
const Literal& actual) {
EXPECT_TRUE(EqualTuple(expected, actual));
}
namespace {
// Helper class for comparing floating-point literals within an error bound.
class NearComparator {
 public:
  explicit NearComparator(ErrorSpec error) : error_(error) {}
  // Compares the two literals elementwise. EXPECTs each pair of elements to be
  // within the error bound. Emits useful log messages and dumps literals to
  // temporary files on failure. Returns true if literals match.
  bool ExpectNear(const Literal& expected, const Literal& actual) {
    VLOG(1) << "expected:";
    XLA_VLOG_LINES(1, expected.ToString());
    VLOG(1) << "actual:";
    XLA_VLOG_LINES(1, actual.ToString());
    // If the shapes mismatch, we simply fail the expectation instead of
    // printing out data, as it's a type error rather than a value error.
    ::testing::AssertionResult equal_shapes =
        LiteralTestUtil::EqualShapes(expected.shape(), actual.shape());
    if (!equal_shapes) {
      EXPECT_TRUE(equal_shapes);
      return false;
    }
    // Set up members used during the comparison. (The comparator is
    // reusable: each call resets all accumulated statistics.)
    num_miscompares_ = 0;
    abs_diff_sum_ = 0.0;
    abs_expected_sum_ = 0.0;
    abs_diff_miscompare_sum_ = 0.0;
    abs_expected_miscompare_sum_ = 0.0;
    max_rel_err_ = 0.0;
    max_abs_err_ = 0.0;
    miscompares_ = Literal(ShapeUtil::ChangeElementType(actual.shape(), PRED));
    multi_index_.resize(expected.shape().dimensions_size(), 0);
    // Dispatch on element type; only floating-point-like types are supported.
    switch (expected.shape().element_type()) {
      case BF16:
        ExpectLiteralsNear<bfloat16>(expected, actual, 0);
        break;
      case F32:
        ExpectLiteralsNear<float>(expected, actual, 0);
        break;
      case F64:
        ExpectLiteralsNear<double>(expected, actual, 0);
        break;
      case C64:
        ExpectLiteralsNear<complex64>(expected, actual, 0);
        break;
      default:
        LOG(FATAL) << "Unsupported primitive type in near comparator: "
                   << PrimitiveType_Name(expected.shape().element_type())
                   << ". Must be floating-point type.";
    }
    if (num_miscompares_ > 0) {
      if (!VLOG_IS_ON(1)) {
        // The literals were not logged at VLOG level above; log them now so
        // the failure is debuggable.
        LOG(INFO) << "expected: " << ShapeUtil::HumanString(expected.shape())
                  << " " << expected.ToString();
        LOG(INFO) << "actual: " << ShapeUtil::HumanString(actual.shape())
                  << " " << actual.ToString();
      }
      EXPECT_TRUE(num_miscompares_ == 0)
          << "\nmax relative mismatch at index "
          << LiteralTestUtil::MultiIndexAsString(max_rel_multi_index_)
          << "\nmaximum relative error " << max_rel_err_
          << "\nmax absolute mismatch at index "
          << LiteralTestUtil::MultiIndexAsString(max_abs_multi_index_)
          << "\nmaximum absolute error " << max_abs_err_
          << "\nfirst mismatch at index "
          << LiteralTestUtil::MultiIndexAsString(first_multi_index_)
          << "\nlast mismatch at index "
          << LiteralTestUtil::MultiIndexAsString(last_multi_index_)
          << "\ntotal absolute error " << abs_diff_sum_
          << "\ntotal absolute error of miscompares "
          << abs_diff_miscompare_sum_ << "\ntotal relative error "
          << (abs_diff_sum_ / abs_expected_sum_)
          << "\ntotal relative error of miscompares "
          << (abs_diff_miscompare_sum_ / abs_expected_miscompare_sum_)
          << "\nfailure count " << num_miscompares_;
      // Dump all three literals for offline inspection.
      WriteLiteralToTempFile(expected, "expected");
      WriteLiteralToTempFile(actual, "actual");
      WriteLiteralToTempFile(miscompares_, "miscompares");
    }
    return num_miscompares_ == 0;
  }
 private:
  // True iff exactly one of the two values is NaN (a NaN/NaN pair is
  // treated as matching).
  template <typename NativeT>
  bool NanMismatch(NativeT lhs, NativeT rhs) {
    return std::isnan(lhs) != std::isnan(rhs);
  }
  // EXPECT_NEAR on a single element pair, annotated with `message`
  // (index/diff details built by the caller).
  template <typename NativeT>
  void ExpectNear(NativeT expected, NativeT actual,
                  const ::testing::Message& message) {
    EXPECT_NEAR(expected, actual, error_.abs)
        << "expected:\n  " << expected << "\n\tvs actual:\n  " << actual << "\n"
        << message;
  }
  // EXPECTs that the two given scalar values are within the error bound. Keeps
  // track of how many mismatches have occurred to keep the size of the output
  // manageable.
  template <typename NativeT>
  bool ExpectValuesNear(NativeT expected, NativeT actual) {
    if (expected == actual) {
      return true;
    }
    // NOTE(review): diffs are accumulated in float even when NativeT is
    // double, which loses precision for F64 comparisons -- confirm this is
    // acceptable for the error bounds in use.
    float abs_diff = std::abs(actual - expected);
    float rel_err = abs_diff / std::abs(expected);
    abs_diff_sum_ += abs_diff;
    abs_expected_sum_ += std::abs(expected);
    if (rel_err > max_rel_err_) {
      max_rel_err_ = rel_err;
      max_rel_multi_index_ = multi_index_;
    }
    if (abs_diff > max_abs_err_) {
      max_abs_err_ = abs_diff;
      max_abs_multi_index_ = multi_index_;
    }
    VLOG(10) << tensorflow::strings::Printf(
        "index %s abs_diff %f rel_err %f",
        LiteralTestUtil::MultiIndexAsString(multi_index_).c_str(), abs_diff,
        rel_err);
    // A mismatch requires BOTH bounds to be exceeded, unless NaN-ness
    // differs (which always miscompares).
    bool nan_mismatch = NanMismatch<NativeT>(expected, actual);
    bool mismatch =
        (nan_mismatch || (abs_diff >= error_.abs && rel_err >= error_.rel));
    if (mismatch) {
      abs_diff_miscompare_sum_ += abs_diff;
      abs_expected_miscompare_sum_ += std::abs(expected);
      // Only the first kMaxFailures mismatches produce loud EXPECT output.
      const int64 kMaxFailures = 2;
      if (num_miscompares_ < kMaxFailures) {
        ::testing::Message msg;
        msg << "mismatch at index "
            << LiteralTestUtil::MultiIndexAsString(multi_index_) << " abs diff "
            << abs_diff << " rel err " << rel_err << " failure #"
            << num_miscompares_;
        ExpectNear<NativeT>(expected, actual, msg);
      } else if (num_miscompares_ == kMaxFailures) {
        LOG(ERROR)
            << "reached max 'loud' failure count; silently proceeding...";
      }
      if (num_miscompares_ == 0) {
        first_multi_index_ = multi_index_;
      }
      num_miscompares_++;
      last_multi_index_ = multi_index_;
    }
    return !mismatch;
  }
  // Recursive function which compares the two given literals elementwise.
  template <typename NativeT>
  void ExpectLiteralsNear(const Literal& expected, const Literal& actual,
                          int64 dimension) {
    if (dimension == expected.shape().dimensions_size()) {
      // multi_index_ is fully populated: compare one element and record the
      // outcome in the miscompares_ mask.
      bool near = ExpectValuesNear(expected.Get<NativeT>(multi_index_),
                                   actual.Get<NativeT>(multi_index_));
      miscompares_.Set<bool>(multi_index_, !near);
    } else {
      for (int64 i = 0; i < expected.shape().dimensions(dimension); ++i) {
        multi_index_[dimension] = i;
        ExpectLiteralsNear<NativeT>(expected, actual, dimension + 1);
      }
    }
  }
  // Writes the given literal to a file in the test temporary directory.
  void WriteLiteralToTempFile(const Literal& literal, const string& name) {
    // Timestamp + hostname make the filename unique across runs/machines.
    int64 now_usec = tensorflow::Env::Default()->NowMicros();
    string filename = tensorflow::io::JoinPath(
        tensorflow::testing::TmpDir(),
        tensorflow::strings::Printf("tempfile-%s-%llx-%s", Hostname().c_str(),
                                    now_usec, name.c_str()));
    TF_CHECK_OK(tensorflow::WriteBinaryProto(tensorflow::Env::Default(),
                                             filename, literal.ToProto()));
    LOG(ERROR) << "wrote to " << name << " file: " << filename;
  }
  // Absolute and relative error bounds supplied at construction.
  ErrorSpec error_;
  // Number of element miscomparisons encountered so far.
  int64 num_miscompares_;
  // A Literal containing which elements did not match in the expected and
  // actual literals. miscompares_ contains PREDs and is of the same sizes as
  // the comparison literals.
  Literal miscompares_;
  // A multidimensional index used when performing the recursive comparison.
  std::vector<int64> multi_index_;
  // Aggregated Statistics on input.
  double abs_diff_sum_;
  double abs_expected_sum_;
  double abs_diff_miscompare_sum_;
  double abs_expected_miscompare_sum_;
  // Extremes and bookmark indices observed during the walk.
  float max_rel_err_;
  float max_abs_err_;
  std::vector<int64> first_multi_index_;
  std::vector<int64> last_multi_index_;
  std::vector<int64> max_rel_multi_index_;
  std::vector<int64> max_abs_multi_index_;
};
// complex64 NaN-ness: either component differing in NaN-ness counts as a
// mismatch.
template <>
bool NearComparator::NanMismatch<complex64>(complex64 lhs, complex64 rhs) {
  return std::isnan(lhs.real()) != std::isnan(rhs.real()) ||
         std::isnan(lhs.imag()) != std::isnan(rhs.imag());
}
// complex64 nearness is checked per component, each against the same
// absolute bound.
template <>
void NearComparator::ExpectNear<complex64>(complex64 expected, complex64 actual,
                                           const ::testing::Message& message) {
  EXPECT_NEAR(expected.real(), actual.real(), error_.abs)
      << "expected:\n  " << expected << "\n\tvs actual:\n  " << actual << "\n"
      << message;
  EXPECT_NEAR(expected.imag(), actual.imag(), error_.abs)
      << "expected:\n  " << expected << "\n\tvs actual:\n  " << actual << "\n"
      << message;
}
// bfloat16 is compared after widening both values to float.
template <>
bool NearComparator::ExpectValuesNear<bfloat16>(bfloat16 expected,
                                                bfloat16 actual) {
  return ExpectValuesNear(static_cast<float>(expected),
                          static_cast<float>(actual));
}
} // namespace
// Compares two non-tuple floating-point literals elementwise within
// `error`, returning a gtest assertion result.
/* static */ ::testing::AssertionResult LiteralTestUtil::Near(
    const Literal& expected, const Literal& actual, const ErrorSpec& error) {
  NearComparator comparator(error);
  if (comparator.ExpectNear(expected, actual)) {
    return ::testing::AssertionSuccess();
  }
  return ::testing::AssertionFailure() << "values were not near";
}
// Nonfatal EXPECT form of Near, appending an optional caller message to the
// failure output.
/* static */ void LiteralTestUtil::ExpectNear(const Literal& expected,
                                              const Literal& actual,
                                              const ErrorSpec& error,
                                              const string& message) {
  EXPECT_TRUE(Near(expected, actual, error))
      << (message.empty()
              ? ""
              : tensorflow::strings::StrCat("\nmessage: ", message));
}
// Elementwise nearness over tuples: floating-point leaves compare with Near,
// other leaves with exact Equal, nested tuples recurse. Returns the FIRST
// failing element's result; later elements are still visited so their
// EXPECTs and logging run.
/* static */ ::testing::AssertionResult LiteralTestUtil::NearTuple(
    const Literal& expected, const Literal& actual, const ErrorSpec& error) {
  VLOG(1) << "expected: " << expected.ToString();
  VLOG(1) << "actual: " << actual.ToString();
  if (!ShapeUtil::IsTuple(expected.shape()) ||
      !ShapeUtil::IsTuple(actual.shape())) {
    return ::testing::AssertionFailure()
           << "tuples expected shape = " << expected.shape().ShortDebugString()
           << " actual shape = " << actual.shape().ShortDebugString();
  }
  AssertEqualShapes(expected.shape(), actual.shape());
  ::testing::AssertionResult err = ::testing::AssertionSuccess();
  for (int64 i = 0; i < ShapeUtil::TupleElementCount(expected.shape()); ++i) {
    SCOPED_TRACE(tensorflow::strings::StrCat(
        "Tuple index ", i, " in ", ShapeUtil::HumanString(expected.shape())));
    // Views avoid copying the tuple elements.
    const auto expected_element = LiteralView::Create(expected, {i});
    const auto actual_element = LiteralView::Create(actual, {i});
    ::testing::AssertionResult res = [&] {
      if (ShapeUtil::IsTuple(expected_element.shape())) {
        return NearTuple(expected_element, actual_element, error);
      } else if (ShapeUtil::ElementIsFloating(expected_element.shape())) {
        return Near(expected_element, actual_element, error);
      } else {
        return Equal(expected_element, actual_element);
      }
    }();
    if (err && !res) {
      err = res;  // Keep only the first failure.
    }
  }
  return err;
}
/* static */ void LiteralTestUtil::ExpectNearTuple(const Literal& expected,
const Literal& actual,
const ErrorSpec& error) {
EXPECT_TRUE(NearTuple(expected, actual, error));
}
// Dispatches to the appropriate comparison: Near/NearTuple when an error
// bound is supplied, Equal/EqualTuple otherwise; tuple-ness picks the
// tuple-aware variant.
/*static*/ ::testing::AssertionResult LiteralTestUtil::NearOrEqual(
    const Literal& expected, const Literal& actual,
    const tensorflow::gtl::optional<ErrorSpec>& error) {
  if (ShapeUtil::IsTuple(expected.shape())) {
    if (error.has_value()) {
      VLOG(1) << "Expects near tuple";
      return NearTuple(expected, actual, *error);
    }
    VLOG(1) << "Expects equal tuple";
    return EqualTuple(expected, actual);
  }
  if (error.has_value()) {
    VLOG(1) << "Expects near";
    return Near(expected, actual, *error);
  }
  VLOG(1) << "Expects equal";
  return Equal(expected, actual);
}
// Nonfatal EXPECT form of NearOrEqual.
/*static*/ void LiteralTestUtil::ExpectNearOrEqual(
    const Literal& expected, const Literal& actual,
    const tensorflow::gtl::optional<ErrorSpec>& error) {
  const ::testing::AssertionResult res = NearOrEqual(expected, actual, error);
  EXPECT_TRUE(res);
}
// Renders a multidimensional index as e.g. "{2,0,5}".
/* static */ string LiteralTestUtil::MultiIndexAsString(
    tensorflow::gtl::ArraySlice<int64> multi_index) {
  const string joined = tensorflow::str_util::Join(multi_index, ",");
  return tensorflow::strings::StrCat("{", joined, "}");
}
// Returns a copy of `literal` reshaped to `new_dimensions`, writing the data
// through a layout given by `minor_to_major`. Element count must match.
/* static */ std::unique_ptr<Literal> LiteralTestUtil::Reshape(
    tensorflow::gtl::ArraySlice<int64> new_dimensions,
    tensorflow::gtl::ArraySlice<int64> minor_to_major, const Literal& literal) {
  int64 new_num_elements = 1;
  for (int64 i = 0; i < new_dimensions.size(); ++i) {
    new_num_elements *= new_dimensions[i];
  }
  // Reshape must preserve the total element count.
  CHECK_EQ(ShapeUtil::ElementsIn(literal.shape()), new_num_elements);
  auto new_literal = MakeUnique<Literal>(
      ShapeUtil::MakeShape(literal.shape().element_type(), new_dimensions));
  // Create a new shape with the given minor-to-major layout. This shape is used
  // solely for converting linear address to multi-dimensional addresses when
  // writing elements to the new literal.
  Shape shape_with_layout = new_literal->shape();
  *shape_with_layout.mutable_layout() = LayoutUtil::MakeLayout(minor_to_major);
  // Copy data into new literal, element-by-element.
  for (int64 i = 0; i < ShapeUtil::ElementsIn(literal.shape()); ++i) {
    std::vector<int64> from_multi_index =
        IndexUtil::LinearIndexToMultidimensionalIndex(literal.shape(), i);
    std::vector<int64> to_multi_index =
        IndexUtil::LinearIndexToMultidimensionalIndex(shape_with_layout, i);
    // Dispatch on element type for the correctly-typed Get/Set.
    // NOTE(review): BF16 and C64 are not handled here and fall through to
    // LOG(FATAL) below -- confirm callers never reshape those types.
    switch (literal.shape().element_type()) {
      case PRED:
        new_literal->Set<bool>(to_multi_index,
                               literal.Get<bool>(from_multi_index));
        break;
      case U8:
        new_literal->Set<uint8>(to_multi_index,
                                literal.Get<uint8>(from_multi_index));
        break;
      case U32:
        new_literal->Set<uint32>(to_multi_index,
                                 literal.Get<uint32>(from_multi_index));
        break;
      case S32:
        new_literal->Set<int32>(to_multi_index,
                                literal.Get<int32>(from_multi_index));
        break;
      case U64:
        new_literal->Set<uint64>(to_multi_index,
                                 literal.Get<uint64>(from_multi_index));
        break;
      case S64:
        new_literal->Set<int64>(to_multi_index,
                                literal.Get<int64>(from_multi_index));
        break;
      case F32:
        new_literal->Set<float>(to_multi_index,
                                literal.Get<float>(from_multi_index));
        break;
      case F64:
        new_literal->Set<double>(to_multi_index,
                                 literal.Get<double>(from_multi_index));
        break;
      default:
        LOG(FATAL) << "Unhandled primitive element type: "
                   << PrimitiveType_Name(literal.shape().element_type());
    }
  }
  return new_literal;
}
} // namespace xla
| {
"pile_set_name": "Github"
} |
I
800
OBJ
POINT_COUNTS 36 0 0 36
TEXTURE ../../../../Users/alxunru/Desktop/X-Plane 10/Aircraft/Test Cases/objects/test.png
VT -0.5 0.5 0.5 -1 0 0 0 0 # 0
VT -0.5 -0.5 0.5 -1 0 0 0 0 # 1
VT -0.5 -0.5 -0.5 -1 0 0 0 0 # 2
VT -0.5 -0.5 -0.5 -1 0 0 0 0 # 3
VT -0.5 0.5 -0.5 -1 0 0 0 0 # 4
VT -0.5 0.5 0.5 -1 0 0 0 0 # 5
VT 0.5 -0.5 -0.5 0 0 -1 0 0 # 6
VT 0.5 0.5 -0.5 0 0 -1 0 0 # 7
VT -0.5 0.5 -0.5 0 0 -1 0 0 # 8
VT -0.5 0.5 -0.5 0 0 -1 0 0 # 9
VT -0.5 -0.5 -0.5 0 0 -1 0 0 # 10
VT 0.5 -0.5 -0.5 0 0 -1 0 0 # 11
VT 0.5 -0.5 0.5 1 0 0 0 0 # 12
VT 0.5 0.5 0.5 1 0 0 0 0 # 13
VT 0.5 0.5 -0.5 1 0 0 0 0 # 14
VT 0.5 0.5 -0.5 1 0 0 0 0 # 15
VT 0.5 -0.5 -0.5 1 0 0 0 0 # 16
VT 0.5 -0.5 0.5 1 0 0 0 0 # 17
VT 0.5 0.5 0.5 0 0 1 0 0 # 18
VT 0.5 -0.5 0.5 0 0 1 0 0 # 19
VT -0.5 -0.5 0.5 0 0 1 0 0 # 20
VT -0.5 -0.5 0.5 0 0 1 0 0 # 21
VT -0.5 0.5 0.5 0 0 1 0 0 # 22
VT 0.5 0.5 0.5 0 0 1 0 0 # 23
VT 0.5 -0.5 -0.5 0 -1 0 0 0 # 24
VT -0.5 -0.5 -0.5 0 -1 0 0 0 # 25
VT -0.5 -0.5 0.5 0 -1 0 0 0 # 26
VT -0.5 -0.5 0.5 0 -1 0 0 0 # 27
VT 0.5 -0.5 0.5 0 -1 0 0 0 # 28
VT 0.5 -0.5 -0.5 0 -1 0 0 0 # 29
VT -0.5 0.5 -0.5 0 1 0 0 0 # 30
VT 0.5 0.5 -0.5 0 1 0 0 0 # 31
VT 0.5 0.5 0.5 0 1 0 0 0 # 32
VT 0.5 0.5 0.5 0 1 0 0 0 # 33
VT -0.5 0.5 0.5 0 1 0 0 0 # 34
VT -0.5 0.5 -0.5 0 1 0 0 0 # 35
IDX10 0 1 2 3 4 5 6 7 8 9
IDX10 10 11 12 13 14 15 16 17 18 19
IDX10 20 21 22 23 24 25 26 27 28 29
IDX 30
IDX 31
IDX 32
IDX 33
IDX 34
IDX 35
# 0 ROOT
# 1 Object: Cube.005
ANIM_begin
# rotation keyframes
ANIM_rotate_begin 0 1 0 sim/cockpit2/engine/actuators/throttle_ratio[0]
ANIM_rotate_key 0 0
ANIM_rotate_key 1 45.00010371
ANIM_rotate_end
# PRIMITIVE: Cube.005 weight: 0
# MATERIAL: Material
ATTR_shiny_rat 0.5
TRIS 0 36
ANIM_end
# Build with Blender 2.76 (sub 0) (build b'f337fea') Exported with XPlane2Blender 3.3.13 | {
"pile_set_name": "Github"
} |
//----------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
//----------------------------------------------------------------
namespace System.ServiceModel.Discovery
{
/// <summary>
/// Message property used by the WCF Discovery runtime to carry opaque
/// per-message correlation state between discovery components.
/// </summary>
class DiscoveryMessageProperty
{
    /// <summary>Key under which this property is stored in a message's Properties collection.</summary>
    public const string Name = "System.ServiceModel.Discovery.DiscoveryMessageProperty";

    /// <summary>Creates a property with no correlation state.</summary>
    public DiscoveryMessageProperty()
        : this(null)
    {
    }

    /// <summary>Creates a property carrying the given correlation state.</summary>
    public DiscoveryMessageProperty(object correlationState)
    {
        this.CorrelationState = correlationState;
    }

    /// <summary>Opaque correlation state associated with the message.</summary>
    public object CorrelationState { get; set; }
}
}
| {
"pile_set_name": "Github"
} |
/*=============================================================================
Copyright (c) 2001-2011 Joel de Guzman
Copyright (c) 2001-2011 Hartmut Kaiser
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
=============================================================================*/
#if !defined(SPIRIT_OPTIONAL_MARCH_23_2007_1117PM)
#define SPIRIT_OPTIONAL_MARCH_23_2007_1117PM
#if defined(_MSC_VER)
#pragma once
#endif
#include <boost/spirit/home/support/unused.hpp>
#include <boost/spirit/home/qi/detail/attributes.hpp>
#include <boost/spirit/home/support/has_semantic_action.hpp>
#include <boost/spirit/home/support/handles_container.hpp>
#include <boost/spirit/home/support/info.hpp>
#include <boost/spirit/home/support/container.hpp>
#include <boost/spirit/home/qi/parser.hpp>
#include <boost/spirit/home/qi/meta_compiler.hpp>
#include <boost/spirit/home/qi/detail/assign_to.hpp>
#include <boost/optional.hpp>
#include <vector>
namespace boost { namespace spirit
{
    ///////////////////////////////////////////////////////////////////////////
    // Enablers
    ///////////////////////////////////////////////////////////////////////////
    // Tell the Qi meta-compiler that unary operator- (proto's negate tag)
    // forms a valid parser expression in the qi domain, i.e. enables -p.
    template <>
    struct use_operator<qi::domain, proto::tag::negate> // enables -p
      : mpl::true_ {};
}}
namespace boost { namespace spirit { namespace qi
{
    //! Parser for -p: matches its subject zero or one time and therefore
    //! ALWAYS succeeds. The exposed attribute is built from the subject's
    //! attribute via traits::build_optional (typically boost::optional<T>),
    //! engaged only when the subject matched.
    template <typename Subject>
    struct optional : unary_parser<optional<Subject> >
    {
        typedef Subject subject_type;

        template <typename Context, typename Iterator>
        struct attribute
        {
            // Build a boost::optional from the subject's attribute. Note
            // that boost::optional may return unused_type if the
            // subject's attribute is an unused_type.
            typedef typename
                traits::build_optional<
                    typename traits::
                        attribute_of<Subject, Context, Iterator>::type
                >::type
            type;
        };

        optional(Subject const& subject_)
          : subject(subject_) {}

        // Non-container attribute case: parse into a local value and copy
        // it into attr_ only on subject success, so the optional stays
        // disengaged when the subject fails.
        template <typename Iterator, typename Context
          , typename Skipper, typename Attribute>
        bool parse_impl(Iterator& first, Iterator const& last
          , Context& context, Skipper const& skipper
          , Attribute& attr_, mpl::false_) const
        {
            // create a local value if Attribute is not unused_type
            typename spirit::result_of::optional_value<Attribute>::type val =
                typename spirit::result_of::optional_value<Attribute>::type();
            if (subject.parse(first, last, context, skipper, val))
            {
                // assign the parsed value into our attribute
                spirit::traits::assign_to(val, attr_);
            }
            return true; // -p always succeeds
        }

        // Container attribute case: let the subject append directly.
        template <typename Iterator, typename Context
          , typename Skipper, typename Attribute>
        bool parse_impl(Iterator& first, Iterator const& last
          , Context& context, Skipper const& skipper
          , Attribute& attr_, mpl::true_) const
        {
            subject.parse(first, last, context, skipper, attr_);
            return true; // -p always succeeds
        }

        template <typename Iterator, typename Context
          , typename Skipper, typename Attribute>
        bool parse(Iterator& first, Iterator const& last
          , Context& context, Skipper const& skipper
          , Attribute& attr_) const
        {
            // Dispatch on whether the attribute's value type is a container.
            typedef typename spirit::result_of::optional_value<Attribute>::type
                attribute_type;
            return parse_impl(first, last, context, skipper, attr_
              , traits::is_container<attribute_type>());
        }

        template <typename Context>
        info what(Context& context) const
        {
            return info("optional", subject.what(context));
        }

        Subject subject;
    };

    ///////////////////////////////////////////////////////////////////////////
    // Parser generators: make_xxx function (objects)
    ///////////////////////////////////////////////////////////////////////////
    template <typename Elements, typename Modifiers>
    struct make_composite<proto::tag::negate, Elements, Modifiers>
      : make_unary_composite<Elements, optional>
    {};
}}}
namespace boost { namespace spirit { namespace traits
{
    ///////////////////////////////////////////////////////////////////////////
    // optional<> carries a semantic action iff its subject does.
    template <typename Subject>
    struct has_semantic_action<qi::optional<Subject> >
      : unary_has_semantic_action<Subject> {};

    ///////////////////////////////////////////////////////////////////////////
    // optional<> takes care of container attributes itself (see its
    // parse_impl overloads), so it always reports handling the container.
    template <typename Subject, typename Attribute, typename Context
      , typename Iterator>
    struct handles_container<qi::optional<Subject>, Attribute
          , Context, Iterator>
      : mpl::true_ {};
}}}
#endif
| {
"pile_set_name": "Github"
} |
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package trace
import (
"math"
"time"
"cloud.google.com/go/internal/version"
"github.com/golang/protobuf/proto"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"google.golang.org/api/transport"
cloudtracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
)
// CallOptions contains the retry settings for each method of Client.
type CallOptions struct {
	// Retry settings applied to PatchTraces calls.
	PatchTraces []gax.CallOption
	// Retry settings applied to GetTrace calls.
	GetTrace []gax.CallOption
	// Retry settings applied to ListTraces calls.
	ListTraces []gax.CallOption
}
// defaultClientOptions returns the base dial options (endpoint and OAuth
// scopes) for the Stackdriver Trace service; caller-supplied options are
// appended after these and may override them.
func defaultClientOptions() []option.ClientOption {
	base := []option.ClientOption{
		option.WithEndpoint("cloudtrace.googleapis.com:443"),
		option.WithScopes(DefaultAuthScopes()...),
	}
	return base
}
// defaultCallOptions returns the per-method retry settings.  All three
// RPCs share one idempotent retry policy: exponential backoff starting at
// 100ms (capped at 1s, multiplier 1.2) on DeadlineExceeded/Unavailable.
func defaultCallOptions() *CallOptions {
	idempotent := []gax.CallOption{
		gax.WithRetry(func() gax.Retryer {
			return gax.OnCodes([]codes.Code{
				codes.DeadlineExceeded,
				codes.Unavailable,
			}, gax.Backoff{
				Initial:    100 * time.Millisecond,
				Max:        1000 * time.Millisecond,
				Multiplier: 1.2,
			})
		}),
	}
	return &CallOptions{
		PatchTraces: idempotent,
		GetTrace:    idempotent,
		ListTraces:  idempotent,
	}
}
// Client is a client for interacting with Stackdriver Trace API.
//
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
type Client struct {
	// The connection to the service.
	conn *grpc.ClientConn

	// The gRPC API client.
	client cloudtracepb.TraceServiceClient

	// The call options for this service.
	CallOptions *CallOptions

	// The x-goog-* metadata to be sent with each request.
	xGoogMetadata metadata.MD
}
// NewClient creates a new trace service client.
//
// This file describes an API for collecting and viewing traces and spans
// within a trace. A Trace is a collection of spans corresponding to a single
// operation or set of operations for an application. A span is an individual
// timed event which forms a node of the trace tree. Spans for a single trace
// may span multiple services.
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
	// Dial with the defaults first so caller-supplied options can override.
	conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
	if err != nil {
		return nil, err
	}
	client := &Client{
		conn:        conn,
		client:      cloudtracepb.NewTraceServiceClient(conn),
		CallOptions: defaultCallOptions(),
	}
	client.SetGoogleClientInfo()
	return client, nil
}
// Connection returns the client's connection to the API service.
// The connection is shared by all RPC methods of this client.
func (c *Client) Connection() *grpc.ClientConn {
	return c.conn
}
// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *Client) Close() error {
	return c.conn.Close()
}
// SetGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *Client) SetGoogleClientInfo(keyval ...string) {
	// Header is a flat list of key/value pairs: Go version first, then
	// caller-supplied pairs, then the generator/transport versions.
	pairs := []string{"gl-go", version.Go()}
	pairs = append(pairs, keyval...)
	pairs = append(pairs, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(pairs...))
}
// PatchTraces sends new traces to Stackdriver Trace or updates existing traces. If the ID
// of a trace that you send matches that of an existing trace, any fields
// in the existing trace and its spans are overwritten by the provided values,
// and any new fields provided are merged with the existing trace data. If the
// ID does not match, a new trace is created.
func (c *Client) PatchTraces(ctx context.Context, req *cloudtracepb.PatchTracesRequest, opts ...gax.CallOption) error {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// The full-slice expression keeps the append from clobbering the
	// shared default CallOptions slice.
	opts = append(c.CallOptions.PatchTraces[0:len(c.CallOptions.PatchTraces):len(c.CallOptions.PatchTraces)], opts...)
	return gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		_, err := c.client.PatchTraces(ctx, req, settings.GRPC...)
		return err
	}, opts...)
}
// GetTrace gets a single trace by its ID.
func (c *Client) GetTrace(ctx context.Context, req *cloudtracepb.GetTraceRequest, opts ...gax.CallOption) (*cloudtracepb.Trace, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.GetTrace[0:len(c.CallOptions.GetTrace):len(c.CallOptions.GetTrace)], opts...)
	var trace *cloudtracepb.Trace
	invokeErr := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		trace, err = c.client.GetTrace(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if invokeErr != nil {
		return nil, invokeErr
	}
	return trace, nil
}
// ListTraces returns a list of traces that match the specified filter conditions.
func (c *Client) ListTraces(ctx context.Context, req *cloudtracepb.ListTracesRequest, opts ...gax.CallOption) *TraceIterator {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.ListTraces[0:len(c.CallOptions.ListTraces):len(c.CallOptions.ListTraces)], opts...)
	it := &TraceIterator{}
	// Clone the request: the fetch closure below mutates PageToken/PageSize
	// across pages and must not alter the caller's request.
	req = proto.Clone(req).(*cloudtracepb.ListTracesRequest)
	it.InternalFetch = func(pageSize int, pageToken string) ([]*cloudtracepb.Trace, string, error) {
		var resp *cloudtracepb.ListTracesResponse
		req.PageToken = pageToken
		// PageSize on the wire is int32; clamp the int argument.
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.client.ListTraces(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.Traces, resp.NextPageToken, nil
	}
	// fetch adapts InternalFetch to the iterator package: it buffers the
	// page's items on the iterator and only reports the next token.
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	it.pageInfo.MaxSize = int(req.PageSize)
	return it
}
// TraceIterator manages a stream of *cloudtracepb.Trace.
type TraceIterator struct {
	// Fetched-but-unreturned results, consumed from the front by Next.
	items []*cloudtracepb.Trace
	// Pagination state shared with google.golang.org/api/iterator.
	pageInfo *iterator.PageInfo
	// Advances to the next page; installed by iterator.NewPageInfo.
	nextFunc func() error

	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*cloudtracepb.Trace, nextPageToken string, err error)
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *TraceIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}
// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *TraceIterator) Next() (*cloudtracepb.Trace, error) {
	// nextFunc refills it.items when the buffer is exhausted.
	if err := it.nextFunc(); err != nil {
		return nil, err
	}
	head := it.items[0]
	it.items = it.items[1:]
	return head, nil
}
// bufLen reports how many fetched-but-unconsumed items are buffered;
// used by the iterator paging machinery.
func (it *TraceIterator) bufLen() int {
	return len(it.items)
}

// takeBuf hands the buffered items to the iterator machinery and clears
// the buffer; used by google.golang.org/api/iterator.
func (it *TraceIterator) takeBuf() interface{} {
	b := it.items
	it.items = nil
	return b
}
| {
"pile_set_name": "Github"
} |
/*
mxb - v4l2 driver for the Multimedia eXtension Board
Copyright (C) 1998-2006 Michael Hunold <[email protected]>
Visit http://www.themm.net/~mihu/linux/saa7146/mxb.html
for further details about this card.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DEBUG_VARIABLE debug
#include <media/drv-intf/saa7146_vv.h>
#include <media/tuner.h>
#include <media/v4l2-common.h>
#include <media/i2c/saa7115.h>
#include <linux/module.h>
#include "tea6415c.h"
#include "tea6420.h"
#define MXB_AUDIOS 6
#define I2C_SAA7111A 0x24
#define I2C_TDA9840 0x42
#define I2C_TEA6415C 0x43
#define I2C_TEA6420_1 0x4c
#define I2C_TEA6420_2 0x4d
#define I2C_TUNER 0x60
#define MXB_BOARD_CAN_DO_VBI(dev) (dev->revision != 0)
/* global variable: number of MXB boards found so far; used to name the
   per-board i2c adapter and printed on attach */
static int mxb_num;

/* initial frequence the tuner will be tuned to.
   in verden (lower saxony, germany) 4148 is a
   channel called "phoenix" */
static int freq = 4148;
module_param(freq, int, 0644);
MODULE_PARM_DESC(freq, "initial frequency the tuner will be tuned to while setup");

static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off device debugging (default:off).");

#define MXB_INPUTS 4
/* video input indices; order matches mxb_inputs[] below */
enum { TUNER, AUX1, AUX3, AUX3_YC };
/* the four video inputs exposed to V4L2; 0x3f marks all six audio
   sources as selectable on every input (see vidioc_s_audio) */
static struct v4l2_input mxb_inputs[MXB_INPUTS] = {
	{ TUNER, "Tuner", V4L2_INPUT_TYPE_TUNER, 0x3f, 0,
		V4L2_STD_PAL_BG | V4L2_STD_PAL_I, 0, V4L2_IN_CAP_STD },
	{ AUX1, "AUX1", V4L2_INPUT_TYPE_CAMERA, 0x3f, 0,
		V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
	{ AUX3, "AUX3 Composite", V4L2_INPUT_TYPE_CAMERA, 0x3f, 0,
		V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
	{ AUX3_YC, "AUX3 S-Video", V4L2_INPUT_TYPE_CAMERA, 0x3f, 0,
		V4L2_STD_ALL, 0, V4L2_IN_CAP_STD },
};

/* this array holds the information, which port of the saa7146 each
   input actually uses. the mxb uses port 0 for every input */
static struct {
	int hps_source;
	int hps_sync;
} input_port_selection[MXB_INPUTS] = {
	{ SAA7146_HPS_SOURCE_PORT_A, SAA7146_HPS_SYNC_PORT_A },
	{ SAA7146_HPS_SOURCE_PORT_A, SAA7146_HPS_SYNC_PORT_A },
	{ SAA7146_HPS_SOURCE_PORT_A, SAA7146_HPS_SYNC_PORT_A },
	{ SAA7146_HPS_SOURCE_PORT_A, SAA7146_HPS_SYNC_PORT_A },
};

/* this array holds the information of the audio source (mxb_audios),
   which has to be switched corresponding to the video source (mxb_channels) */
static int video_audio_connect[MXB_INPUTS] =
	{ 0, 1, 3, 3 };

/* one input/output pin pair for a TEA6420 audio matrix route */
struct mxb_routing {
	u32 input;
	u32 output;
};
/* these are the available audio sources, which can switched
   to the line- and cd-output individually */
static struct v4l2_audio mxb_audios[MXB_AUDIOS] = {
	{
		.index = 0,
		.name = "Tuner",
		.capability = V4L2_AUDCAP_STEREO,
	} , {
		.index = 1,
		.name = "AUX1",
		.capability = V4L2_AUDCAP_STEREO,
	} , {
		.index = 2,
		.name = "AUX2",
		.capability = V4L2_AUDCAP_STEREO,
	} , {
		.index = 3,
		.name = "AUX3",
		.capability = V4L2_AUDCAP_STEREO,
	} , {
		.index = 4,
		.name = "Radio (X9)",
		.capability = V4L2_AUDCAP_STEREO,
	} , {
		.index = 5,
		.name = "CD-ROM (X10)",
		.capability = V4L2_AUDCAP_STEREO,
	}
};

/* These are the necessary input-output-pins for bringing one audio source
   (see above) to the CD-output. Note that gain is set to 0 in this table.
   Row index matches mxb_audios; the extra last row (index MXB_AUDIOS) mutes. */
static struct mxb_routing TEA6420_cd[MXB_AUDIOS + 1][2] = {
	{ { 1, 1 }, { 1, 1 } },	/* Tuner */
	{ { 5, 1 }, { 6, 1 } },	/* AUX 1 */
	{ { 4, 1 }, { 6, 1 } },	/* AUX 2 */
	{ { 3, 1 }, { 6, 1 } },	/* AUX 3 */
	{ { 1, 1 }, { 3, 1 } },	/* Radio */
	{ { 1, 1 }, { 2, 1 } },	/* CD-Rom */
	{ { 6, 1 }, { 6, 1 } }	/* Mute */
};

/* These are the necessary input-output-pins for bringing one audio source
   (see above) to the line-output. Note that gain is set to 0 in this table.
   Same row layout as TEA6420_cd. */
static struct mxb_routing TEA6420_line[MXB_AUDIOS + 1][2] = {
	{ { 2, 3 }, { 1, 2 } },
	{ { 5, 3 }, { 6, 2 } },
	{ { 4, 3 }, { 6, 2 } },
	{ { 3, 3 }, { 6, 2 } },
	{ { 2, 3 }, { 3, 2 } },
	{ { 2, 3 }, { 2, 2 } },
	{ { 6, 3 }, { 6, 2 } }	/* Mute */
};
/* per-board driver state; a pointer to this lives in dev->ext_priv */
struct mxb
{
	struct video_device video_dev;
	struct video_device vbi_dev;

	struct i2c_adapter i2c_adapter;

	/* i2c sub-devices instantiated in mxb_probe() */
	struct v4l2_subdev *saa7111a;	/* video decoder */
	struct v4l2_subdev *tda9840;	/* audio mode processing */
	struct v4l2_subdev *tea6415c;	/* video matrix switch */
	struct v4l2_subdev *tuner;
	struct v4l2_subdev *tea6420_1;	/* audio matrix #1 */
	struct v4l2_subdev *tea6420_2;	/* audio matrix #2 */

	int cur_mode;	/* current audio mode (mono, stereo, ...) */
	int cur_input;	/* current input */
	int cur_audinput;	/* current audio input */
	int cur_mute;	/* current mute status */
	struct v4l2_frequency cur_freq;	/* current frequency the tuner is tuned to */
};

/* convenience wrappers around v4l2_subdev_call() for each sub-device */
#define saa7111a_call(mxb, o, f, args...) \
	v4l2_subdev_call(mxb->saa7111a, o, f, ##args)
#define tda9840_call(mxb, o, f, args...) \
	v4l2_subdev_call(mxb->tda9840, o, f, ##args)
#define tea6415c_call(mxb, o, f, args...) \
	v4l2_subdev_call(mxb->tea6415c, o, f, ##args)
#define tuner_call(mxb, o, f, args...) \
	v4l2_subdev_call(mxb->tuner, o, f, ##args)

/* broadcast a call to all registered sub-devices, stopping on error */
#define call_all(dev, o, f, args...) \
	v4l2_device_call_until_err(&dev->v4l2_dev, 0, o, f, ##args)
/* Push the currently selected audio mode (mono/stereo/...) down to the
   tda9840 audio processor. */
static void mxb_update_audmode(struct mxb *mxb)
{
	struct v4l2_tuner t = {
		.audmode = mxb->cur_mode,
	};

	tda9840_call(mxb, tuner, s_tuner, &t);
}
/* Route audio source 'idx' (row index into TEA6420_cd/TEA6420_line,
   MXB_AUDIOS == 6 selects the mute row) through both TEA6420 mixers,
   first for the CD-output, then for the line-output. */
static inline void tea6420_route(struct mxb *mxb, int idx)
{
	v4l2_subdev_call(mxb->tea6420_1, audio, s_routing,
		TEA6420_cd[idx][0].input, TEA6420_cd[idx][0].output, 0);
	v4l2_subdev_call(mxb->tea6420_2, audio, s_routing,
		TEA6420_cd[idx][1].input, TEA6420_cd[idx][1].output, 0);
	v4l2_subdev_call(mxb->tea6420_1, audio, s_routing,
		TEA6420_line[idx][0].input, TEA6420_line[idx][0].output, 0);
	v4l2_subdev_call(mxb->tea6420_2, audio, s_routing,
		TEA6420_line[idx][1].input, TEA6420_line[idx][1].output, 0);
}
static struct saa7146_extension extension;
/* Control handler callback.  Audio mute is the only control this driver
   registers (see mxb_probe). */
static int mxb_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct saa7146_dev *dev = container_of(ctrl->handler,
				struct saa7146_dev, ctrl_handler);
	struct mxb *mxb = dev->ext_priv;

	if (ctrl->id != V4L2_CID_AUDIO_MUTE)
		return -EINVAL;

	mxb->cur_mute = ctrl->val;
	/* mute selects routing row 6 (silence); unmute restores the audio
	   source that belongs to the current video input */
	tea6420_route(mxb, ctrl->val ? 6 :
		      video_audio_connect[mxb->cur_input]);
	return 0;
}

static const struct v4l2_ctrl_ops mxb_ctrl_ops = {
	.s_ctrl = mxb_s_ctrl,
};
/* Probe the MXB: register the mute control, create the board's i2c
   adapter and instantiate all i2c sub-devices.  On success dev->ext_priv
   points to the freshly allocated struct mxb. */
static int mxb_probe(struct saa7146_dev *dev)
{
	struct v4l2_ctrl_handler *hdl = &dev->ctrl_handler;
	struct mxb *mxb = NULL;

	/* the only control: audio mute, default value 1 (muted) */
	v4l2_ctrl_new_std(hdl, &mxb_ctrl_ops,
			V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
	if (hdl->error)
		return hdl->error;
	mxb = kzalloc(sizeof(struct mxb), GFP_KERNEL);
	if (mxb == NULL) {
		DEB_D("not enough kernel memory\n");
		return -ENOMEM;
	}

	/* adapters are numbered per board: mxb0, mxb1, ... */
	snprintf(mxb->i2c_adapter.name, sizeof(mxb->i2c_adapter.name), "mxb%d", mxb_num);

	saa7146_i2c_adapter_prepare(dev, &mxb->i2c_adapter, SAA7146_I2C_BUS_BIT_RATE_480);
	if (i2c_add_adapter(&mxb->i2c_adapter) < 0) {
		DEB_S("cannot register i2c-device. skipping.\n");
		kfree(mxb);
		return -EFAULT;
	}

	/* instantiate every chip on the board's i2c bus */
	mxb->saa7111a = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter,
			"saa7111", I2C_SAA7111A, NULL);
	mxb->tea6420_1 = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter,
			"tea6420", I2C_TEA6420_1, NULL);
	mxb->tea6420_2 = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter,
			"tea6420", I2C_TEA6420_2, NULL);
	mxb->tea6415c = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter,
			"tea6415c", I2C_TEA6415C, NULL);
	mxb->tda9840 = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter,
			"tda9840", I2C_TDA9840, NULL);
	mxb->tuner = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter,
			"tuner", I2C_TUNER, NULL);

	/* check if all devices are present */
	if (!mxb->tea6420_1 || !mxb->tea6420_2 || !mxb->tea6415c ||
	    !mxb->tda9840 || !mxb->saa7111a || !mxb->tuner) {
		pr_err("did not find all i2c devices. aborting\n");
		i2c_del_adapter(&mxb->i2c_adapter);
		kfree(mxb);
		return -ENODEV;
	}

	/* all devices are present, probe was successful */

	/* we store the pointer in our private data field */
	dev->ext_priv = mxb;
	/* apply the initial control values (this mutes the board) */
	v4l2_ctrl_handler_setup(hdl);

	return 0;
}
/* some init data for the saa7740, the so-called 'sound arena module'.
   there are no specs available, so we simply use some init values.
   Each entry is one raw i2c write of 'length' bytes; the table is
   terminated by the length == -1 sentinel (see mxb_init_done). */
static struct {
	int length;
	char data[9];
} mxb_saa7740_init[] = {
	{ 3, { 0x80, 0x00, 0x00 } },{ 3, { 0x80, 0x89, 0x00 } },
	{ 3, { 0x80, 0xb0, 0x0a } },{ 3, { 0x00, 0x00, 0x00 } },
	{ 3, { 0x49, 0x00, 0x00 } },{ 3, { 0x4a, 0x00, 0x00 } },
	{ 3, { 0x4b, 0x00, 0x00 } },{ 3, { 0x4c, 0x00, 0x00 } },
	{ 3, { 0x4d, 0x00, 0x00 } },{ 3, { 0x4e, 0x00, 0x00 } },
	{ 3, { 0x4f, 0x00, 0x00 } },{ 3, { 0x50, 0x00, 0x00 } },
	{ 3, { 0x51, 0x00, 0x00 } },{ 3, { 0x52, 0x00, 0x00 } },
	{ 3, { 0x53, 0x00, 0x00 } },{ 3, { 0x54, 0x00, 0x00 } },
	{ 3, { 0x55, 0x00, 0x00 } },{ 3, { 0x56, 0x00, 0x00 } },
	{ 3, { 0x57, 0x00, 0x00 } },{ 3, { 0x58, 0x00, 0x00 } },
	{ 3, { 0x59, 0x00, 0x00 } },{ 3, { 0x5a, 0x00, 0x00 } },
	{ 3, { 0x5b, 0x00, 0x00 } },{ 3, { 0x5c, 0x00, 0x00 } },
	{ 3, { 0x5d, 0x00, 0x00 } },{ 3, { 0x5e, 0x00, 0x00 } },
	{ 3, { 0x5f, 0x00, 0x00 } },{ 3, { 0x60, 0x00, 0x00 } },
	{ 3, { 0x61, 0x00, 0x00 } },{ 3, { 0x62, 0x00, 0x00 } },
	{ 3, { 0x63, 0x00, 0x00 } },{ 3, { 0x64, 0x00, 0x00 } },
	{ 3, { 0x65, 0x00, 0x00 } },{ 3, { 0x66, 0x00, 0x00 } },
	{ 3, { 0x67, 0x00, 0x00 } },{ 3, { 0x68, 0x00, 0x00 } },
	{ 3, { 0x69, 0x00, 0x00 } },{ 3, { 0x6a, 0x00, 0x00 } },
	{ 3, { 0x6b, 0x00, 0x00 } },{ 3, { 0x6c, 0x00, 0x00 } },
	{ 3, { 0x6d, 0x00, 0x00 } },{ 3, { 0x6e, 0x00, 0x00 } },
	{ 3, { 0x6f, 0x00, 0x00 } },{ 3, { 0x70, 0x00, 0x00 } },
	{ 3, { 0x71, 0x00, 0x00 } },{ 3, { 0x72, 0x00, 0x00 } },
	{ 3, { 0x73, 0x00, 0x00 } },{ 3, { 0x74, 0x00, 0x00 } },
	{ 3, { 0x75, 0x00, 0x00 } },{ 3, { 0x76, 0x00, 0x00 } },
	{ 3, { 0x77, 0x00, 0x00 } },{ 3, { 0x41, 0x00, 0x42 } },
	{ 3, { 0x42, 0x10, 0x42 } },{ 3, { 0x43, 0x20, 0x42 } },
	{ 3, { 0x44, 0x30, 0x42 } },{ 3, { 0x45, 0x00, 0x01 } },
	{ 3, { 0x46, 0x00, 0x01 } },{ 3, { 0x47, 0x00, 0x01 } },
	{ 3, { 0x48, 0x00, 0x01 } },
	{ 9, { 0x01, 0x03, 0xc5, 0x5c, 0x7a, 0x85, 0x01, 0x00, 0x54 } },
	{ 9, { 0x21, 0x03, 0xc5, 0x5c, 0x7a, 0x85, 0x01, 0x00, 0x54 } },
	{ 9, { 0x09, 0x0b, 0xb4, 0x6b, 0x74, 0x85, 0x95, 0x00, 0x34 } },
	{ 9, { 0x29, 0x0b, 0xb4, 0x6b, 0x74, 0x85, 0x95, 0x00, 0x34 } },
	{ 9, { 0x11, 0x17, 0x43, 0x62, 0x68, 0x89, 0xd1, 0xff, 0xb0 } },
	{ 9, { 0x31, 0x17, 0x43, 0x62, 0x68, 0x89, 0xd1, 0xff, 0xb0 } },
	{ 9, { 0x19, 0x20, 0x62, 0x51, 0x5a, 0x95, 0x19, 0x01, 0x50 } },
	{ 9, { 0x39, 0x20, 0x62, 0x51, 0x5a, 0x95, 0x19, 0x01, 0x50 } },
	{ 9, { 0x05, 0x3e, 0xd2, 0x69, 0x4e, 0x9a, 0x51, 0x00, 0xf0 } },
	{ 9, { 0x25, 0x3e, 0xd2, 0x69, 0x4e, 0x9a, 0x51, 0x00, 0xf0 } },
	{ 9, { 0x0d, 0x3d, 0xa1, 0x40, 0x7d, 0x9f, 0x29, 0xfe, 0x14 } },
	{ 9, { 0x2d, 0x3d, 0xa1, 0x40, 0x7d, 0x9f, 0x29, 0xfe, 0x14 } },
	{ 9, { 0x15, 0x73, 0xa1, 0x50, 0x5d, 0xa6, 0xf5, 0xfe, 0x38 } },
	{ 9, { 0x35, 0x73, 0xa1, 0x50, 0x5d, 0xa6, 0xf5, 0xfe, 0x38 } },
	{ 9, { 0x1d, 0xed, 0xd0, 0x68, 0x29, 0xb4, 0xe1, 0x00, 0xb8 } },
	{ 9, { 0x3d, 0xed, 0xd0, 0x68, 0x29, 0xb4, 0xe1, 0x00, 0xb8 } },
	{ 3, { 0x80, 0xb3, 0x0a } },
	{-1, { 0 } }
};
/* bring hardware to a sane state. this has to be done, just in case someone
   wants to capture from this device before it has been properly initialized.
   the capture engine would badly fail, because no valid signal arrives on the
   saa7146, thus leading to timeouts and stuff. */
static int mxb_init_done(struct saa7146_dev* dev)
{
	struct mxb* mxb = (struct mxb*)dev->ext_priv;
	struct i2c_msg msg;
	struct tuner_setup tun_setup;
	v4l2_std_id std = V4L2_STD_PAL_BG;

	int i = 0, err = 0;

	/* mute audio on tea6420s */
	tea6420_route(mxb, 6);

	/* select video mode in saa7111a */
	saa7111a_call(mxb, video, s_std, std);

	/* select tuner-output on saa7111a */
	i = 0;
	saa7111a_call(mxb, video, s_routing, SAA7115_COMPOSITE0,
		SAA7111_FMT_CCIR, 0);

	/* select a tuner type */
	tun_setup.mode_mask = T_ANALOG_TV;
	tun_setup.addr = ADDR_UNSET;
	tun_setup.type = TUNER_PHILIPS_PAL;
	tuner_call(mxb, tuner, s_type_addr, &tun_setup);
	/* tune in some frequency on tuner (module parameter 'freq') */
	mxb->cur_freq.tuner = 0;
	mxb->cur_freq.type = V4L2_TUNER_ANALOG_TV;
	mxb->cur_freq.frequency = freq;
	tuner_call(mxb, tuner, s_frequency, &mxb->cur_freq);

	/* set a default video standard */
	/* These two gpio calls set the GPIO pins that control the tda9820 */
	saa7146_write(dev, GPIO_CTRL, 0x00404050);
	saa7111a_call(mxb, core, s_gpio, 1);
	saa7111a_call(mxb, video, s_std, std);
	tuner_call(mxb, video, s_std, std);

	/* switch to tuner-channel on tea6415c */
	tea6415c_call(mxb, video, s_routing, 3, 17, 0);

	/* select tuner-output on multicable on tea6415c */
	tea6415c_call(mxb, video, s_routing, 3, 13, 0);

	/* the rest for mxb: start on the tuner input, muted */
	mxb->cur_input = 0;
	mxb->cur_audinput = video_audio_connect[mxb->cur_input];
	mxb->cur_mute = 1;

	mxb->cur_mode = V4L2_TUNER_MODE_STEREO;
	mxb_update_audmode(mxb);

	/* check if the saa7740 (aka 'sound arena module') is present
	   on the mxb. if so, we must initialize it. due to lack of
	   informations about the saa7740, the values were reverse
	   engineered. */
	msg.addr = 0x1b;
	msg.flags = 0;
	msg.len = mxb_saa7740_init[0].length;
	msg.buf = &mxb_saa7740_init[0].data[0];

	err = i2c_transfer(&mxb->i2c_adapter, &msg, 1);
	if (err == 1) {
		/* the sound arena module is a pos, that's probably the reason
		   philips refuses to hand out a datasheet for the saa7740...
		   it seems to screw up the i2c bus, so we disable fast irq
		   based i2c transactions here and rely on the slow and safe
		   polling method ... */
		extension.flags &= ~SAA7146_USE_I2C_IRQ;
		/* replay the whole init table until the -1 sentinel */
		for (i = 1; ; i++) {
			if (-1 == mxb_saa7740_init[i].length)
				break;

			msg.len = mxb_saa7740_init[i].length;
			msg.buf = &mxb_saa7740_init[i].data[0];
			err = i2c_transfer(&mxb->i2c_adapter, &msg, 1);
			if (err != 1) {
				DEB_D("failed to initialize 'sound arena module'\n");
				goto err;
			}
		}
		pr_info("'sound arena module' detected\n");
	}
err:
	/* the rest for saa7146: you should definitely set some basic values
	   for the input-port handling of the saa7146. */

	/* ext->saa has been filled by the core driver */

	/* some stuff is done via variables */
	saa7146_set_hps_source_and_sync(dev, input_port_selection[mxb->cur_input].hps_source,
			input_port_selection[mxb->cur_input].hps_sync);

	/* some stuff is done via direct write to the registers */

	/* this is ugly, but because of the fact that this is completely
	   hardware dependend, it should be done directly... */
	saa7146_write(dev, DD1_STREAM_B, 0x00000000);
	saa7146_write(dev, DD1_INIT, 0x02000200);
	saa7146_write(dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26));

	return 0;
}
/* interrupt-handler. this gets called when irq_mask is != 0.
it must clear the interrupt-bits in irq_mask it has handled */
/*
void mxb_irq_bh(struct saa7146_dev* dev, u32* irq_mask)
{
struct mxb* mxb = (struct mxb*)dev->ext_priv;
}
*/
/* VIDIOC_ENUMINPUT: report one of the four fixed video inputs. */
static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *i)
{
	DEB_EE("VIDIOC_ENUMINPUT %d\n", i->index);
	if (i->index >= MXB_INPUTS)
		return -EINVAL;
	/* struct assignment; the table entry's index field matches i->index */
	*i = mxb_inputs[i->index];
	return 0;
}
/* VIDIOC_G_INPUT: report the currently selected video input. */
static int vidioc_g_input(struct file *file, void *fh, unsigned int *i)
{
	struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
	struct mxb *mxb = (struct mxb *)dev->ext_priv;
	*i = mxb->cur_input;
	DEB_EE("VIDIOC_G_INPUT %d\n", *i);
	return 0;
}
/* VIDIOC_S_INPUT: select one of the four video inputs.  Reprograms the
   saa7146 source/sync ports, the tea6415c video matrix, the saa7111a
   decoder routing and finally the audio routing. */
static int vidioc_s_input(struct file *file, void *fh, unsigned int input)
{
	struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
	struct mxb *mxb = (struct mxb *)dev->ext_priv;
	int err = 0;
	int i = 0;

	DEB_EE("VIDIOC_S_INPUT %d\n", input);
	if (input >= MXB_INPUTS)
		return -EINVAL;

	mxb->cur_input = input;

	saa7146_set_hps_source_and_sync(dev, input_port_selection[input].hps_source,
			input_port_selection[input].hps_sync);

	/* prepare switching of tea6415c and saa7111a;
	   have a look at the 'background'-file for further informations  */
	switch (input) {
	case TUNER:
		i = SAA7115_COMPOSITE0;

		err = tea6415c_call(mxb, video, s_routing, 3, 17, 0);

		/* connect tuner-output always to multicable */
		if (!err)
			err = tea6415c_call(mxb, video, s_routing, 3, 13, 0);
		break;
	case AUX3_YC:
		/* nothing to be done here. aux3_yc is
		   directly connected to the saa711a */
		i = SAA7115_SVIDEO1;
		break;
	case AUX3:
		/* nothing to be done here. aux3 is
		   directly connected to the saa711a */
		i = SAA7115_COMPOSITE1;
		break;
	case AUX1:
		i = SAA7115_COMPOSITE0;
		err = tea6415c_call(mxb, video, s_routing, 1, 17, 0);
		break;
	}

	if (err)
		return err;

	/* switch video in saa7111a */
	if (saa7111a_call(mxb, video, s_routing, i, SAA7111_FMT_CCIR, 0))
		pr_err("VIDIOC_S_INPUT: could not address saa7111a\n");

	mxb->cur_audinput = video_audio_connect[input];
	/* switch the audio-source only if necessary */
	if (0 == mxb->cur_mute)
		tea6420_route(mxb, mxb->cur_audinput);
	/* the tuner (audio input 0) carries the stereo/mono mode setting */
	if (mxb->cur_audinput == 0)
		mxb_update_audmode(mxb);

	return 0;
}
/* VIDIOC_G_TUNER: fill in the static tuner description and let the
   sub-devices report the dynamic state (signal, rxsubchans, ...). */
static int vidioc_g_tuner(struct file *file, void *fh, struct v4l2_tuner *t)
{
	struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
	struct mxb *mxb = (struct mxb *)dev->ext_priv;

	/* only tuner 0 exists on this board */
	if (t->index) {
		DEB_D("VIDIOC_G_TUNER: channel %d does not have a tuner attached\n",
		      t->index);
		return -EINVAL;
	}

	DEB_EE("VIDIOC_G_TUNER: %d\n", t->index);

	/* wipe everything, then fill in; index stays 0 (checked above) */
	memset(t, 0, sizeof(*t));
	strlcpy(t->name, "TV Tuner", sizeof(t->name));
	t->type = V4L2_TUNER_ANALOG_TV;
	t->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO |
			V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2 | V4L2_TUNER_CAP_SAP;
	t->audmode = mxb->cur_mode;
	return call_all(dev, tuner, g_tuner, t);
}
/* VIDIOC_S_TUNER: remember the requested audio mode and forward the
   request to all sub-devices. */
static int vidioc_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *t)
{
	struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
	struct mxb *mxb = (struct mxb *)dev->ext_priv;

	/* only tuner 0 exists on this board */
	if (t->index) {
		DEB_D("VIDIOC_S_TUNER: channel %d does not have a tuner attached\n",
		      t->index);
		return -EINVAL;
	}

	mxb->cur_mode = t->audmode;
	return call_all(dev, tuner, s_tuner, t);
}
/* VIDIOC_QUERYSTD: delegate standard detection to the sub-devices. */
static int vidioc_querystd(struct file *file, void *fh, v4l2_std_id *norm)
{
	struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;

	return call_all(dev, video, querystd, norm);
}
/* VIDIOC_G_FREQUENCY: report the frequency cached at the last S_FREQUENCY. */
static int vidioc_g_frequency(struct file *file, void *fh, struct v4l2_frequency *f)
{
	struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
	struct mxb *mxb = (struct mxb *)dev->ext_priv;

	/* only tuner 0 exists on this board */
	if (f->tuner != 0)
		return -EINVAL;

	*f = mxb->cur_freq;
	DEB_EE("VIDIOC_G_FREQ: freq:0x%08x\n", f->frequency);
	return 0;
}
/* VIDIOC_S_FREQUENCY: tune the tuner, read back the (possibly clamped)
   frequency and reset the VBI field counter when on the tuner input. */
static int vidioc_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *f)
{
	struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
	struct mxb *mxb = (struct mxb *)dev->ext_priv;
	struct saa7146_vv *vv = dev->vv_data;

	/* only tuner 0 exists on this board */
	if (f->tuner)
		return -EINVAL;

	if (V4L2_TUNER_ANALOG_TV != f->type)
		return -EINVAL;

	DEB_EE("VIDIOC_S_FREQUENCY: freq:0x%08x\n", mxb->cur_freq.frequency);

	/* tune in desired frequency */
	tuner_call(mxb, tuner, s_frequency, f);
	/* let the tuner subdev clamp the frequency to the tuner range */
	mxb->cur_freq = *f;
	tuner_call(mxb, tuner, g_frequency, &mxb->cur_freq);
	/* tuner audio (input 0) may change with the frequency */
	if (mxb->cur_audinput == 0)
		mxb_update_audmode(mxb);

	/* nothing else to do when capturing from a non-tuner input */
	if (mxb->cur_input)
		return 0;

	/* hack: changing the frequency should invalidate the vbi-counter (=> alevt) */
	spin_lock(&dev->slock);
	vv->vbi_fieldcount = 0;
	spin_unlock(&dev->slock);

	return 0;
}
/* VIDIOC_ENUMAUDIO: report one of the six fixed audio sources verbatim. */
static int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *a)
{
	if (a->index < MXB_AUDIOS) {
		*a = mxb_audios[a->index];
		return 0;
	}
	return -EINVAL;
}
/* VIDIOC_G_AUDIO: report the audio source that is currently routed. */
static int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *a)
{
	struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
	struct mxb *mxb = (struct mxb *)dev->ext_priv;

	DEB_EE("VIDIOC_G_AUDIO\n");
	*a = mxb_audios[mxb->cur_audinput];
	return 0;
}
/* VIDIOC_S_AUDIO: route a different audio source, provided the current
   video input allows it (audioset bitmask in mxb_inputs). */
static int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *a)
{
	struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
	struct mxb *mxb = (struct mxb *)dev->ext_priv;

	DEB_D("VIDIOC_S_AUDIO %d\n", a->index);

	if (!(mxb_inputs[mxb->cur_input].audioset & (1 << a->index)))
		return -EINVAL;

	/* only touch the hardware when the source actually changes */
	if (mxb->cur_audinput != a->index) {
		mxb->cur_audinput = a->index;
		tea6420_route(mxb, a->index);
		/* the tuner (audio input 0) carries the audmode setting */
		if (mxb->cur_audinput == 0)
			mxb_update_audmode(mxb);
	}
	return 0;
}
#ifdef CONFIG_VIDEO_ADV_DEBUG
/* Debug interface: read a raw 32-bit saa7146 register. */
static int vidioc_g_register(struct file *file, void *fh, struct v4l2_dbg_register *reg)
{
	struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;

	/* registers are 4 bytes wide; reject offsets beyond BAR 0 */
	if (reg->reg > pci_resource_len(dev->pci, 0) - 4)
		return -EINVAL;
	reg->val = saa7146_read(dev, reg->reg);
	reg->size = 4;
	return 0;
}

/* Debug interface: write a raw 32-bit saa7146 register. */
static int vidioc_s_register(struct file *file, void *fh, const struct v4l2_dbg_register *reg)
{
	struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;

	if (reg->reg > pci_resource_len(dev->pci, 0) - 4)
		return -EINVAL;
	saa7146_write(dev, reg->reg, reg->val);
	return 0;
}
#endif
static struct saa7146_ext_vv vv_data;
/* this function only gets called when the probing was successful */
static int mxb_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_data *info)
{
	struct mxb *mxb;

	DEB_EE("dev:%p\n", dev);

	saa7146_vv_init(dev, &vv_data);
	if (mxb_probe(dev)) {
		saa7146_vv_release(dev);
		return -1;
	}
	mxb = (struct mxb *)dev->ext_priv;

	/* hook the board-specific ioctl handlers into the template ops */
	vv_data.vid_ops.vidioc_enum_input = vidioc_enum_input;
	vv_data.vid_ops.vidioc_g_input = vidioc_g_input;
	vv_data.vid_ops.vidioc_s_input = vidioc_s_input;
	vv_data.vid_ops.vidioc_querystd = vidioc_querystd;
	vv_data.vid_ops.vidioc_g_tuner = vidioc_g_tuner;
	vv_data.vid_ops.vidioc_s_tuner = vidioc_s_tuner;
	vv_data.vid_ops.vidioc_g_frequency = vidioc_g_frequency;
	vv_data.vid_ops.vidioc_s_frequency = vidioc_s_frequency;
	vv_data.vid_ops.vidioc_enumaudio = vidioc_enumaudio;
	vv_data.vid_ops.vidioc_g_audio = vidioc_g_audio;
	vv_data.vid_ops.vidioc_s_audio = vidioc_s_audio;
#ifdef CONFIG_VIDEO_ADV_DEBUG
	vv_data.vid_ops.vidioc_g_register = vidioc_g_register;
	vv_data.vid_ops.vidioc_s_register = vidioc_s_register;
#endif
	if (saa7146_register_device(&mxb->video_dev, dev, "mxb", VFL_TYPE_GRABBER)) {
		ERR("cannot register capture v4l2 device. skipping.\n");
		saa7146_vv_release(dev);
		return -1;
	}

	/* initialization stuff (vbi) (only for revision > 0 and for extensions which want it)*/
	if (MXB_BOARD_CAN_DO_VBI(dev)) {
		if (saa7146_register_device(&mxb->vbi_dev, dev, "mxb", VFL_TYPE_VBI)) {
			/* vbi is optional: continue with capture only */
			ERR("cannot register vbi v4l2 device. skipping.\n");
		}
	}

	pr_info("found Multimedia eXtension Board #%d\n", mxb_num);

	mxb_num++;
	mxb_init_done(dev);
	return 0;
}
/* Tear down one board: mute the mixers, unregister the v4l2 devices and
   free the per-board state allocated in mxb_probe(). */
static int mxb_detach(struct saa7146_dev *dev)
{
	struct mxb *mxb = (struct mxb *)dev->ext_priv;

	DEB_EE("dev:%p\n", dev);

	/* silence both TEA6420 mixers before tearing everything down */
	tea6420_route(mxb, 6);

	saa7146_unregister_device(&mxb->video_dev, dev);
	if (MXB_BOARD_CAN_DO_VBI(dev))
		saa7146_unregister_device(&mxb->vbi_dev, dev);
	saa7146_vv_release(dev);

	mxb_num--;

	i2c_del_adapter(&mxb->i2c_adapter);
	kfree(mxb);

	return 0;
}
/* Called by the saa7146 core on standard changes.  Programs the
   GPIO-controlled tda9820 and the saa7111a for either PAL-I or the
   PAL/NTSC/SECAM group, forwarding the standard to the tuner only when
   the tuner input is active. */
static int std_callback(struct saa7146_dev *dev, struct saa7146_standard *standard)
{
	struct mxb *mxb = (struct mxb *)dev->ext_priv;

	if (V4L2_STD_PAL_I == standard->id) {
		v4l2_std_id std = V4L2_STD_PAL_I;

		DEB_D("VIDIOC_S_STD: setting mxb for PAL_I\n");
		/* These two gpio calls set the GPIO pins that control the tda9820 */
		saa7146_write(dev, GPIO_CTRL, 0x00404050);
		saa7111a_call(mxb, core, s_gpio, 0);
		saa7111a_call(mxb, video, s_std, std);
		if (mxb->cur_input == 0)
			tuner_call(mxb, video, s_std, std);
	} else {
		v4l2_std_id std = V4L2_STD_PAL_BG;

		/* on the tuner input PAL-BG is forced; other inputs get the
		   requested standard verbatim */
		if (mxb->cur_input)
			std = standard->id;
		DEB_D("VIDIOC_S_STD: setting mxb for PAL/NTSC/SECAM\n");
		/* These two gpio calls set the GPIO pins that control the tda9820 */
		saa7146_write(dev, GPIO_CTRL, 0x00404050);
		saa7111a_call(mxb, core, s_gpio, 1);
		saa7111a_call(mxb, video, s_std, std);
		if (mxb->cur_input == 0)
			tuner_call(mxb, video, s_std, std);
	}
	return 0;
}
/*
 * TV standards supported by the MXB.  Each entry carries the saa7146
 * capture window geometry for that standard: vertical/horizontal start
 * offsets, active field height, pixels per line and the maximum
 * scaled output size.
 */
static struct saa7146_standard standard[] = {
{
.name = "PAL-BG", .id = V4L2_STD_PAL_BG,
.v_offset = 0x17, .v_field = 288,
.h_offset = 0x14, .h_pixels = 680,
.v_max_out = 576, .h_max_out = 768,
}, {
.name = "PAL-I", .id = V4L2_STD_PAL_I,
.v_offset = 0x17, .v_field = 288,
.h_offset = 0x14, .h_pixels = 680,
.v_max_out = 576, .h_max_out = 768,
}, {
.name = "NTSC", .id = V4L2_STD_NTSC,
.v_offset = 0x16, .v_field = 240,
.h_offset = 0x06, .h_pixels = 708,
.v_max_out = 480, .h_max_out = 640,
}, {
.name = "SECAM", .id = V4L2_STD_SECAM,
.v_offset = 0x14, .v_field = 288,
.h_offset = 0x14, .h_pixels = 720,
.v_max_out = 576, .h_max_out = 768,
}
};
/*
 * Per-board extension data handed to the saa7146 core via the PCI
 * table's driver_data: human-readable board name plus a pointer to the
 * extension descriptor below.
 */
static struct saa7146_pci_extension_data mxb = {
.ext_priv = "Multimedia eXtension Board",
.ext = &extension,
};
/*
 * PCI IDs this driver binds to: the Philips SAA7146 bridge.  Subvendor
 * and subdevice are 0x0000 here; board-specific identification is
 * presumably done at attach time -- TODO confirm against mxb_attach().
 * driver_data carries the saa7146_pci_extension_data above.
 */
static struct pci_device_id pci_tbl[] = {
{
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7146,
.subvendor = 0x0000,
.subdevice = 0x0000,
.driver_data = (unsigned long)&mxb,
}, {
/* zero-filled terminator entry */
.vendor = 0,
}
};
MODULE_DEVICE_TABLE(pci, pci_tbl);
/*
 * saa7146 video subsystem description for the MXB: the number of video
 * inputs, the advertised V4L2 capabilities, the supported TV standard
 * table (standard[] above) and the callback invoked when the standard
 * changes.
 */
static struct saa7146_ext_vv vv_data = {
	.inputs = MXB_INPUTS,
	.capabilities = V4L2_CAP_TUNER | V4L2_CAP_VBI_CAPTURE | V4L2_CAP_AUDIO,
	.stds = &standard[0],
	/* ARRAY_SIZE is the idiomatic kernel spelling of the entry count */
	.num_stds = ARRAY_SIZE(standard),
	.std_callback = &std_callback,
};
/*
 * saa7146 extension descriptor: registers the board name, PCI id table
 * and the attach/detach callbacks with the saa7146 core.  The driver
 * installs no interrupt handler of its own (irq_mask 0, irq_func NULL);
 * SAA7146_USE_I2C_IRQ requests interrupt-driven i2c from the core.
 */
static struct saa7146_extension extension = {
.name = "Multimedia eXtension Board",
.flags = SAA7146_USE_I2C_IRQ,
.pci_tbl = &pci_tbl[0],
.module = THIS_MODULE,
.attach = mxb_attach,
.detach = mxb_detach,
.irq_mask = 0,
.irq_func = NULL,
};
/*
 * Module load: hand the extension descriptor to the saa7146 core, which
 * then probes matching PCI devices and calls mxb_attach() for each.
 * Returns 0 on success, -ENODEV if registration fails.
 */
static int __init mxb_init_module(void)
{
if (saa7146_register_extension(&extension)) {
DEB_S("failed to register extension\n");
return -ENODEV;
}
return 0;
}
/* Module unload: unhook this driver from the saa7146 core. */
static void __exit mxb_cleanup_module(void)
{
saa7146_unregister_extension(&extension);
}
module_init(mxb_init_module);
module_exit(mxb_cleanup_module);
MODULE_DESCRIPTION("video4linux-2 driver for the Siemens-Nixdorf 'Multimedia eXtension board'");
MODULE_AUTHOR("Michael Hunold <[email protected]>");
MODULE_LICENSE("GPL");
| {
"pile_set_name": "Github"
} |
// Recursively create /tmp/foo/bar/baz (mkdirp == `mkdir -p`) and
// report the outcome: the error object on failure, 'pow!' on success.
var mkdirp = require('mkdirp');

mkdirp('/tmp/foo/bar/baz', function (err) {
  if (err) {
    console.error(err);
  } else {
    console.log('pow!');
  }
});
| {
"pile_set_name": "Github"
} |
/*============================================================================
The Medical Imaging Interaction Toolkit (MITK)
Copyright (c) German Cancer Research Center (DKFZ)
All rights reserved.
Use of this source code is governed by a 3-clause BSD license that can be
found in the LICENSE file.
============================================================================*/
#include "mitkRegEvalStyleProperty.h"
/**
 * Default constructor: registers all known evaluation styles and
 * selects id 0 ("Blend") as the initial value.
 */
mitk::RegEvalStyleProperty::RegEvalStyleProperty( )
{
AddTypes();
SetValue( 0 );
}
/**
 * Constructor taking an enumeration id.  Registers all known styles,
 * then selects @p value when it is a valid id; otherwise falls back to
 * id 0 ("Blend").
 */
mitk::RegEvalStyleProperty::RegEvalStyleProperty( const IdType& value )
{
  AddTypes();
  if ( !IsValidEnumerationValue( value ) )
  {
    SetValue( 0 );
    return;
  }
  SetValue( value );
}
/**
 * Constructor taking an enumeration name.  Registers all known styles,
 * then selects @p value when it names a registered style; otherwise
 * falls back to id 0 ("Blend").
 */
mitk::RegEvalStyleProperty::RegEvalStyleProperty( const std::string& value )
{
  AddTypes();
  if ( !IsValidEnumerationValue( value ) )
  {
    SetValue( 0 );
    return;
  }
  SetValue( value );
}
void mitk::RegEvalStyleProperty::AddTypes()
{
AddEnum( "Blend", static_cast<IdType>( 0 ) );
AddEnum( "Color Blend", static_cast<IdType>( 1 ) );
AddEnum( "Checkerboard", static_cast<IdType>( 2 ) );
AddEnum( "Wipe", static_cast<IdType>( 3 ) );
AddEnum( "Difference", static_cast<IdType>( 4 ) );
AddEnum( "Contour", static_cast<IdType>( 5 ) );
}
/**
 * Thin forwarder to the superclass' AddEnum().  Returns the
 * superclass' result (presumably false when the entry cannot be
 * added, e.g. a duplicate id/name -- confirm in EnumerationProperty).
 */
bool mitk::RegEvalStyleProperty::AddEnum( const std::string& name, const IdType& id )
{
return Superclass::AddEnum( name, id );
}
| {
"pile_set_name": "Github"
} |
<div class="container content-container">
<div class="row clearfix">
<div class="content-wrapper">
<div ng-include="'views/partials/_update_password_form.html'"></div>
<p class="form-info">
<a ui-sref="app.signup" class="signup-link">سجل حساب جديد</a> أو
<a ui-sref="app.login" class="signup-link">سجل الدخول</a>
</p>
</div>
</div>
</div>
| {
"pile_set_name": "Github"
} |
/* PKIXCertPathValidatorResult.java -- PKIX cert path builder result
Copyright (C) 2003 Free Software Foundation, Inc.
This file is part of GNU Classpath.
GNU Classpath is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
GNU Classpath is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with GNU Classpath; see the file COPYING. If not, write to the
Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA.
Linking this library statically or dynamically with other modules is
making a combined work based on this library. Thus, the terms and
conditions of the GNU General Public License cover the whole
combination.
As a special exception, the copyright holders of this library give you
permission to link this library with independent modules to produce an
executable, regardless of the license terms of these independent
modules, and to copy and distribute the resulting executable under
terms of your choice, provided that you also meet, for each linked
independent module, the terms and conditions of the license of that
module. An independent module is a module which is not derived from
or based on this library. If you modify this library, you may extend
this exception to your version of the library, but you are not
obligated to do so. If you do not wish to do so, delete this
exception statement from your version. */
package java.security.cert;
import java.security.PublicKey;
/**
* Results returned by the {@link
* CertPathValidator#validate(java.security.cert.CertPath,java.security.cert.CertPathParameters)}
* method for PKIX {@link CertPathValidator}s.
*
* @see CertPathValidator
*/
public class PKIXCertPathValidatorResult implements CertPathValidatorResult
{
// Fields.
// ------------------------------------------------------------------------
/** The trust anchor. */
private final TrustAnchor trustAnchor;
/** The root node of the policy tree. */
private final PolicyNode policyTree;
/** The subject's public key. */
private final PublicKey subjectPublicKey;
// Constructor.
// ------------------------------------------------------------------------
/**
* Creates a new PKIXCertPathValidatorResult.
*
* @param trustAnchor The trust anchor.
* @param policyTree The root node of the policy tree.
* @param subjectPublicKey The public key.
* @throws NullPointerException If either <i>trustAnchor</i> or
* <i>subjectPublicKey</i> is null.
*/
public PKIXCertPathValidatorResult(TrustAnchor trustAnchor,
PolicyNode policyTree,
PublicKey subjectPublicKey)
{
if (trustAnchor == null || subjectPublicKey == null)
throw new NullPointerException();
this.trustAnchor = trustAnchor;
this.policyTree = policyTree;
this.subjectPublicKey = subjectPublicKey;
}
// Instance methods.
// ------------------------------------------------------------------------
/**
* Returns the trust anchor.
*
* @return The trust anchor.
*/
public TrustAnchor getTrustAnchor()
{
return trustAnchor;
}
/**
* Returns the root node of the policy tree.
*
* @return The root node of the policy tree.
*/
public PolicyNode getPolicyTree()
{
return policyTree;
}
/**
* Returns the subject public key.
*
* @return The subject public key.
*/
public PublicKey getPublicKey()
{
return subjectPublicKey;
}
/**
* Returns a copy of this object.
*
* @return The copy.
*/
public Object clone()
{
return new PKIXCertPathValidatorResult(trustAnchor, policyTree,
subjectPublicKey);
}
/**
* Returns a printable string representation of this result.
*
* @return A printable string representation of this result.
*/
public String toString()
{
return "[ Trust Anchor=" + trustAnchor + "; Policy Tree="
+ policyTree + "; Subject Public Key=" + subjectPublicKey + " ]";
}
}
| {
"pile_set_name": "Github"
} |
// Copyright 1998-2017 Epic Games, Inc. All Rights Reserved.
/*=============================================================================
D3D12Stats.cpp:RHI Stats and timing implementation.
=============================================================================*/
#include "D3D12RHIPrivate.h"
#include "Engine/Engine.h"
#include "Engine/GameViewportClient.h"
namespace D3D12RHI
{
/**
 * Begin GPU profiling for a new frame.
 *
 * Latches the game-thread trigger flags (ordinary GPU profile vs.
 * hitch profile) into private copies, decides whether this frame's
 * draw events should be tracked, starts the whole-frame timing query
 * and opens the root "FRAME" event when draw events are enabled.
 */
void FD3DGPUProfiler::BeginFrame(FD3D12DynamicRHI* InRHI)
{
CurrentEventNode = NULL;
check(!bTrackingEvents);
check(!CurrentEventNodeFrame); // this should have already been cleaned up at the end of the previous frame
// latch the bools from the game thread into our private copy
bLatchedGProfilingGPU = GTriggerGPUProfile;
bLatchedGProfilingGPUHitches = GTriggerGPUHitchProfile;
if (bLatchedGProfilingGPUHitches)
{
bLatchedGProfilingGPU = false; // we do NOT permit an ordinary GPU profile during hitch profiles
}
// if we are starting a hitch profile or this frame is a gpu profile, then save off the state of the draw events
if (bLatchedGProfilingGPU || (!bPreviousLatchedGProfilingGPUHitches && bLatchedGProfilingGPUHitches))
{
bOriginalGEmitDrawEvents = GEmitDrawEvents;
}
if (bLatchedGProfilingGPU || bLatchedGProfilingGPUHitches)
{
if (bLatchedGProfilingGPUHitches && GPUHitchDebounce)
{
// if we are doing hitches and we had a recent hitch, wait to recover
// the reasoning is that collecting the hitch report may itself hitch the GPU
GPUHitchDebounce--;
}
else
{
GEmitDrawEvents = true; // thwart an attempt to turn this off on the game side
bTrackingEvents = true;
CurrentEventNodeFrame = new FD3D12EventNodeFrame(GetParentAdapter());
CurrentEventNodeFrame->StartFrame();
}
}
else if (bPreviousLatchedGProfilingGPUHitches)
{
// hitch profiler is turning off, clear history and restore draw events
GPUHitchEventNodeFrames.Empty();
GEmitDrawEvents = bOriginalGEmitDrawEvents;
}
bPreviousLatchedGProfilingGPUHitches = bLatchedGProfilingGPUHitches;
// whole-frame GPU timestamp pair; closed again in EndFrame()
FrameTiming.StartTiming();
if (GEmitDrawEvents)
{
PushEvent(TEXT("FRAME"), FColor(0, 255, 0, 255));
}
}
}
/**
 * End GPU profiling for the current frame.
 *
 * Closes the root "FRAME" event and the whole-frame timing query,
 * publishes GGPUFrameTime (preferring the hardware-reported frame time
 * when available), and then either dumps the event tree (ordinary GPU
 * profile) or feeds the hitch detector, which keeps a short history of
 * recent frames and dumps them all when a CPU-side hitch is observed.
 */
void FD3DGPUProfiler::EndFrame(FD3D12DynamicRHI* InRHI)
{
if (GEmitDrawEvents)
{
PopEvent();
check(StackDepth == 0);
}
FrameTiming.EndTiming();
// convert GPU ticks into the cycle-count units GGPUFrameTime uses
if (FrameTiming.IsSupported())
{
uint64 GPUTiming = FrameTiming.GetTiming();
uint64 GPUFreq = FrameTiming.GetTimingFrequency();
GGPUFrameTime = FMath::TruncToInt(double(GPUTiming) / double(GPUFreq) / FPlatformTime::GetSecondsPerCycle());
}
else
{
GGPUFrameTime = 0;
}
// a hardware-reported frame time, when available, overrides the query result
double HwGpuFrameTime = 0.0;
if (InRHI->GetHardwareGPUFrameTime(HwGpuFrameTime))
{
GGPUFrameTime = HwGpuFrameTime;
}
// if we have a frame open, close it now.
if (CurrentEventNodeFrame)
{
CurrentEventNodeFrame->EndFrame();
}
check(!bTrackingEvents || bLatchedGProfilingGPU || bLatchedGProfilingGPUHitches);
check(!bTrackingEvents || CurrentEventNodeFrame);
if (bLatchedGProfilingGPU)
{
if (bTrackingEvents)
{
GEmitDrawEvents = bOriginalGEmitDrawEvents;
UE_LOG(LogD3D12RHI, Log, TEXT(""));
UE_LOG(LogD3D12RHI, Log, TEXT(""));
CurrentEventNodeFrame->DumpEventTree();
GTriggerGPUProfile = false;
bLatchedGProfilingGPU = false;
if (RHIConfig::ShouldSaveScreenshotAfterProfilingGPU()
&& GEngine->GameViewport)
{
GEngine->GameViewport->Exec(NULL, TEXT("SCREENSHOT"), *GLog);
}
}
}
else if (bLatchedGProfilingGPUHitches)
{
//@todo this really detects any hitch, even one on the game thread.
// it would be nice to restrict the test to stalls on D3D, but for now...
// this needs to be out here because bTrackingEvents is false during the hitch debounce
static double LastTime = -1.0;
double Now = FPlatformTime::Seconds();
if (bTrackingEvents)
{
/** How long, in seconds, a frame must be to be considered a hitch **/
const float HitchThreshold = RHIConfig::GetGPUHitchThreshold();
float ThisTime = Now - LastTime;
bool bHitched = (ThisTime > HitchThreshold) && LastTime > 0.0 && CurrentEventNodeFrame;
if (bHitched)
{
UE_LOG(LogD3D12RHI, Warning, TEXT("*******************************************************************************"));
UE_LOG(LogD3D12RHI, Warning, TEXT("********** Hitch detected on CPU, frametime = %6.1fms"), ThisTime * 1000.0f);
UE_LOG(LogD3D12RHI, Warning, TEXT("*******************************************************************************"));
// dump the buffered history first (oldest to newest), then the current frame
for (int32 Frame = 0; Frame < GPUHitchEventNodeFrames.Num(); Frame++)
{
UE_LOG(LogD3D12RHI, Warning, TEXT(""));
UE_LOG(LogD3D12RHI, Warning, TEXT(""));
UE_LOG(LogD3D12RHI, Warning, TEXT("********** GPU Frame: Current - %d"), GPUHitchEventNodeFrames.Num() - Frame);
GPUHitchEventNodeFrames[Frame].DumpEventTree();
}
UE_LOG(LogD3D12RHI, Warning, TEXT(""));
UE_LOG(LogD3D12RHI, Warning, TEXT(""));
UE_LOG(LogD3D12RHI, Warning, TEXT("********** GPU Frame: Current"));
CurrentEventNodeFrame->DumpEventTree();
UE_LOG(LogD3D12RHI, Warning, TEXT("*******************************************************************************"));
UE_LOG(LogD3D12RHI, Warning, TEXT("********** End Hitch GPU Profile"));
UE_LOG(LogD3D12RHI, Warning, TEXT("*******************************************************************************"));
if (GEngine->GameViewport)
{
GEngine->GameViewport->Exec(NULL, TEXT("SCREENSHOT"), *GLog);
}
GPUHitchDebounce = 5; // don't trigger this again for a while
GPUHitchEventNodeFrames.Empty(); // clear history
}
else if (CurrentEventNodeFrame) // this will be null for discarded frames while recovering from a recent hitch
{
/** How many old frames to buffer for hitch reports **/
static const int32 HitchHistorySize = 4;
if (GPUHitchEventNodeFrames.Num() >= HitchHistorySize)
{
GPUHitchEventNodeFrames.RemoveAt(0);
}
GPUHitchEventNodeFrames.Add((FD3D12EventNodeFrame*)CurrentEventNodeFrame);
CurrentEventNodeFrame = NULL; // prevent deletion of this below; we kept it in the history
}
}
LastTime = Now;
}
bTrackingEvents = false;
// safe when CurrentEventNodeFrame is NULL (frame kept in the hitch history)
delete CurrentEventNodeFrame;
CurrentEventNodeFrame = NULL;
}
/**
 * Push a named profiling event: forwards to the base FGPUProfiler
 * bookkeeping and, when WITH_DX_PERF is enabled, also emits a D3DPERF
 * marker so external GPU capture tools can see it.
 */
void FD3DGPUProfiler::PushEvent(const TCHAR* Name, FColor Color)
{
#if WITH_DX_PERF
D3DPERF_BeginEvent(Color.DWColor(), Name);
#endif
FGPUProfiler::PushEvent(Name, Color);
}
/**
 * Pop the current profiling event; mirror of PushEvent() including the
 * optional D3DPERF marker.
 */
void FD3DGPUProfiler::PopEvent()
{
#if WITH_DX_PERF
D3DPERF_EndEvent();
#endif
FGPUProfiler::PopEvent();
}
/** Start per-event tracking for this frame: clear the event tree and
 *  begin the root (whole frame) timing query. */
void FD3D12EventNodeFrame::StartFrame()
{
EventTree.Reset();
RootEventTiming.StartTiming();
}
/** End this frame of per-event tracking; only closes the root timing
 *  query, it does not block on the result (see GetRootTimingResults). */
void FD3D12EventNodeFrame::EndFrame()
{
RootEventTiming.EndTiming();
}
/**
 * Resolve the whole-frame GPU time for this frame node, in seconds.
 * Blocks the CPU until the query result is ready (GetTiming(true)) and
 * converts GPU ticks to seconds via the timestamp frequency.
 * Returns 0 when GPU timing queries are not supported.
 */
float FD3D12EventNodeFrame::GetRootTimingResults()
{
	double RootResult = 0.0; /* was `0.0f`: use a double literal for a double */
	if (RootEventTiming.IsSupported())
	{
		const uint64 GPUTiming = RootEventTiming.GetTiming(true);
		const uint64 GPUFreq = RootEventTiming.GetTimingFrequency();
		RootResult = double(GPUTiming) / double(GPUFreq);
	}
	return (float)RootResult;
}
/** Intentionally a no-op on D3D12 -- presumably disjoint timestamp
 *  queries (a D3D11 concept) have no equivalent here; confirm. */
void FD3D12EventNodeFrame::LogDisjointQuery()
{
}
/**
 * Resolve this event's GPU time in seconds, blocking the CPU until the
 * query result is available.  Returns 0 when timing is unsupported.
 * Note the double -> float narrowing on assignment to Result.
 */
float FD3D12EventNode::GetTiming()
{
float Result = 0;
if (Timing.IsSupported())
{
// Get the timing result and block the CPU until it is ready
const uint64 GPUTiming = Timing.GetTiming(true);
const uint64 GPUFreq = Timing.GetTimingFrequency();
Result = double(GPUTiming) / double(GPUFreq);
}
return Result;
}
/**
 * Account a buffer allocation or release in the RHI memory stats.
 * The stat bucket is chosen from BufferType (constant/index/vertex,
 * anything else counted as structured); bAllocating selects between
 * INC and DEC.  The two ladders must stay symmetric so every INC has a
 * matching DEC for the same bucket.
 */
void UpdateBufferStats(FD3D12ResourceLocation* ResourceLocation, bool bAllocating, uint32 BufferType)
{
uint64 RequestedSize = ResourceLocation->GetSize();
const bool bUniformBuffer = BufferType == D3D12_BUFFER_TYPE_CONSTANT;
const bool bIndexBuffer = BufferType == D3D12_BUFFER_TYPE_INDEX;
const bool bVertexBuffer = BufferType == D3D12_BUFFER_TYPE_VERTEX;
if (bAllocating)
{
if (bUniformBuffer)
{
INC_MEMORY_STAT_BY(STAT_UniformBufferMemory, RequestedSize);
}
else if (bIndexBuffer)
{
INC_MEMORY_STAT_BY(STAT_IndexBufferMemory, RequestedSize);
}
else if (bVertexBuffer)
{
INC_MEMORY_STAT_BY(STAT_VertexBufferMemory, RequestedSize);
}
else
{
INC_MEMORY_STAT_BY(STAT_StructuredBufferMemory, RequestedSize);
}
}
else
{
if (bUniformBuffer)
{
DEC_MEMORY_STAT_BY(STAT_UniformBufferMemory, RequestedSize);
}
else if (bIndexBuffer)
{
DEC_MEMORY_STAT_BY(STAT_IndexBufferMemory, RequestedSize);
}
else if (bVertexBuffer)
{
DEC_MEMORY_STAT_BY(STAT_VertexBufferMemory, RequestedSize);
}
else
{
DEC_MEMORY_STAT_BY(STAT_StructuredBufferMemory, RequestedSize);
}
}
} | {
"pile_set_name": "Github"
} |
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fake
import (
restclient "k8s.io/client-go/rest"
core "k8s.io/client-go/testing"
api "k8s.io/kubernetes/pkg/apis/core"
)
// Bind records a pod binding as a "create" action on the "bindings"
// subresource of pods.  The fake clientset performs no real work:
// Invokes runs whatever reactors are registered on c.Fake, and the
// returned error is whatever those reactors produce.
//
// NOTE(review): fields are assigned one by one rather than via a
// composite literal -- presumably Verb/Resource/Subresource/Object are
// promoted from fields embedded in CreateActionImpl; confirm before
// restructuring.
func (c *FakePods) Bind(binding *api.Binding) error {
action := core.CreateActionImpl{}
action.Verb = "create"
action.Resource = podsResource
action.Subresource = "bindings"
action.Object = binding
_, err := c.Fake.Invokes(action, binding)
return err
}
// GetLogs records a generic "get" action on the "logs" subresource of
// pods in the fake's namespace, carrying opts as the action value, and
// returns a fresh empty restclient.Request (the fake never contacts a
// server; the request is a placeholder so callers compile).
// Note: the pod name parameter is not referenced in the body, so it is
// not recorded on the action; only opts is.  The Invokes result and
// error are deliberately discarded.
func (c *FakePods) GetLogs(name string, opts *api.PodLogOptions) *restclient.Request {
action := core.GenericActionImpl{}
action.Verb = "get"
action.Namespace = c.ns
action.Resource = podsResource
action.Subresource = "logs"
action.Value = opts
_, _ = c.Fake.Invokes(action, &api.Pod{})
return &restclient.Request{}
}
| {
"pile_set_name": "Github"
} |
// Copyright 2015, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Injection point for custom user configurations. See README for details
//
// ** Custom implementation starts here **
// GOOGLETEST_CM0002 DO NOT DELETE
#ifndef GMOCK_INCLUDE_GMOCK_INTERNAL_CUSTOM_GMOCK_PORT_H_
#define GMOCK_INCLUDE_GMOCK_INTERNAL_CUSTOM_GMOCK_PORT_H_
#endif // GMOCK_INCLUDE_GMOCK_INTERNAL_CUSTOM_GMOCK_PORT_H_
| {
"pile_set_name": "Github"
} |
#!/bin/sh
# install - install a program, script, or datafile
scriptversion=2004-04-01.17
# This originates from X11R5 (mit/util/scripts/install.sh), which was
# later released in X11R6 (xc/config/util/install.sh) with the
# following copyright and license.
#
# Copyright (C) 1994 X Consortium
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC-
# TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Except as contained in this notice, the name of the X Consortium shall not
# be used in advertising or otherwise to promote the sale, use or other deal-
# ings in this Software without prior written authorization from the X Consor-
# tium.
#
#
# FSF changes to this file are in the public domain.
#
# Calling this script install-sh is preferred over install.sh, to prevent
# `make' implicit rules from creating a file called install from it
# when there is no Makefile.
#
# This script is compatible with the BSD install script, but was written
# from scratch. It can only install one file at a time, a restriction
# shared with many OS's install programs.
# set DOITPROG to echo to test this script
# Don't use :- since 4.3BSD and earlier shells don't like it.
doit="${DOITPROG-}"
# put in absolute paths if you don't have them in your path; or use env. vars.
mvprog="${MVPROG-mv}"
cpprog="${CPPROG-cp}"
chmodprog="${CHMODPROG-chmod}"
chownprog="${CHOWNPROG-chown}"
chgrpprog="${CHGRPPROG-chgrp}"
stripprog="${STRIPPROG-strip}"
rmprog="${RMPROG-rm}"
mkdirprog="${MKDIRPROG-mkdir}"
transformbasename=
transform_arg=
instcmd="$mvprog"
chmodcmd="$chmodprog 0755"
chowncmd=
chgrpcmd=
stripcmd=
rmcmd="$rmprog -f"
mvcmd="$mvprog"
src=
dst=
dir_arg=
usage="Usage: $0 [OPTION]... SRCFILE DSTFILE
or: $0 [OPTION]... SRCFILES... DIRECTORY
or: $0 -d DIRECTORIES...
In the first form, install SRCFILE to DSTFILE, removing SRCFILE by default.
In the second, create the directory path DIR.
Options:
-b=TRANSFORMBASENAME
-c copy source (using $cpprog) instead of moving (using $mvprog).
-d create directories instead of installing files.
-g GROUP $chgrp installed files to GROUP.
-m MODE $chmod installed files to MODE.
-o USER $chown installed files to USER.
-s strip installed files (using $stripprog).
-t=TRANSFORM
--help display this help and exit.
--version display version info and exit.
Environment variables override the default commands:
CHGRPPROG CHMODPROG CHOWNPROG CPPROG MKDIRPROG MVPROG RMPROG STRIPPROG
"
while test -n "$1"; do
case $1 in
-b=*) transformbasename=`echo $1 | sed 's/-b=//'`
shift
continue;;
-c) instcmd=$cpprog
shift
continue;;
-d) dir_arg=true
shift
continue;;
-g) chgrpcmd="$chgrpprog $2"
shift
shift
continue;;
--help) echo "$usage"; exit 0;;
-m) chmodcmd="$chmodprog $2"
shift
shift
continue;;
-o) chowncmd="$chownprog $2"
shift
shift
continue;;
-s) stripcmd=$stripprog
shift
continue;;
-t=*) transformarg=`echo $1 | sed 's/-t=//'`
shift
continue;;
--version) echo "$0 $scriptversion"; exit 0;;
*) # When -d is used, all remaining arguments are directories to create.
test -n "$dir_arg" && break
# Otherwise, the last argument is the destination. Remove it from $@.
for arg
do
if test -n "$dstarg"; then
# $@ is not empty: it contains at least $arg.
set fnord "$@" "$dstarg"
shift # fnord
fi
shift # arg
dstarg=$arg
done
break;;
esac
done
if test -z "$1"; then
if test -z "$dir_arg"; then
echo "$0: no input file specified." >&2
exit 1
fi
# It's OK to call `install-sh -d' without argument.
# This can happen when creating conditional directories.
exit 0
fi
# Main loop: install each source file (or, with -d, create each
# directory argument).  One file at a time, via a temp file in the
# destination directory that is atomically renamed into place.
for src
do
# Protect names starting with `-'.
case $src in
-*) src=./$src ;;
esac
if test -n "$dir_arg"; then
# -d mode: the argument IS the destination directory; no source.
dst=$src
src=
if test -d "$dst"; then
instcmd=:
chmodcmd=
else
instcmd=$mkdirprog
fi
else
# Waiting for this to be detected by the "$instcmd $src $dsttmp" command
# might cause directories to be created, which would be especially bad
# if $src (and thus $dsttmp) contains '*'.
if test ! -f "$src" && test ! -d "$src"; then
echo "$0: $src does not exist." >&2
exit 1
fi
if test -z "$dstarg"; then
echo "$0: no destination specified." >&2
exit 1
fi
dst=$dstarg
# Protect names starting with `-'.
case $dst in
-*) dst=./$dst ;;
esac
# If destination is a directory, append the input filename; won't work
# if double slashes aren't ignored.
if test -d "$dst"; then
dst=$dst/`basename "$src"`
fi
fi
# This sed command emulates the dirname command.
dstdir=`echo "$dst" | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'`
# Make sure that the destination directory exists.
# Skip lots of stat calls in the usual case.
if test ! -d "$dstdir"; then
defaultIFS='
	'
IFS="${IFS-$defaultIFS}"
oIFS=$IFS
# Some sh's can't handle IFS=/ for some reason.
IFS='%'
set - `echo "$dstdir" | sed -e 's@/@%@g' -e 's@^%@/@'`
IFS=$oIFS
# Create the directory path one component at a time.
pathcomp=
while test $# -ne 0 ; do
pathcomp=$pathcomp$1
shift
if test ! -d "$pathcomp"; then
$mkdirprog "$pathcomp" || lasterr=$?
# mkdir can fail with a `File exist' error in case several
# install-sh are creating the directory concurrently.  This
# is OK.
test ! -d "$pathcomp" && { (exit ${lasterr-1}); exit; }
fi
pathcomp=$pathcomp/
done
fi
if test -n "$dir_arg"; then
# -d mode: create/adjust the directory itself.
$doit $instcmd "$dst" \
&& { test -z "$chowncmd" || $doit $chowncmd "$dst"; } \
&& { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } \
&& { test -z "$stripcmd" || $doit $stripcmd "$dst"; } \
&& { test -z "$chmodcmd" || $doit $chmodcmd "$dst"; }
else
# If we're going to rename the final executable, determine the name now.
if test -z "$transformarg"; then
dstfile=`basename "$dst"`
else
dstfile=`basename "$dst" $transformbasename \
| sed $transformarg`$transformbasename
fi
# don't allow the sed command to completely eliminate the filename.
test -z "$dstfile" && dstfile=`basename "$dst"`
# Make a couple of temp file names in the proper directory.
dsttmp=$dstdir/_inst.$$_
rmtmp=$dstdir/_rm.$$_
# Trap to clean up those temp files at exit.
trap 'status=$?; rm -f "$dsttmp" "$rmtmp" && exit $status' 0
trap '(exit $?); exit' 1 2 13 15
# Move or copy the file name to the temp name
$doit $instcmd "$src" "$dsttmp" &&
# and set any options; do chmod last to preserve setuid bits.
#
# If any of these fail, we abort the whole thing.  If we want to
# ignore errors from any of these, just make sure not to ignore
# errors from the above "$doit $instcmd $src $dsttmp" command.
#
{ test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } \
&& { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } \
&& { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } \
&& { test -z "$chmodcmd" || $doit $chmodcmd "$dsttmp"; } &&
# Now rename the file to the real destination.
{ $doit $mvcmd -f "$dsttmp" "$dstdir/$dstfile" 2>/dev/null \
|| {
# The rename failed, perhaps because mv can't rename something else
# to itself, or perhaps because mv is so ancient that it does not
# support -f.
# Now remove or move aside any old file at destination location.
# We try this two ways since rm can't unlink itself on some
# systems and the destination file might be busy for other
# reasons.  In this case, the final cleanup might fail but the new
# file should still install successfully.
{
if test -f "$dstdir/$dstfile"; then
$doit $rmcmd -f "$dstdir/$dstfile" 2>/dev/null \
|| $doit $mvcmd -f "$dstdir/$dstfile" "$rmtmp" 2>/dev/null \
|| {
echo "$0: cannot unlink or rename $dstdir/$dstfile" >&2
(exit 1); exit
}
else
:
fi
} &&
# Now rename the file to the real destination.
$doit $mvcmd "$dsttmp" "$dstdir/$dstfile"
}
}
fi || { (exit 1); exit; }
done
# The final little trick to "correctly" pass the exit status to the exit trap.
{
(exit 0); exit
}
# Local variables:
# eval: (add-hook 'write-file-hooks 'time-stamp)
# time-stamp-start: "scriptversion="
# time-stamp-format: "%:y-%02m-%02d.%02H"
# time-stamp-end: "$"
# End:
| {
"pile_set_name": "Github"
} |
# Translation of Odoo Server.
# This file contains the translation of the following modules:
# * crm_iap_lead
#
msgid ""
msgstr ""
"Project-Id-Version: Odoo Server 13.0\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2020-04-27 09:21+0000\n"
"PO-Revision-Date: 2020-04-27 09:21+0000\n"
"Last-Translator: \n"
"Language-Team: \n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: \n"
"Plural-Forms: \n"
#. module: crm_iap_lead
#: code:addons/crm_iap_lead/models/crm_iap_lead_mining_request.py:0
#, python-format
msgid "%d credits will be consumed to find %d companies."
msgstr ""
#. module: crm_iap_lead
#: model_terms:ir.ui.view,arch_db:crm_iap_lead.lead_message_template
msgid "(Time Now)"
msgstr ""
#. module: crm_iap_lead
#: model_terms:ir.ui.view,arch_db:crm_iap_lead.lead_message_template
msgid "<b>Contacts</b>"
msgstr ""
#. module: crm_iap_lead
#: model_terms:ir.ui.view,arch_db:crm_iap_lead.lead_message_template
msgid "<b>Phone :</b>"
msgstr ""
#. module: crm_iap_lead
#: model_terms:ir.ui.view,arch_db:crm_iap_lead.lead_message_template
msgid "<b>Timezone : </b>"
msgstr ""
#. module: crm_iap_lead
#: model:mail.template,body_html:crm_iap_lead.lead_generation_no_credits
msgid ""
"<div style=\"margin: 0px; padding: 0px;\">\n"
" <p>Dear,</p>\n"
" <p>There are no more credits on your IAP Lead Generation account.<br/>\n"
" You can charge your IAP Lead Generation account in the settings of the CRM app.<br/></p>\n"
" <p>Best regards,</p>\n"
" <p>Odoo S.A.</p>\n"
"</div>"
msgstr ""
#. module: crm_iap_lead
#: code:addons/crm_iap_lead/models/crm_iap_lead_mining_request.py:0
#, python-format
msgid ""
"<p class=\"o_view_nocontent_empty_folder\">\n"
" No leads found\n"
" </p><p>\n"
" No leads could be generated according to your search criteria\n"
" </p>"
msgstr ""
#. module: crm_iap_lead
#: code:addons/crm_iap_lead/models/crm_iap_lead_mining_request.py:0
#, python-format
msgid ""
"<p class=\"o_view_nocontent_empty_folder\">\n"
" No opportunities found\n"
" </p><p>\n"
" No opportunities could be generated according to your search criteria\n"
" </p>"
msgstr ""
#. module: crm_iap_lead
#: model_terms:ir.ui.view,arch_db:crm_iap_lead.crm_iap_lead_mining_request_form
msgid "<span class=\"o_stat_text\">Leads</span>"
msgstr ""
#. module: crm_iap_lead
#: model_terms:ir.ui.view,arch_db:crm_iap_lead.crm_iap_lead_mining_request_form
msgid "<span class=\"o_stat_text\">Opportunities</span>"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.industry,name:crm_iap_lead.crm_iap_lead_industry_238
msgid "Automobiles & Components"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.industry,name:crm_iap_lead.crm_iap_lead_industry_157
msgid "Banks"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model,name:crm_iap_lead.model_crm_iap_lead_mining_request
msgid "CRM Lead Mining Request"
msgstr ""
#. module: crm_iap_lead
#: model_terms:ir.ui.view,arch_db:crm_iap_lead.crm_iap_lead_mining_request_form
msgid "Cancel"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.industry,name:crm_iap_lead.crm_iap_lead_industry_162
msgid "Capital Goods"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_industry__color
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_role__color
msgid "Color Index"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.industry,name:crm_iap_lead.crm_iap_lead_industry_163
msgid "Commercial & Professional Services"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields.selection,name:crm_iap_lead.selection__crm_iap_lead_mining_request__search_type__companies
#: model_terms:ir.ui.view,arch_db:crm_iap_lead.crm_iap_lead_mining_request_form
msgid "Companies"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields.selection,name:crm_iap_lead.selection__crm_iap_lead_mining_request__search_type__people
msgid "Companies and their Contacts"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_mining_request__company_size_max
msgid "Company Size Max"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.industry,name:crm_iap_lead.crm_iap_lead_industry_167
msgid "Construction Materials"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.industry,name:crm_iap_lead.crm_iap_lead_industry_30
msgid "Consumer Discretionary"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.industry,name:crm_iap_lead.crm_iap_lead_industry_239
msgid "Consumer Durables & Apparel"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.industry,name:crm_iap_lead.crm_iap_lead_industry_150
msgid "Consumer Services"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.industry,name:crm_iap_lead.crm_iap_lead_industry_33
msgid "Consumer Staples"
msgstr ""
#. module: crm_iap_lead
#: model_terms:ir.ui.view,arch_db:crm_iap_lead.crm_iap_lead_mining_request_form
msgid "Contacts"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_mining_request__country_ids
msgid "Countries"
msgstr ""
#. module: crm_iap_lead
#: code:addons/crm_iap_lead/models/crm_iap_lead_mining_request.py:0
#, python-format
msgid "Create a Lead Mining Request"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_helpers__create_uid
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_industry__create_uid
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_mining_request__create_uid
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_role__create_uid
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_seniority__create_uid
msgid "Created by"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_helpers__create_date
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_industry__create_date
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_mining_request__create_date
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_role__create_date
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_seniority__create_date
msgid "Created on"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_helpers__display_name
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_industry__display_name
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_mining_request__display_name
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_role__display_name
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_seniority__display_name
msgid "Display Name"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.industry,name:crm_iap_lead.crm_iap_lead_industry_151
msgid "Diversified Consumer Services"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.industry,name:crm_iap_lead.crm_iap_lead_industry_158
msgid "Diversified Financial Services"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.industry,name:crm_iap_lead.crm_iap_lead_industry_159
msgid "Diversified Financials"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields.selection,name:crm_iap_lead.selection__crm_iap_lead_mining_request__state__done
#: model_terms:ir.ui.view,arch_db:crm_iap_lead.crm_iap_lead_mining_request_search
msgid "Done"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields.selection,name:crm_iap_lead.selection__crm_iap_lead_mining_request__state__draft
#: model_terms:ir.ui.view,arch_db:crm_iap_lead.crm_iap_lead_mining_request_search
msgid "Draft"
msgstr ""
#. module: crm_iap_lead
#: model_terms:ir.ui.view,arch_db:crm_iap_lead.lead_message_template
msgid "Email"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.industry,name:crm_iap_lead.crm_iap_lead_industry_156
msgid "Energy Equipment & Services"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_mining_request__error
#: model:ir.model.fields.selection,name:crm_iap_lead.selection__crm_iap_lead_mining_request__state__error
#: model_terms:ir.ui.view,arch_db:crm_iap_lead.crm_iap_lead_mining_request_search
msgid "Error"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model,name:crm_iap_lead.model_ir_model_fields
msgid "Fields"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_mining_request__contact_filter_type
msgid "Filter on"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_mining_request__filter_on_size
msgid "Filter on Size"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.industry,name:crm_iap_lead.crm_iap_lead_industry_153
msgid "Food & Staples Retailing"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.industry,name:crm_iap_lead.crm_iap_lead_industry_154
msgid "Food, Beverage & Tobacco"
msgstr ""
#. module: crm_iap_lead
#: model_terms:ir.ui.view,arch_db:crm_iap_lead.crm_iap_lead_mining_request_form
msgid "From"
msgstr ""
#. module: crm_iap_lead
#. openerp-web
#: code:addons/crm_iap_lead/static/src/xml/leads_tree_generate_leads_views.xml:0
#, python-format
msgid "Generate Leads"
msgstr ""
#. module: crm_iap_lead
#: code:addons/crm_iap_lead/models/crm_iap_lead_mining_request.py:0
#, python-format
msgid "Generate new leads based on their country, industry, size, etc."
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_mining_request__lead_ids
msgid "Generated Lead / Opportunity"
msgstr ""
#. module: crm_iap_lead
#: model_terms:ir.ui.view,arch_db:crm_iap_lead.crm_iap_lead_mining_request_search
msgid "Group By"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.industry,name:crm_iap_lead.crm_iap_lead_industry_160
msgid "Health Care Equipment & Services"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model,name:crm_iap_lead.model_crm_iap_lead_helpers
msgid "Helper methods for crm_iap_lead modules"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.industry,name:crm_iap_lead.crm_iap_lead_industry_155
msgid "Household & Personal Products"
msgstr ""
#. module: crm_iap_lead
#: model:mail.template,subject:crm_iap_lead.lead_generation_no_credits
msgid "IAP Lead Generation Notification"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_helpers__id
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_industry__id
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_mining_request__id
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_role__id
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_seniority__id
msgid "ID"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.industry,name:crm_iap_lead.crm_iap_lead_industry_168
msgid "Independent Power and Renewable Electricity Producers"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.industry,name:crm_iap_lead.crm_iap_lead_industry_146
msgid "Industrials"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_mining_request__industry_ids
msgid "Industries"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model,name:crm_iap_lead.model_crm_iap_lead_industry
msgid "Industry Tag"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.industry,name:crm_iap_lead.crm_iap_lead_industry_69
msgid "Insurance"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_helpers____last_update
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_industry____last_update
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_mining_request____last_update
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_role____last_update
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_seniority____last_update
msgid "Last Modified on"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_helpers__write_uid
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_industry__write_uid
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_mining_request__write_uid
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_role__write_uid
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_seniority__write_uid
msgid "Last Updated by"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_helpers__write_date
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_industry__write_date
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_mining_request__write_date
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_role__write_date
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_seniority__write_date
msgid "Last Updated on"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields.selection,name:crm_iap_lead.selection__crm_iap_lead_mining_request__lead_type__lead
msgid "Lead"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_mining_request__lead_contacts_credits
msgid "Lead Contacts Credits"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_mining_request__lead_credits
msgid "Lead Credits"
msgstr ""
#. module: crm_iap_lead
#: model:ir.ui.menu,name:crm_iap_lead.crm_menu_lead_generation
msgid "Lead Generation"
msgstr ""
#. module: crm_iap_lead
#: model_terms:ir.ui.view,arch_db:crm_iap_lead.crm_iap_lead_mining_request_form
msgid "Lead Information"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_lead__lead_mining_request_id
#: model_terms:ir.ui.view,arch_db:crm_iap_lead.crm_iap_lead_mining_request_search
msgid "Lead Mining Request"
msgstr ""
#. module: crm_iap_lead
#: model:ir.actions.act_window,name:crm_iap_lead.crm_iap_lead_mining_request_action
#: model:ir.ui.menu,name:crm_iap_lead.crm_iap_lead_mining_request_menu_action
msgid "Lead Mining Requests"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_mining_request__lead_total_credits
msgid "Lead Total Credits"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model,name:crm_iap_lead.model_crm_lead
msgid "Lead/Opportunity"
msgstr ""
#. module: crm_iap_lead
#: model_terms:ir.ui.view,arch_db:crm_iap_lead.crm_iap_lead_mining_request_search
msgid "Leads"
msgstr ""
#. module: crm_iap_lead
#: model_terms:ir.ui.view,arch_db:crm_iap_lead.crm_iap_lead_mining_request_form
msgid ""
"Make sure you know if you have to be GDPR compliant for storing personal "
"data."
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.industry,name:crm_iap_lead.crm_iap_lead_industry_148
msgid "Materials"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.industry,name:crm_iap_lead.crm_iap_lead_industry_86
msgid "Media"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_seniority__name
#: model_terms:ir.ui.view,arch_db:crm_iap_lead.lead_message_template
msgid "Name"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.constraint,message:crm_iap_lead.constraint_crm_iap_lead_seniority_name_uniq
msgid "Name already exists!"
msgstr ""
#. module: crm_iap_lead
#: code:addons/crm_iap_lead/models/crm_iap_lead_mining_request.py:0
#: code:addons/crm_iap_lead/models/crm_iap_lead_mining_request.py:0
#: code:addons/crm_iap_lead/models/crm_iap_lead_mining_request.py:0
#: code:addons/crm_iap_lead/models/crm_iap_lead_mining_request.py:0
#, python-format
msgid "New"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_mining_request__contact_number
msgid "Number of Contacts"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_mining_request__leads_count
msgid "Number of Generated Leads"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_mining_request__lead_number
msgid "Number of Leads"
msgstr ""
#. module: crm_iap_lead
#: model_terms:ir.ui.view,arch_db:crm_iap_lead.crm_iap_lead_mining_request_search
msgid "Opportunities"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields.selection,name:crm_iap_lead.selection__crm_iap_lead_mining_request__lead_type__opportunity
msgid "Opportunity"
msgstr ""
#. module: crm_iap_lead
#: model_terms:ir.ui.view,arch_db:crm_iap_lead.lead_message_template
msgid "Opportunity created by Odoo Lead Generation"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_mining_request__role_ids
msgid "Other Roles"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model,name:crm_iap_lead.model_crm_iap_lead_role
msgid "People Role"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model,name:crm_iap_lead.model_crm_iap_lead_seniority
msgid "People Seniority"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.industry,name:crm_iap_lead.crm_iap_lead_industry_161
msgid "Pharmaceuticals, Biotechnology & Life Sciences"
msgstr ""
#. module: crm_iap_lead
#: model_terms:ir.ui.view,arch_db:crm_iap_lead.lead_message_template
msgid "Phone"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_mining_request__preferred_role_id
msgid "Preferred Role"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.industry,name:crm_iap_lead.crm_iap_lead_industry_114
msgid "Real Estate"
msgstr ""
#. module: crm_iap_lead
#: model_terms:ir.ui.view,arch_db:crm_iap_lead.crm_iap_lead_mining_request_form
msgid "Request"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_mining_request__name
msgid "Request Number"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.industry,name:crm_iap_lead.crm_iap_lead_industry_152
msgid "Retailing"
msgstr ""
#. module: crm_iap_lead
#: model_terms:ir.ui.view,arch_db:crm_iap_lead.crm_iap_lead_mining_request_form
msgid "Retry"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_industry__reveal_id
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_role__reveal_id
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_seniority__reveal_id
msgid "Reveal"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_lead__reveal_id
msgid "Reveal ID"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields.selection,name:crm_iap_lead.selection__crm_iap_lead_mining_request__contact_filter_type__role
msgid "Role"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_role__name
msgid "Role Name"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.constraint,message:crm_iap_lead.constraint_crm_iap_lead_role_name_uniq
msgid "Role name already exists!"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_mining_request__team_id
#: model_terms:ir.ui.view,arch_db:crm_iap_lead.crm_iap_lead_mining_request_search
msgid "Sales Team"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_mining_request__user_id
#: model_terms:ir.ui.view,arch_db:crm_iap_lead.crm_iap_lead_mining_request_search
msgid "Salesperson"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.industry,name:crm_iap_lead.crm_iap_lead_industry_164
msgid "Semiconductors & Semiconductor Equipment"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_mining_request__seniority_id
#: model:ir.model.fields.selection,name:crm_iap_lead.selection__crm_iap_lead_mining_request__contact_filter_type__seniority
msgid "Seniority"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_mining_request__company_size_min
msgid "Size"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.industry,name:crm_iap_lead.crm_iap_lead_industry_165
msgid "Software & Services"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_mining_request__state_ids
msgid "States"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_mining_request__state
msgid "Status"
msgstr ""
#. module: crm_iap_lead
#: model_terms:ir.ui.view,arch_db:crm_iap_lead.crm_iap_lead_mining_request_form
msgid "Submit"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_industry__name
msgid "Tag Name"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.constraint,message:crm_iap_lead.constraint_crm_iap_lead_industry_name_uniq
msgid "Tag name already exists!"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_mining_request__tag_ids
msgid "Tags"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_mining_request__search_type
msgid "Target"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.industry,name:crm_iap_lead.crm_iap_lead_industry_166
msgid "Technology Hardware & Equipment"
msgstr ""
#. module: crm_iap_lead
#: model_terms:ir.ui.view,arch_db:crm_iap_lead.lead_message_template
msgid "Technology Used :"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.industry,name:crm_iap_lead.crm_iap_lead_industry_149
msgid "Telecommunication Services"
msgstr ""
#. module: crm_iap_lead
#: code:addons/crm_iap_lead/models/crm_iap_lead_mining_request.py:0
#, python-format
msgid "This makes a total of %d credits for this request."
msgstr ""
#. module: crm_iap_lead
#: model_terms:ir.ui.view,arch_db:crm_iap_lead.lead_message_template
msgid "Title"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.industry,name:crm_iap_lead.crm_iap_lead_industry_136
msgid "Transportation"
msgstr ""
#. module: crm_iap_lead
#: model:ir.model.fields,field_description:crm_iap_lead.field_crm_iap_lead_mining_request__lead_type
#: model_terms:ir.ui.view,arch_db:crm_iap_lead.crm_iap_lead_mining_request_search
msgid "Type"
msgstr ""
#. module: crm_iap_lead
#: code:addons/crm_iap_lead/models/crm_iap_lead_mining_request.py:0
#, python-format
msgid ""
"Up to %d additional credits will be consumed to identify %d contacts per "
"company."
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.industry,name:crm_iap_lead.crm_iap_lead_industry_138
msgid "Utilities"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.role,name:crm_iap_lead.crm_iap_lead_role_1
msgid "ceo"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.role,name:crm_iap_lead.crm_iap_lead_role_2
msgid "communications"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.role,name:crm_iap_lead.crm_iap_lead_role_3
msgid "consulting"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.role,name:crm_iap_lead.crm_iap_lead_role_4
msgid "customer_service"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.seniority,name:crm_iap_lead.crm_iap_lead_seniority_1
msgid "director"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.role,name:crm_iap_lead.crm_iap_lead_role_5
msgid "education"
msgstr ""
#. module: crm_iap_lead
#: model_terms:ir.ui.view,arch_db:crm_iap_lead.crm_iap_lead_mining_request_form
msgid "employees"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.role,name:crm_iap_lead.crm_iap_lead_role_6
msgid "engineering"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.seniority,name:crm_iap_lead.crm_iap_lead_seniority_2
msgid "executive"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.role,name:crm_iap_lead.crm_iap_lead_role_7
msgid "finance"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.role,name:crm_iap_lead.crm_iap_lead_role_8
msgid "founder"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.role,name:crm_iap_lead.crm_iap_lead_role_9
msgid "health_professional"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.role,name:crm_iap_lead.crm_iap_lead_role_10
msgid "human_resources"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.role,name:crm_iap_lead.crm_iap_lead_role_11
msgid "information_technology"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.role,name:crm_iap_lead.crm_iap_lead_role_12
msgid "legal"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.seniority,name:crm_iap_lead.crm_iap_lead_seniority_3
msgid "manager"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.role,name:crm_iap_lead.crm_iap_lead_role_13
msgid "marketing"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.role,name:crm_iap_lead.crm_iap_lead_role_14
msgid "operations"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.role,name:crm_iap_lead.crm_iap_lead_role_15
msgid "owner"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.role,name:crm_iap_lead.crm_iap_lead_role_16
msgid "president"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.role,name:crm_iap_lead.crm_iap_lead_role_17
msgid "product"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.role,name:crm_iap_lead.crm_iap_lead_role_18
msgid "public_relations"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.role,name:crm_iap_lead.crm_iap_lead_role_19
msgid "real_estate"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.role,name:crm_iap_lead.crm_iap_lead_role_20
msgid "recruiting"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.role,name:crm_iap_lead.crm_iap_lead_role_21
msgid "research"
msgstr ""
#. module: crm_iap_lead
#: model:crm.iap.lead.role,name:crm_iap_lead.crm_iap_lead_role_22
msgid "sale"
msgstr ""
#. module: crm_iap_lead
#: model_terms:ir.ui.view,arch_db:crm_iap_lead.crm_iap_lead_mining_request_form
msgid "to"
msgstr ""
| {
"pile_set_name": "Github"
} |
package net.famzangl.minecraft.minebot.ai.tools.rate;
import net.famzangl.minecraft.minebot.ai.path.world.BlockFloatMap;
import net.minecraft.item.ItemStack;
public class OrRater extends Rater {
private final Rater[] raters;
public OrRater(BlockFloatMap values, Rater... raters) {
super(createName(raters, "|"), values);
this.raters = raters;
}
@Override
protected double getPow(ItemStack item, int forBlockAndMeta) {
double pow = 1;
for (Rater r : raters) {
pow = Math.max(r.getPow(item, forBlockAndMeta), pow);
}
return pow;
}
@Override
protected boolean isAppleciable(ItemStack item, int forBlockAndMeta) {
for (Rater rater : raters) {
if (rater.isAppleciable(item, forBlockAndMeta)) {
return true;
}
}
return false;
}
} | {
"pile_set_name": "Github"
} |
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"hide_input": false
},
"outputs": [],
"source": [
"#hide\n",
"#default_exp test\n",
"from nbdev.showdoc import show_doc"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"from nbdev.imports import *\n",
"from nbdev.sync import *\n",
"from nbdev.export import *\n",
"from nbdev.export import _mk_flag_re\n",
"from nbdev.export2html import _re_notebook2script\n",
"\n",
"from nbconvert.preprocessors import ExecutePreprocessor"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Extract tests\n",
"\n",
"> The functions that grab the cells containing tests (filtering with potential flags) and execute them"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Everything that is not an exported cell is considered a test, so you should make sure your notebooks can all run smoothly (and fast) if you want to use this functionality as the CLI. You can mark some cells with special flags (like slow) to make sure they are only executed when you authorize it. Those flags should be configured in your `settings.ini` (separated by a `|` if you have several of them). You can also apply flags to one entire notebook by using the `all` option, e.g. `#all_slow`, in code cells.\n",
"\n",
"If `tst_flags=slow|fastai` in `settings.ini`, you can:\n",
"- mark slow tests with `#slow` flag\n",
"- mark tests that depend on fastai with the `#fastai` flag."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Detect flags"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The following functions detect the cells that should be excluded from the tests (unless their special flag is passed)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"_re_all_flag = ReTstFlags(True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def get_all_flags(cells):\n",
" \"Check for all test flags in `cells`\"\n",
" if len(Config().get('tst_flags',''))==0: return []\n",
" result = []\n",
" for cell in cells:\n",
" if cell['cell_type'] == 'code': result.extend(_re_all_flag.findall(cell['source']))\n",
" return set(result)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"nb = read_nb(\"04_test.ipynb\")\n",
"assert get_all_flags(nb['cells']) == set()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#hide\n",
"tst_flags_bck=Config().get('tst_flags')\n",
"try:\n",
" Config()['tst_flags'] = 'fastai|vslow'\n",
" if hasattr(_re_all_flag, '_re'): del _re_all_flag._re\n",
" cells = [{'cell_type': cell_type, 'source': source} for cell_type, source in [\n",
" ('code', '# export\\nfrom local.core import *'), \n",
" ('markdown', '# title of some kind'), \n",
" ('code', '# all_vslow \\n# all_fastai'),\n",
" ('code', '#all_vslow\\n# all_fastai'),\n",
" ('code', '#all_vslow '),\n",
" ('code', '# all_fastai'),\n",
" ('code', '#all_fastai\\n')]]\n",
" for i in range(3):\n",
" test_eq(set(['vslow','fastai']), get_all_flags(cells))\n",
" cells.pop(2)\n",
" for i in range(2):\n",
" test_eq(set(['fastai']), get_all_flags(cells))\n",
" cells.pop(2)\n",
" test_eq(set(), get_all_flags(cells))\n",
"finally:\n",
" Config()['tst_flags'] = tst_flags_bck\n",
" del _re_all_flag._re"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"_re_flags = ReTstFlags(False)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def get_cell_flags(cell):\n",
" \"Check for any special test flag in `cell`\"\n",
" if cell['cell_type'] != 'code' or len(Config().get('tst_flags',''))==0: return []\n",
" return _re_flags.findall(cell['source'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"test_eq(get_cell_flags({'cell_type': 'code', 'source': \"#hide\\n\"}), [])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#hide\n",
"for expected, flag in [(['fastai'], 'fastai'), ([], 'vslow')]:\n",
" test_eq(expected, get_cell_flags(nbformat.v4.new_code_cell(f\"#hide\\n# {flag}\\n\")))\n",
" test_eq(expected, get_cell_flags(nbformat.v4.new_code_cell(f\"# {flag}\\n#hide\\n\")))\n",
" test_eq(expected, get_cell_flags(nbformat.v4.new_code_cell(f\"#{flag}\\n#hide\\n\")))\n",
" test_eq([], get_cell_flags(nbformat.v4.new_code_cell(\"#hide\\n\")))\n",
" test_eq([], get_cell_flags(nbformat.v4.new_code_cell(f\"# all_{flag}\")))\n",
" test_eq([], get_cell_flags(nbformat.v4.new_code_cell(f\"#all_{flag}\")))\n",
"tst_flags_bck=Config().get('tst_flags')\n",
"try:\n",
" Config()['tst_flags'] = 'fastai|vslow'\n",
" del _re_flags._re\n",
" test_eq(['vslow'], get_cell_flags(nbformat.v4.new_code_cell(f\"#hide\\n# vslow\\n\")))\n",
" test_eq(['vslow'], get_cell_flags(nbformat.v4.new_code_cell(f\"#hide\\n#vslow\\n\")))\n",
" test_eq(['vslow', 'fastai'], get_cell_flags(nbformat.v4.new_code_cell(f\"#hide\\n# vslow\\n# fastai\")))\n",
" test_eq(['fastai', 'vslow'], get_cell_flags(nbformat.v4.new_code_cell(f\"#fastai\\n#vslow\")))\n",
"finally:\n",
" Config()['tst_flags'] = tst_flags_bck\n",
" del _re_flags._re"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Testing a notebook"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"class NoExportPreprocessor(ExecutePreprocessor):\n",
" \"An `ExecutePreprocessor` that executes cells that don't have a flag in `flags`\"\n",
" def __init__(self, flags, **kwargs):\n",
" self.flags = flags\n",
" super().__init__(**kwargs)\n",
"\n",
" def preprocess_cell(self, cell, resources, index):\n",
" if 'source' not in cell or cell['cell_type'] != \"code\": return cell, resources\n",
" for f in get_cell_flags(cell):\n",
" if f not in self.flags: return cell, resources\n",
" if check_re(cell, _re_notebook2script): return cell, resources\n",
" return super().preprocess_cell(cell, resources, index)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#export\n",
"def test_nb(fn, flags=None):\n",
" \"Execute tests in notebook in `fn` with `flags`\"\n",
" os.environ[\"IN_TEST\"] = '1'\n",
" if flags is None: flags = []\n",
" try:\n",
" nb = read_nb(fn)\n",
" for f in get_all_flags(nb['cells']):\n",
" if f not in flags: return\n",
" ep = NoExportPreprocessor(flags, timeout=600, kernel_name='python3')\n",
" pnb = nbformat.from_dict(nb)\n",
" ep.preprocess(pnb)\n",
" finally: os.environ.pop(\"IN_TEST\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"test_nb('index.ipynb')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Export-"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Converted 00_export.ipynb.\n",
"Converted 01_sync.ipynb.\n",
"Converted 02_showdoc.ipynb.\n",
"Converted 03_export2html.ipynb.\n",
"Converted 04_test.ipynb.\n",
"Converted 05_merge.ipynb.\n",
"Converted 06_cli.ipynb.\n",
"Converted 07_clean.ipynb.\n",
"Converted 99_search.ipynb.\n",
"Converted index.ipynb.\n",
"Converted tutorial.ipynb.\n"
]
}
],
"source": [
"#hide\n",
"from nbdev.export import *\n",
"notebook2script()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
| {
"pile_set_name": "Github"
} |
#version 450
layout(local_size_x = 1) in;
#define T mat4
#define PACKING std430
// Column-major storage buffer: two matrices in the default GLSL
// (column_major) memory layout.
layout(set = 0, binding = 0, PACKING) buffer SSBOCol
{
	layout(column_major) T col_major0;
	layout(column_major) T col_major1;
};
// Row-major storage buffer: the same matrix type stored transposed in
// memory, so loads/stores across the two buffers must convert layouts.
layout(set = 0, binding = 1, PACKING) buffer SSBORow
{
	layout(row_major) T row_major0;
	layout(row_major) T row_major1;
};
// Round-trips a column-major matrix through a local variable:
// reads col_major0, then writes the copy into col_major1.
void load_store_to_variable_col_major()
{
	// Load to variable.
	T loaded = col_major0;
	// Store from variable.
	col_major1 = loaded;
}
// Round-trips a row-major matrix through a local variable.
// NOTE(review): unlike the col_major variant above, this writes back to
// the same member (row_major0) it loaded from rather than row_major1 —
// presumably intentional to exercise a row-major load plus store of the
// same location, but worth confirming.
void load_store_to_variable_row_major()
{
	// Load to variable.
	T loaded = row_major0;
	// Store to variable.
	row_major0 = loaded;
}
// Whole-matrix copy between two column-major members (no layout change).
void copy_col_major_to_col_major()
{
	// Copy col -> col
	col_major0 = col_major1;
}
// Whole-matrix copy from a row-major member into a column-major one;
// the implementation must transpose the in-memory representation.
void copy_row_major_to_col_major()
{
	// Copy row -> col
	col_major0 = row_major0;
}
// Whole-matrix copy from a column-major member into a row-major one;
// the implementation must transpose the in-memory representation.
void copy_col_major_to_row_major()
{
	// Copy col -> row
	row_major0 = col_major0;
}
// Whole-matrix copy between two row-major members (no layout change).
void copy_row_major_to_row_major()
{
	// Copy row -> row
	row_major0 = row_major1;
}
// Copies a single matrix column in each direction across the layouts.
// Statement order matters: the second assignment reads the value the
// first assignment just wrote into col_major0[1].
void copy_columns()
{
	// Copy columns/rows.
	col_major0[1] = row_major0[1];
	row_major0[1] = col_major0[1];
}
// Copies one scalar element in each direction across the layouts.
// As in copy_columns, the second assignment reads the freshly written
// col_major0[0][1], so the two statements must not be reordered.
void copy_elements()
{
	// Copy individual elements.
	col_major0[0][1] = row_major0[0][1];
	row_major0[0][1] = col_major0[0][1];
}
// Entry point: invokes every load/store/copy variant so code is emitted
// for each matrix-layout combination.
void main()
{
	load_store_to_variable_col_major();
	load_store_to_variable_row_major();
	copy_col_major_to_col_major();
	copy_col_major_to_row_major();
	copy_row_major_to_col_major();
	copy_row_major_to_row_major();
	copy_columns();
	copy_elements();
}
| {
"pile_set_name": "Github"
} |
define(function() {
	// All support tests are defined in their respective modules.
	// Start from an empty object (presumably populated by those
	// modules — confirm against the build).
	var support = {};
	return support;
});
| {
"pile_set_name": "Github"
} |
/**********************************************************************
* File: boxchar.cpp
* Description: Simple class to associate a Tesseract classification unit with
* its bounding box so that the boxes can be rotated as the image
* is rotated for degradation. Also includes routines to output
* the character-tagged boxes to a boxfile.
* Author: Ray Smith
* Created: Mon Nov 18 2013
*
* (C) Copyright 2013, Google Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************/
#include "boxchar.h"
#include <stddef.h>
#include <algorithm>
#include "fileio.h"
#include "genericvector.h"
#include "ndminx.h"
#include "normstrngs.h"
#include "tprintf.h"
#include "unicharset.h"
#include "unicode/uchar.h" // from libicu
// Absolute Ratio of dx:dy or dy:dx to be a newline.
const int kMinNewlineRatio = 5;
namespace tesseract {
// Constructs a BoxChar holding a copy of the first `len` bytes of
// `utf8_str`; the bounding box starts out unset (NULL).
BoxChar::BoxChar(const char* utf8_str, int len) : ch_(utf8_str, len) {
  box_ = NULL;
}
BoxChar::~BoxChar() { boxDestroy(&box_); }
void BoxChar::AddBox(int x, int y, int width, int height) {
box_ = boxCreate(x, y, width, height);
}
/* static */
void BoxChar::TranslateBoxes(int xshift, int yshift,
std::vector<BoxChar*>* boxes) {
for (int i = 0; i < boxes->size(); ++i) {
BOX* box = (*boxes)[i]->box_;
if (box != NULL) {
box->x += xshift;
box->y += yshift;
}
}
}
// Prepares for writing the boxes to a file by inserting newlines, spaces,
// and re-ordering so the boxes are strictly left-to-right.
/* static */
void BoxChar::PrepareToWrite(std::vector<BoxChar*>* boxes) {
  // Decide the overall script direction/orientation from the whole
  // sequence, then let the passes below use those rules consistently.
  bool rtl_rules = ContainsMostlyRTL(*boxes);
  bool vertical_rules = MostlyVertical(*boxes);
  InsertNewlines(rtl_rules, vertical_rules, boxes);
  InsertSpaces(rtl_rules, vertical_rules, boxes);
  // Any box still NULL here indicates a bug in the passes above.
  for (int i = 0; i < boxes->size(); ++i) {
    if ((*boxes)[i]->box_ == NULL) tprintf("Null box at index %d\n", i);
  }
  if (rtl_rules) {
    ReorderRTLText(boxes);
  }
}
// Inserts newline (tab) characters into the vector at newline positions.
/* static */
void BoxChar::InsertNewlines(bool rtl_rules, bool vertical_rules,
                             std::vector<BoxChar*>* boxes) {
  int prev_i = -1;    // Index of the last box seen with a non-NULL rect.
  int max_shift = 0;  // Largest forward advance seen on the current line.
  for (int i = 0; i < boxes->size(); ++i) {
    Box* box = (*boxes)[i]->box_;
    if (box == NULL) {
      if (prev_i < 0 || prev_i < i - 1 || i + 1 == boxes->size()) {
        // Erase null boxes at the start of a line and after another null box.
        do {
          delete (*boxes)[i];
          boxes->erase(boxes->begin() + i);
          --i;
        } while (i >= 0 && i + 1 == boxes->size() && (*boxes)[i]->box_ == NULL);
      }
      continue;
    }
    if (prev_i >= 0) {
      Box* prev_box = (*boxes)[prev_i]->box_;
      // Signed advance from the previous box along the reading direction
      // (x for horizontal LTR, negated x for RTL, y for vertical).
      int shift = box->x - prev_box->x;
      if (vertical_rules) {
        shift = box->y - prev_box->y;
      } else if (rtl_rules) {
        shift = -shift;
      }
      if (-shift > max_shift) {
        // A backward jump larger than any forward advance seen so far:
        // this is a newline. Synthesize the tab's rect from the size and
        // position of the previous box.
        int width = prev_box->w;
        int height = prev_box->h;
        int x = prev_box->x + width;
        int y = prev_box->y;
        if (vertical_rules) {
          x = prev_box->x;
          y = prev_box->y + height;
        } else if (rtl_rules) {
          x = prev_box->x - width;
          if (x < 0) {
            tprintf("prev x = %d, width=%d\n", prev_box->x, width);
            x = 0;
          }
        }
        if (prev_i == i - 1) {
          // New character needed: insert a fresh tab BoxChar before i.
          BoxChar* new_box = new BoxChar("\t", 1);
          new_box->AddBox(x, y, width, height);
          new_box->page_ = (*boxes)[i]->page_;
          boxes->insert(boxes->begin() + i, new_box);
          ++i;
        } else {
          // Reuse the intervening null box as the newline (tab) marker.
          (*boxes)[i - 1]->AddBox(x, y, width, height);
          (*boxes)[i - 1]->ch_ = "\t";
        }
        max_shift = 0;  // Start measuring advances afresh on the new line.
      } else if (shift > max_shift) {
        max_shift = shift;
      }
    }
    prev_i = i;
  }
}
// Converts NULL boxes to space characters, with appropriate bounding boxes.
/* static */
void BoxChar::InsertSpaces(bool rtl_rules, bool vertical_rules,
                           std::vector<BoxChar*>* boxes) {
  // After InsertNewlines, any remaining null boxes are not newlines, and are
  // singletons, so add a box to each remaining null box.
  for (int i = 1; i + 1 < boxes->size(); ++i) {
    Box* box = (*boxes)[i]->box_;
    if (box == NULL) {
      // Synthesize a space rect spanning the gap between the neighbours.
      Box* prev = (*boxes)[i - 1]->box_;
      Box* next = (*boxes)[i + 1]->box_;
      ASSERT_HOST(prev != NULL && next != NULL);
      int top = MIN(prev->y, next->y);
      int bottom = MAX(prev->y + prev->h, next->y + next->h);
      int left = prev->x + prev->w;
      int right = next->x;
      if (vertical_rules) {
        top = prev->y + prev->h;
        bottom = next->y;
        left = MIN(prev->x, next->x);
        right = MAX(prev->x + prev->w, next->x + next->w);
      } else if (rtl_rules) {
        // With RTL we have to account for BiDi.
        // Right becomes the min left of all prior boxes back to the first
        // space or newline.
        right = prev->x;
        left = next->x + next->w;
        for (int j = i - 2;
             j >= 0 && (*boxes)[j]->ch_ != " " && (*boxes)[j]->ch_ != "\t";
             --j) {
          prev = (*boxes)[j]->box_;
          ASSERT_HOST(prev != NULL);
          if (prev->x < right) {
            right = prev->x;
          }
        }
        // Left becomes the max right of all next boxes forward to the first
        // space or newline.
        for (int j = i + 2; j < boxes->size() && (*boxes)[j]->box_ != NULL &&
                            (*boxes)[j]->ch_ != "\t";
             ++j) {
          next = (*boxes)[j]->box_;
          if (next->x + next->w > left) {
            left = next->x + next->w;
          }
        }
      }
      // Italic and stylized characters can produce negative spaces, which
      // Leptonica doesn't like, so clip to a positive size.
      if (right <= left) right = left + 1;
      if (bottom <= top) bottom = top + 1;
      (*boxes)[i]->AddBox(left, top, right - left, bottom - top);
      (*boxes)[i]->ch_ = " ";
    }
  }
}
// Reorders text in a right-to-left script in left-to-right order.
/* static */
void BoxChar::ReorderRTLText(std::vector<BoxChar*>* boxes) {
  // After adding newlines and spaces, this task is simply a matter of sorting
  // by left each group of boxes between newlines.
  BoxCharPtrSort sorter;
  int end = 0;
  for (int start = 0; start < boxes->size(); start = end + 1) {
    // Advance `end` to the next tab (newline marker) or the vector end;
    // [start, end) is one line.
    end = start + 1;
    while (end < boxes->size() && (*boxes)[end]->ch_ != "\t") ++end;
    sort(boxes->begin() + start, boxes->begin() + end, sorter);
  }
}
// Returns true if the vector contains mostly RTL characters.
/* static */
bool BoxChar::ContainsMostlyRTL(const std::vector<BoxChar*>& boxes) {
  // Counts RTL vs LTR codepoints over all boxes; strict majority wins.
  int num_rtl = 0, num_ltr = 0;
  for (int i = 0; i < boxes.size(); ++i) {
    // Convert the unichar to UTF32 representation
    GenericVector<char32> uni_vector;
    if (!UNICHAR::UTF8ToUnicode(boxes[i]->ch_.c_str(), &uni_vector)) {
      // Malformed UTF-8: log the raw bytes and skip this box.
      tprintf("Illegal utf8 in boxchar %d string:%s = ", i,
              boxes[i]->ch_.c_str());
      for (int c = 0; c < boxes[i]->ch_.size(); ++c) {
        tprintf(" 0x%x", boxes[i]->ch_[c]);
      }
      tprintf("\n");
      continue;
    }
    // Classify every codepoint by ICU direction; Arabic numbers are
    // counted on the RTL side here.
    for (int j = 0; j < uni_vector.size(); ++j) {
      UCharDirection dir = u_charDirection(uni_vector[j]);
      if (dir == U_RIGHT_TO_LEFT || dir == U_RIGHT_TO_LEFT_ARABIC ||
          dir == U_ARABIC_NUMBER) {
        ++num_rtl;
      } else {
        ++num_ltr;
      }
    }
  }
  return num_rtl > num_ltr;
}
// Returns true if the text is mostly laid out vertically.
/* static */
bool BoxChar::MostlyVertical(const std::vector<BoxChar*>& boxes) {
  inT64 total_dx = 0, total_dy = 0;
  for (int i = 1; i < boxes.size(); ++i) {
    // Only consecutive boxes on the same page contribute.
    if (boxes[i - 1]->box_ != NULL && boxes[i]->box_ != NULL &&
        boxes[i - 1]->page_ == boxes[i]->page_) {
      int dx = boxes[i]->box_->x - boxes[i - 1]->box_->x;
      int dy = boxes[i]->box_->y - boxes[i - 1]->box_->y;
      // Count only decisive steps where one axis dominates the other by
      // at least kMinNewlineRatio; squares are summed so big jumps
      // dominate the vote.
      if (abs(dx) > abs(dy) * kMinNewlineRatio ||
          abs(dy) > abs(dx) * kMinNewlineRatio) {
        total_dx += dx * dx;
        total_dy += dy * dy;
      }
    }
  }
  return total_dy > total_dx;
}
// Returns the total length of all the strings in the boxes.
/* static */
// Returns the total UTF-8 byte count over every box string.
/* static */
int BoxChar::TotalByteLength(const std::vector<BoxChar*>& boxes) {
  int length_sum = 0;
  for (size_t b = 0; b < boxes.size(); ++b) {
    length_sum += boxes[b]->ch_.size();
  }
  return length_sum;
}
// Rotate the boxes in [start_box, end_box) by the given rotation.
// The rotation is in radians clockwise about the given center.
/* static */
void BoxChar::RotateBoxes(float rotation, int xcenter, int ycenter,
                          int start_box, int end_box,
                          std::vector<BoxChar*>* boxes) {
  // Collect the non-NULL boxes into a Boxa so Leptonica can rotate them
  // all in one call.
  Boxa* orig = boxaCreate(0);
  for (int i = start_box; i < end_box; ++i) {
    BOX* box = (*boxes)[i]->box_;
    if (box) boxaAddBox(orig, box, L_CLONE);
  }
  Boxa* rotated = boxaRotate(orig, xcenter, ycenter, rotation);
  boxaDestroy(&orig);
  // Write the rotated boxes back; box_ind advances only for entries that
  // had a box, matching the order they were added above.
  for (int i = start_box, box_ind = 0; i < end_box; ++i) {
    if ((*boxes)[i]->box_) {
      boxDestroy(&((*boxes)[i]->box_));
      (*boxes)[i]->box_ = boxaGetBox(rotated, box_ind++, L_CLONE);
    }
  }
  boxaDestroy(&rotated);
}
const int kMaxLineLength = 1024;
/* static */
// Renders the boxes to Tesseract box-file format and writes the result
// to `filename` via File::WriteStringToFileOrDie.
void BoxChar::WriteTesseractBoxFile(const string& filename, int height,
                                    const std::vector<BoxChar*>& boxes) {
  string output = GetTesseractBoxStr(height, boxes);
  File::WriteStringToFileOrDie(output, filename);
}
/* static */
string BoxChar::GetTesseractBoxStr(int height,
                                   const std::vector<BoxChar*>& boxes) {
  string output;
  char buffer[kMaxLineLength];
  for (int i = 0; i < boxes.size(); ++i) {
    const Box* box = boxes[i]->box_;
    if (box == NULL) {
      // Every box must have been filled in by PrepareToWrite first.
      tprintf("Error: Call PrepareToWrite before WriteTesseractBoxFile!!\n");
      return "";
    }
    // One line per box: "<utf8> left bottom right top page", with y
    // coordinates flipped against the page height.
    int nbytes =
        snprintf(buffer, kMaxLineLength, "%s %d %d %d %d %d\n",
                 boxes[i]->ch_.c_str(), box->x, height - box->y - box->h,
                 box->x + box->w, height - box->y, boxes[i]->page_);
    output.append(buffer, nbytes);
  }
  return output;
}
} // namespace tesseract
| {
"pile_set_name": "Github"
} |
/*
* Copyright (c) 1983 Regents of the University of California.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#if defined(LIBC_SCCS) && !defined(lint)
static char sccsid[] = "@(#)closedir.c 5.9 (Berkeley) 2/23/91";
#endif /* LIBC_SCCS and not lint */
#include <sys/types.h>
#include <dirent.h>
#include <stdlib.h>
#include <unistd.h>
/*
* close a directory.
*/
int
closedir(dirp)
	register DIR *dirp;	/* K&R-style parameter declaration. */
{
	int fd;

	fd = dirp->dd_fd;
	dirp->dd_fd = -1;	/* Invalidate the handle before freeing. */
	dirp->dd_loc = 0;
	(void)free((void *)dirp->dd_buf);
	(void)free((void *)dirp);
	return(close(fd));	/* Result is the status of closing the fd. */
}
| {
"pile_set_name": "Github"
} |
/* $Id: SplitString.h 1156 2011-06-07 04:01:16Z bhagman $
||
|| @author Hernando Barragan <[email protected]>
|| @url http://wiring.org.co/
|| @contribution Brett Hagman <[email protected]>
|| @contribution Alexander Brevig <[email protected]>
||
|| @license Please see cores/Common/License.txt.
||
*/
#pragma once
#include "WVector.h"
#include "WString.h"
/** @brief split a delimited string list of integers into an array
* @param what
* @param delim
* @param splits
* @retval number of items returned in splits (same as splits.count())
* @note leading/trailing whitespace is removed from 'what' before parsing
* example: " 1,2,3,4,5" returns [1, 2, 3, 4, 5]
*/
int splitString(String& what, char delim, Vector<int>& splits);
/** @brief split a delimited string list into an array
* @param what
* @param delim
* @param splits
* @retval number of items returned in splits (same as splits.count())
* @note leading/trailing whitespace is removed from 'what' before parsing
* example: " a,b,c,d,e" returns ["a", "b", "c", "d", "e"]
*/
int splitString(String& what, char delim, Vector<String>& splits);
| {
"pile_set_name": "Github"
} |
name: Delphi
asset: delphi
parent: text
match:
extension:
equals:
- .dfm
- .dpr
| {
"pile_set_name": "Github"
} |
# New in version 1.3.3
* Fixed a crash when using ctags over SFTP.
* Fixed a crash when creating new files over SFTP.
* Make line highlight centered vertically around the cursor.
* Fixed stability issue when closing windows.
* Require UUID in bundle items, allows loading incomplete bundles.
# New in version 1.3.2
* Fixed a crash when closing document windows
* Fixed a crash when pasting text in visual mode
* Fixed a crash when saving fails due to permission denied
* Fixed a crash when closing another tab by clicking the X icon
* Fixed a crash when fuzzy filtering files
* Prevent symlink loops in file explorer
* Added support for submitting crash reports
# New in version 1.3.1
## Bugfixes
* Many memory leaks have been fixed.
* Garbage collection is no longer used.
* Externally modified documents that aren't edited in Vico are now automatically reverted.
To go back to the old behaviour of always asking if Vico should revert a document:
$ defaults write se.bzero.vico alwaysAskModifiedDocument -bool true
* Allow scrolling beyond last screenful with <ctrl-e>.
* Implement the ' and ` marks. Use `` to jump back to the last jump.
* Unbreak :quit behaviour.
* Caret screen update artifacts fixed.
* Implement detaching of split views to new windows (bound to <ctrl-w>D)
* <command-n> now opens a new window (<command-shift-N> opens a new untitled document)
* Fix a crash in the file explorer when filtering symlinked directories.
# New in version 1.3
* Project search using Ack
* Highlighting of matching smart pairs
* Added `zt`, `zb` and `zz` commands; scrolls the screen relative the caret position
* Block text objects added: `i[`, `ib`, `iB` and `i<`, as well as the 'a' counterparts
* "Breadcrumbs" folder navigation in file explorer
* Support for Finder aliases in the file explorer
* Added setting for blinking caret
* Added boolean 'clipboard' setting; when enabled (default off), yanks by default into '+' register (the clipboard)
* Ctrl-u and ctrl-w recognized in insert mode
* Standard Cocoa alt-backspace / alt-delete keys recognized in insert mode
* Command line helper can now open files in new windows with -n option
* Themes with dark backgrounds now use a white I-beam mouse cursor
* Added the `gv` command (re-select last selection)
* Global (uppercase) marks jump between files
* Mark locations are adjusted for inserted/deleted text
* All open documents can now be reverted at once when externally modified
* Added `+` and `-` commands
* Word completion now honors the `ignorecase` and `smartcase` settings
## Bugfixes
* Fixed delete problem with non-western input methods
* Fixed weird caret size and position with RTL unicode characters
* Copied bundles no longer results in duplicate bundle commands
* Fixed enabling of certain menu items, and make vi menu items respond to key events
* Tags files with absolute paths are now recognized
* Fixed cases where filesystem changes wasn't noticed by the file explorer
* Better handling of symlinked folders in the file explorer
* Make line number font size follow editor font size
* The '=' command now indents properly over multiple lines
* Tweaks for better TextMate bundle compatibility
* `:edit!` now reverts the open document
* Preserve POSIX permissions and uid/gid when saving over SFTP
* Command-clicking in explorer now selects the file
(changed, but not visible / yet usable for the user)
! Added themeing of underline style and color as well as text shadow
# New in version 1.2
* Lion compatibility bugs fixed
* Lion fullscreen support
* Fuzzy find no longer requires the toolbar to be visible
* Improved indentation behaviour (should match TextMate behaviour more closely)
* Shift operators (`<` and `>`) now honors a count in visual mode
* Added `gdefault` setting (use `:set gdefault`) -- makes `:s` always use `g` option (globally replace all occurrences on a line)
* Support transparent backgrounds in themes
* Reworked ex command parsing; allow for customizing and adding of new user-defined ex commands
* Added `gi` vi command; enter insert mode where insert mode was last exited)
* Fix creating new folders for SFTP
* Shows .htaccess and version control .ignore files in explorer by default
* Added `wrapscan` setting (enabled by default, use `:set nowrapscan` to turn it off) -- makes searches wrap around to beginning of document
* Added `@` vi command; execute content of a register as vi normal mode -- `q` is still missing, but you can yank into a register
* Expand `%` and `#` characters in ex edit commands
* `:set` can now toggle boolean variables with `:set var!` or `:set invvar`
| {
"pile_set_name": "Github"
} |
#
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
from cgf_utils import custom_resource_response
def handler(event, context):
    """Custom-resource handler that unconditionally reports success.

    Returns an empty data dict and the string "*" via
    custom_resource_response.success_response ("*" is presumably the
    physical resource id -- confirm against custom_resource_response).

    Args:
        event: The custom-resource event (unused).
        context: The Lambda context object (unused).
    """
    return custom_resource_response.success_response({}, "*")
| {
"pile_set_name": "Github"
} |
/* LibTomCrypt, modular cryptographic library -- Tom St Denis
*
* LibTomCrypt is a library that provides various cryptographic
* algorithms in a highly modular and flexible manner.
*
* The library is free for all purposes without any express
* guarantee it works.
*
* Tom St Denis, [email protected], http://libtomcrypt.com
*/
#include "tomcrypt.h"
/**
@file ctr_test.c
  CTR implementation, Tests against RFC 3686, Tom St Denis
*/
#ifdef LTC_CTR_MODE
/* Known-answer self-test for CTR mode using AES vectors; returns
   CRYPT_OK on success, CRYPT_NOP when testing is disabled or no AES
   cipher is registered, or an error/failure code otherwise. */
int ctr_test(void)
{
#ifdef LTC_NO_TEST
   return CRYPT_NOP;
#else
   /* Test vectors: key length, plaintext length, key, IV, plaintext,
      expected ciphertext. */
   static const struct {
      int keylen, msglen;
      unsigned char key[32], IV[16], pt[64], ct[64];
   } tests[] = {
/* 128-bit key, 16-byte pt */
{
   16, 16,
   {0xAE,0x68,0x52,0xF8,0x12,0x10,0x67,0xCC,0x4B,0xF7,0xA5,0x76,0x55,0x77,0xF3,0x9E },
   {0x00,0x00,0x00,0x30,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 },
   {0x53,0x69,0x6E,0x67,0x6C,0x65,0x20,0x62,0x6C,0x6F,0x63,0x6B,0x20,0x6D,0x73,0x67 },
   {0xE4,0x09,0x5D,0x4F,0xB7,0xA7,0xB3,0x79,0x2D,0x61,0x75,0xA3,0x26,0x13,0x11,0xB8 },
},
/* 128-bit key, 36-byte pt */
{
   16, 36,
   {0x76,0x91,0xBE,0x03,0x5E,0x50,0x20,0xA8,0xAC,0x6E,0x61,0x85,0x29,0xF9,0xA0,0xDC },
   {0x00,0xE0,0x01,0x7B,0x27,0x77,0x7F,0x3F,0x4A,0x17,0x86,0xF0,0x00,0x00,0x00,0x00 },
   {0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,0x0E,0x0F,
    0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17,0x18,0x19,0x1A,0x1B,0x1C,0x1D,0x1E,0x1F,
    0x20,0x21,0x22,0x23},
   {0xC1,0xCF,0x48,0xA8,0x9F,0x2F,0xFD,0xD9,0xCF,0x46,0x52,0xE9,0xEF,0xDB,0x72,0xD7,
    0x45,0x40,0xA4,0x2B,0xDE,0x6D,0x78,0x36,0xD5,0x9A,0x5C,0xEA,0xAE,0xF3,0x10,0x53,
    0x25,0xB2,0x07,0x2F },
},
};
   int idx, err, x;
   unsigned char buf[64];
   symmetric_CTR ctr;

   /* AES can be under rijndael or aes... try to find it */
   if ((idx = find_cipher("aes")) == -1) {
      if ((idx = find_cipher("rijndael")) == -1) {
         return CRYPT_NOP;
      }
   }

   /* Encrypt each vector in RFC 3686 mode (big-endian counter) and
      compare against the expected ciphertext. */
   for (x = 0; x < (int)(sizeof(tests)/sizeof(tests[0])); x++) {
      if ((err = ctr_start(idx, tests[x].IV, tests[x].key, tests[x].keylen, 0, CTR_COUNTER_BIG_ENDIAN|LTC_CTR_RFC3686, &ctr)) != CRYPT_OK) {
         return err;
      }
      if ((err = ctr_encrypt(tests[x].pt, buf, tests[x].msglen, &ctr)) != CRYPT_OK) {
         return err;
      }
      ctr_done(&ctr);
      if (XMEMCMP(buf, tests[x].ct, tests[x].msglen)) {
         return CRYPT_FAIL_TESTVECTOR;
      }
   }
   return CRYPT_OK;
#endif
}
#endif
/* $Source: /cvs/libtom/libtomcrypt/src/modes/ctr/ctr_test.c,v $ */
/* $Revision: 1.3 $ */
/* $Date: 2006/11/05 02:06:49 $ */
| {
"pile_set_name": "Github"
} |
<?php
/**
* RefTypeBuilder.php
*
* Builds reference type within a JOIN.
*
* PHP version 5
*
* LICENSE:
* Copyright (c) 2010-2014 Justin Swanhart and André Rothe
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* @author André Rothe <[email protected]>
* @copyright 2010-2014 Justin Swanhart and André Rothe
* @license http://www.debian.org/misc/bsd.license BSD License (3 Clause)
* @version SVN: $Id: RefTypeBuilder.php 830 2013-12-18 09:35:42Z [email protected] $
*
*/
require_once dirname(__FILE__) . '/../exceptions/UnsupportedFeatureException.php';
/**
* This class implements the references type within a JOIN.
* You can overwrite all functions to achieve another handling.
*
* @author André Rothe <[email protected]>
* @license http://www.debian.org/misc/bsd.license BSD License (3 Clause)
*
*/
class RefTypeBuilder {

    /**
     * Renders the parsed JOIN reference type.
     * false -> "", 'ON' -> " ON ", 'USING' -> " USING ";
     * any other value raises UnsupportedFeatureException.
     */
    public function build($parsed) {
        if ($parsed === false) {
            return "";
        }
        if ($parsed === 'ON' || $parsed === 'USING') {
            return " " . $parsed . " ";
        }
        // TODO: add more
        throw new UnsupportedFeatureException($parsed);
    }
}
?>
| {
"pile_set_name": "Github"
} |
string greeting
int32 number
| {
"pile_set_name": "Github"
} |
package nl.anchormen.sql4es.parse.sql;
import java.sql.SQLException;
import java.sql.Types;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.index.query.MatchAllQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.aggregations.AggregationBuilder;
import com.facebook.presto.sql.tree.AstVisitor;
import com.facebook.presto.sql.tree.Query;
import com.facebook.presto.sql.tree.QueryBody;
import com.facebook.presto.sql.tree.QuerySpecification;
import com.facebook.presto.sql.tree.Relation;
import com.facebook.presto.sql.tree.SelectItem;
import com.facebook.presto.sql.tree.SortItem;
import nl.anchormen.sql4es.model.BasicQueryState;
import nl.anchormen.sql4es.model.Column;
import nl.anchormen.sql4es.model.Column.Operation;
import nl.anchormen.sql4es.model.Heading;
import nl.anchormen.sql4es.model.OrderBy;
import nl.anchormen.sql4es.model.QuerySource;
import nl.anchormen.sql4es.model.Utils;
import nl.anchormen.sql4es.model.expression.ColumnReference;
import nl.anchormen.sql4es.model.expression.ICalculation;
import nl.anchormen.sql4es.model.expression.IComparison;
import nl.anchormen.sql4es.model.expression.SimpleCalculation;
/**
 * Interprets the parsed query and builds the appropriate ES query (a {@link SearchRequestBuilder} instance).
 * The other parsers within this package are used to parse their specific clause (WHERE, HAVING etc.)
 *
 * @author cversloot
 *
 */
public class QueryParser extends AstVisitor<ParseResult, Object>{
private final static SelectParser selectParser = new SelectParser();
private final static WhereParser whereParser = new WhereParser();
private final static HavingParser havingParser = new HavingParser();
private final static RelationParser relationParser = new RelationParser();
private final static GroupParser groupParser = new GroupParser();
private final static OrderByParser orderOarser = new OrderByParser();
private String sql;
private int maxRows = -1;
private Heading heading = new Heading();
private Properties props;
private Map<String, Map<String, Integer>> tableColumnInfo;
/**
* Builds the provided {@link SearchRequestBuilder} by parsing the {@link Query} using the properties provided.
* @param sql the original sql statement
* @param queryBody the Query parsed from the sql
// * @param searchReq the request to build
* @param props a set of properties to use in certain cases
	 * @param tableColumnInfo mapping from available tables to columns and their types
* @return an array containing [ {@link Heading}, {@link IComparison} having, List<{@link OrderBy}> orderings, Integer limit]
* @throws SQLException
*/
	public ParseResult parse(String sql, QueryBody queryBody, int maxRows,
			Properties props, Map<String, Map<String, Integer>> tableColumnInfo) throws SQLException{
		// Flatten line breaks so the later string fixes on `sql` work.
		this.sql = sql.replace("\r", " ").replace("\n", " ");// TODO: this removes linefeeds from string literals as well!
		this.props = props;
		this.maxRows = maxRows;
		this.tableColumnInfo = tableColumnInfo;
		// Only plain SELECT bodies (QuerySpecification) are supported;
		// parse errors are collected in the result and rethrown here.
		if(queryBody instanceof QuerySpecification){
			ParseResult result = queryBody.accept(this, null);
			if(result.getException() != null) throw result.getException();
			return result;
		}
		throw new SQLException("The provided query does not contain a QueryBody");
	}
	/**
	 * Parses a single SELECT specification into a {@link ParseResult}:
	 * heading, sources, query, optional aggregation, having, orderings and
	 * limit. Parse problems are returned inside the ParseResult rather
	 * than thrown.
	 */
	@Override
	protected ParseResult visitQuerySpecification(QuerySpecification node, Object obj){
		this.heading = new Heading();
		BasicQueryState state = new BasicQueryState(sql, heading, props);
		int limit = -1;
		AggregationBuilder aggregation = null;
		QueryBuilder query = null;
		IComparison having = null;
		List<OrderBy> orderings = new ArrayList<OrderBy>();
		boolean useCache = false;
		ParseResult subQuery = null;
		// check for distinct in combination with group by
		if(node.getSelect().isDistinct() && !node.getGroupBy().isEmpty()){
			state.addException("Unable to combine DISTINCT and GROUP BY within a single query");
			return new ParseResult(state.getException());
		};
		// get limit (possibly used by other parsers)
		if(node.getLimit().isPresent()){
			limit = Integer.parseInt(node.getLimit().get());
		}
		if(state.hasException()) return new ParseResult(state.getException());
		// get sources to fetch data from (may recursively parse a sub-query)
		if(node.getFrom().isPresent()){
			SourcesResult sr = getSources(node.getFrom().get(), state);
			useCache = sr.useCache;
			subQuery = sr.subQueryInfo;
		}
		// get columns to fetch (builds the header)
		for(SelectItem si : node.getSelect().getSelectItems()){
			si.accept(selectParser, state);
		}
		if(state.hasException()) return new ParseResult(state.getException());
		boolean requestScore = heading.hasLabel("_score");
		// Translate column references and their aliases back to their case sensitive forms
		heading.reorderAndFixColumns(this.sql, "select.+", ".+from");
		// create aggregation in case of DISTINCT
		if(node.getSelect().isDistinct()){
			aggregation = groupParser.addDistinctAggregation(state);
		}
		// add a Query (default match-all unless a WHERE clause is present)
		query = QueryBuilders.matchAllQuery();
		if(node.getWhere().isPresent()){
			query = whereParser.parse(node.getWhere().get(), state);
		}
		if(state.hasException()) return new ParseResult(state.getException());
		// parse group by and create aggregations accordingly
		if(node.getGroupBy() != null && node.getGroupBy().size() > 0){
			aggregation = groupParser.parse(node.getGroupBy(), state);
		}else if(heading.aggregateOnly()){
			aggregation = groupParser.buildFilterAggregation(query, heading);
		}
		if(state.hasException()) return new ParseResult(state.getException());
		// parse Having (is executed client side after results have been fetched)
		if(node.getHaving().isPresent()){
			having = node.getHaving().get().accept(havingParser, state);
		}
		// parse ORDER BY
		if(!node.getOrderBy().isEmpty()){
			for(SortItem si : node.getOrderBy()){
				OrderBy ob = si.accept(orderOarser, state);
				if(state.hasException()) return new ParseResult(state.getException());
				orderings.add(ob);
			}
		}
		if(state.hasException()) return new ParseResult(state.getException());
		ParseResult result = new ParseResult(heading, state.getSources(), query, aggregation, having, orderings, limit, useCache, requestScore);
		// Merge with a parsed sub-query, choosing the merge strategy by
		// which of the two (if any) carries an aggregation.
		if(subQuery != null)try{
			if(subQuery.getAggregation() == null && result.getAggregation() == null)
				result = mergeSelectWithSelect(result, subQuery);
			else if(subQuery.getAggregation() != null && result.getAggregation() == null)
				result = mergeSelectWithAgg(result, subQuery);
			else if(subQuery.getAggregation() == null && result.getAggregation() != null){
				result = mergeAggWithSelect(result, subQuery);
				if(result.getHeading().aggregateOnly()){
					AggregationBuilder agg = groupParser.buildFilterAggregation(result.getQuery(), result.getHeading());
					result.setAggregation(agg).setQuery(null);
				}else{
					BasicQueryState state2 = new BasicQueryState(sql, result.getHeading(), props);
					AggregationBuilder agg = groupParser.addDistinctAggregation(state2);
					result.setAggregation(agg);
				}
			}else throw new SQLException("Unable to merge the Sub query with the top query");
		}catch(SQLException e){
			return new ParseResult(e);
		}
		return result;
	}
/**
* Gets the sources to query from the provided Relation. Parsed relations are put inside the state
* @param relation
* @param state
// * @param searchReq
* @return if the set with relations contains the query cache identifier
*/
	private SourcesResult getSources(Relation relation, BasicQueryState state){
		List<QuerySource> sources = relation.accept(relationParser, state);
		boolean useCache = false;
		ParseResult subQueryInfo = null;
		if(state.hasException()) return new SourcesResult(false, null);
		if(sources.size() < 1) {
			state.addException("Specify atleast one valid table to execute the query on!");
			return new SourcesResult(false, null);
		}
		for(int i=0; i<sources.size(); i++){
			// The query-cache pseudo-table only toggles the flag and is
			// removed from the source list.
			if(sources.get(i).getSource().toLowerCase().equals(props.getProperty(Utils.PROP_QUERY_CACHE_TABLE, "query_cache"))){
				useCache = true;
				sources.remove(i);
				i--;
			}else if(sources.get(i).isSubQuery()){
				// Sub-queries are parsed recursively with a fresh parser;
				// the result is merged with the top query by the caller.
				QuerySource qs = sources.get(i);
				QueryParser subQueryParser = new QueryParser();
				try {
					subQueryInfo = subQueryParser.parse(qs.getSource(), qs.getQuery(), maxRows, props, tableColumnInfo);
				} catch (SQLException e) {
					state.addException("Unable to parse sub-query due to: "+e.getMessage());
				}
				//sources.remove(i);
				//i--;
			}
		}
		heading.setTypes(this.typesForColumns(sources));
		state.setRelations(sources);
		return new SourcesResult(useCache, subQueryInfo);
	}
/**
 * Merges two nested SELECT queries: 'SELECT a as b from (select myfield as a FROM mytype)'.
 * @param top the top SELECT
 * @param nested the nested SELECT
 * @return a new ParseResult in which the two selects have been merged
 * @throws SQLException in case this function is unable to merge the two queries
 */
private ParseResult mergeSelectWithSelect(ParseResult top, ParseResult nested) throws SQLException{
// Use the smallest positive limit; fall back to the larger one when either limit is unset (<= 0).
int limit = Math.min(top.getLimit(), nested.getLimit());
if(limit <= 0) limit = Math.max(top.getLimit(), nested.getLimit());
// Nested sorts come first; top-level sorts are appended after them.
List<OrderBy> sorts = nested.getSorts();
sorts.addAll(top.getSorts());
// Both WHERE clauses must hold, so AND them together.
QueryBuilder query = QueryBuilders.boolQuery().must(top.getQuery()).must(nested.getQuery());
boolean score = top.getRequestScore() || nested.getRequestScore();
boolean useCache = top.getUseCache() || nested.getUseCache();
Heading head = new Heading();
// 'SELECT *' on top simply exposes the nested heading unchanged.
if(top.getHeading().hasAllCols()) head = nested.getHeading();
else{
for(Column col : top.getHeading().columns()){
// Resolve each top-level column against the nested heading: by name first, then by alias.
Column col2 = nested.getHeading().getColumnByNameAndOp(col.getColumn(), Operation.NONE);
if(col2 == null) col2 = nested.getHeading().getColumnByLabel(col.getAlias());
if(col2 == null) throw new SQLException("Unable to determine column '"+col.getLabel()+"' within nested query");
String alias = (col.getAlias() == null ? col.getColumn() : col.getAlias());
head.add(new Column(col2.getColumn()).setAlias(alias).setSqlType(col2.getSqlType()));
}
}
// NOTE(review): unlike mergeSelectWithAgg/mergeAggWithSelect this does not call head.buildIndex() — confirm intended.
return new ParseResult(head, nested.getSources(), query, null, null, sorts, limit, useCache, score);
}
/**
 * Merges a top level SELECT query with its nested AGGREGATION.
 * @param top the top-level SELECT
 * @param nested the nested aggregation query
 * @return a new ParseResult representing the merged query
 * @throws SQLException when the top query has features that cannot be combined with an aggregation
 */
private ParseResult mergeSelectWithAgg(ParseResult top, ParseResult nested) throws SQLException{
// Aggregations have no per-document score, and a top-level WHERE cannot be pushed into the nested aggregation.
if(top.getRequestScore()) throw new SQLException("Unable to request a _score on an aggregation");
if(!(top.getQuery() instanceof MatchAllQueryBuilder)) throw new SQLException("Unable to combine a WHERE clause with a nested query");
// Use the smallest positive limit; fall back to the larger one when either limit is unset (<= 0).
int limit = Math.min(top.getLimit(), nested.getLimit());
if(limit <= 0) limit = Math.max(top.getLimit(), nested.getLimit());
List<OrderBy> sorts = nested.getSorts();
sorts.addAll(top.getSorts());
boolean useCache = top.getUseCache() || nested.getUseCache();
// The nested query/aggregation/having drive execution; the top query only reshapes the heading.
QueryBuilder aggQuery = nested.getQuery();
AggregationBuilder agg = nested.getAggregation();
IComparison having = nested.getHaving();
Heading head = new Heading();
if(top.getHeading().hasAllCols()) head = nested.getHeading();
else{
for(Column col : top.getHeading().columns()){
// Resolve each top-level column against the nested heading: by name first, then by alias.
Column col2 = nested.getHeading().getColumnByNameAndOp(col.getColumn(), Operation.NONE);
if(col2 == null) col2 = nested.getHeading().getColumnByLabel(col.getAlias());
if(col2 == null) throw new SQLException("Unable to determine column '"+col+"' within nested query");
// Move the resolved column over so the remainder can be added as hidden columns below.
nested.getHeading().remove(col2);
head.add(new Column(col2.getColumn(), col2.getOp()).setAlias(col.getAlias())
.setSqlType(col2.getSqlType()));
}
// Nested columns not selected at the top level are kept but marked invisible
// (they may still be required by calculations or the aggregation itself).
for(Column col2 : nested.getHeading().columns()){
head.add(new Column(col2.getColumn(), col2.getOp())
.setAlias(col2.getAlias())
.setCalculation(col2.getCalculation()).setSqlType(col2.getSqlType())
.setTable(col2.getTable(), col2.getTableAlias()).setVisible(false)
);
}
}
head.buildIndex();
return new ParseResult(head, nested.getSources(), aggQuery, agg, having, sorts, limit, useCache, false);
}
/**
 * Merges a top level aggregation query with an inner SELECT.
 * @param top the top-level aggregation
 * @param nested the nested SELECT
 * @return a new ParseResult representing the merged query
 * @throws SQLException when the nested query has features that cannot be combined with an aggregation
 */
private ParseResult mergeAggWithSelect(ParseResult top, ParseResult nested) throws SQLException {
if(nested.getRequestScore()) throw new SQLException("Unable to request a _score on an aggregation");
// Limit and sorts come from the aggregation (the outer query).
int limit = top.getLimit();
List<OrderBy> sorts = top.getSorts();
boolean useCache = top.getUseCache() || nested.getUseCache();
// Combine WHERE clauses: prefer whichever is non-trivial, AND them when both are.
QueryBuilder query = top.getQuery();
if(query instanceof MatchAllQueryBuilder) query = nested.getQuery();
else if(!(nested.getQuery() instanceof MatchAllQueryBuilder)) query = QueryBuilders.boolQuery().must(top.getQuery()).must(nested.getQuery());
AggregationBuilder agg = top.getAggregation();
IComparison having = top.getHaving();
Heading head = new Heading();
if(nested.getHeading().hasAllCols()){
head = top.getHeading();
}else{
for(Column col : top.getHeading().columns()){
if(col.hasCalculation()){
// Rewrite column references inside the calculation so they point at the nested query's fields.
translateCalculation(col.getCalculation(), nested.getHeading());
head.add(new Column(col.getColumn(), col.getOp()).setAlias(col.getAlias())
.setCalculation(col.getCalculation()).setSqlType(Types.FLOAT));
}else{
Column col2 = nested.getHeading().getColumnByNameAndOp(col.getColumn(), Operation.NONE);
if(col2 == null) col2 = nested.getHeading().getColumnByLabel(col.getAlias());
if(col2 == null && col.getOp() == Operation.COUNT){
// COUNT(...) does not need a backing nested column; keep it as-is.
head.add(col);
continue;
}else if(col2 == null) throw new SQLException("Unable to determine column '"+col.getLabel()+"' within nested query");
String alias = (col.getAlias() == null ? col.getColumn() : col.getAlias());
head.add(new Column(col2.getColumn(), col.getOp()).setAlias(alias).setVisible(col.isVisible())
.setSqlType(col2.getSqlType()));
}
}
}
head.buildIndex();
return new ParseResult(head, nested.getSources(), query, agg, having, sorts, limit, useCache, false);
}
/**
 * Traverses an {@link ICalculation} tree and rewrites column references so
 * that they point at the columns exposed by a nested query's heading.
 * References that cannot be resolved are left untouched.
 * @param calc the calculation tree to rewrite in place
 * @param nested the heading of the nested query to resolve references against
 * @throws SQLException propagated from recursive rewrites
 */
private void translateCalculation(ICalculation calc, Heading nested) throws SQLException{
	if(calc instanceof SimpleCalculation){
		// Composite node: rewrite both operands recursively.
		SimpleCalculation combo = (SimpleCalculation)calc;
		translateCalculation(combo.left(), nested);
		translateCalculation(combo.right(), nested);
		return;
	}
	if(!(calc instanceof ColumnReference)) return;
	// Leaf node: resolve by column name first, then by alias/label.
	Column reference = ((ColumnReference)calc).getColumn();
	Column resolved = nested.getColumnByNameAndOp(reference.getColumn(), Operation.NONE);
	if(resolved == null) resolved = nested.getColumnByLabel(reference.getAlias());
	if(resolved != null){
		reference.setColumn(resolved.getColumn());
	}
}
/**
 * Builds the SQL type mapping (column name to a java.sql.Types constant) for
 * the provided query sources, always including the implicit meta columns.
 * @param relations the sources the query executes on
 * @return a map from column name to its java.sql.Types value
 */
public Map<String, Integer> typesForColumns(List<QuerySource> relations){
	Map<String, Integer> columnTypes = new HashMap<String, Integer>();
	// The meta fields (_id/_type/_index) are always exposed as VARCHAR columns.
	columnTypes.put(Heading.ID, Types.VARCHAR);
	columnTypes.put(Heading.TYPE, Types.VARCHAR);
	columnTypes.put(Heading.INDEX, Types.VARCHAR);
	for(QuerySource relation : relations){
		// Only merge type info for sources we actually have metadata for.
		if(tableColumnInfo.containsKey(relation.getSource())){
			columnTypes.putAll(tableColumnInfo.get(relation.getSource()));
		}
	}
	return columnTypes;
}
/**
 * Simple value holder returned by getSources: whether the query cache should
 * be used and, when a sub-query was present, its parsed information.
 */
private class SourcesResult {
// True when the query-cache pseudo table was listed among the sources.
public boolean useCache;
// ParseResult of a sub-query found among the sources; null when there is none.
public ParseResult subQueryInfo;
public SourcesResult(boolean useCache, ParseResult subQueryInfo){
this.useCache = useCache;
this.subQueryInfo = subQueryInfo;
}
}
}
| {
"pile_set_name": "Github"
} |
var expect = require('chai').expect;
var util = require('../lib/client-utils.js');
var testUtils = require('../lib/util.js');
var SyncMessage = require('../../lib/syncmessage.js');
var MakeDrive = require('../../client/src');
var Filer = require('../../lib/filer.js');
var syncTypes = require('../../lib/constants.js').syncTypes;
var diffHelper = require('../../lib/diff.js');
var FAKE_DATA = 'FAKE DATA';
// Decodes a raw socket message and asserts it matches the expected SyncMessage.
// Keys listed in `checkExists` are only checked for presence, not for value
// (useful for generated payloads such as checksums or source lists).
function validateSocketMessage(message, expectedMessage, checkExists) {
  var decoded = util.decodeSocketMessage(message);
  var existenceOnly = checkExists || [];

  expect(decoded.type).to.equal(expectedMessage.type);
  expect(decoded.name).to.equal(expectedMessage.name);

  // No expected content: the decoded message must not carry any either.
  if(!expectedMessage.content) {
    expect(decoded.content).not.to.exist;
    return;
  }

  expect(decoded.content).to.exist;

  // Primitive content is compared wholesale.
  if(typeof decoded.content !== 'object') {
    expect(decoded.content).to.deep.equal(expectedMessage.content);
    return;
  }

  // Object content is compared key by key.
  Object.keys(expectedMessage.content).forEach(function(key) {
    if(existenceOnly.indexOf(key) !== -1) {
      expect(decoded.content[key]).to.exist;
    } else {
      expect(decoded.content[key]).to.deep.equal(expectedMessage.content[key]);
    }
  });
}
// Attached to sync events that must NOT fire; fails the test if invoked.
function incorrectEvent() {
expect(true, '[Incorrect sync event emitted]').to.be.false;
}
describe('The Client', function() {
var SocketServer;
after(function(done) {
util.close(done);
});
// Tests for the low-level socket authentication handshake.
describe('Socket protocol', function() {
  var fs;
  var sync;

  // Spin up a fresh server and an isolated in-memory MakeDrive filesystem per test.
  beforeEach(function(done) {
    util.run(function(server) {
      SocketServer = server;
      fs = MakeDrive.fs({forceCreate: true, manual: true, provider: new Filer.FileSystem.providers.Memory(testUtils.username())});
      sync = fs.sync;
      done();
    });
  });
  // Cleanly disconnect the client so sockets do not leak between tests.
  afterEach(function(done){
    testUtils.disconnectClient(sync, function(err) {
      if(err) throw err;
      sync = null;
      fs = null;
      done();
    });
  });

  it('should emit a sync error if authentication fails', function(done) {
    SocketServer.once('connection', function(client) {
      client.once('message', function() {
        // Reply with a format error instead of an authorization response.
        // FIX: the local was previously declared as `var message`, shadowing
        // (and redeclaring) the callback's `message` parameter.
        var errorMessage = SyncMessage.error.format;
        errorMessage.content = {error: 'Unable to parse/handle message, invalid message format.'};
        client.send(errorMessage.stringify());
      });
    });

    sync.once('connected', incorrectEvent);
    sync.once('error', function(err) {
      expect(err).to.exist;
      expect(err.message).to.equal('Cannot handle message');
      done();
    });

    sync.connect(util.socketURL, 'This is not a token');
  });

  it('should send emit a connected event on successfully authenticating with the server', function(done) {
    SocketServer.once('connection', function(client) {
      client.once('message', function() {
        client.send(SyncMessage.response.authz.stringify());
      });
    });

    sync.once('connected', function(url) {
      expect(url).to.equal(util.socketURL);
      done();
    });
    sync.once('disconnected', incorrectEvent);
    sync.once('error', incorrectEvent);

    sync.connect(util.socketURL, 'This is a valid token');
  });
});
// Tests for server-initiated (downstream) syncs: the server pushes checksums
// and diffs; the client patches its filesystem and acknowledges.
describe('Downstream syncs', function() {
var fs;
var sync;
// Spin up a fresh server and an isolated in-memory MakeDrive filesystem per test.
beforeEach(function(done) {
util.run(function(server) {
SocketServer = server;
fs = MakeDrive.fs({forceCreate: true, manual: true, provider: new Filer.FileSystem.providers.Memory(testUtils.username())});
sync = fs.sync;
done();
});
});
// Cleanly disconnect the client so sockets do not leak between tests.
afterEach(function(done){
testUtils.disconnectClient(sync, function(err) {
if(err) throw err;
sync = null;
fs = null;
done();
});
});
// Authentication acknowledgement should immediately trigger the initial downstream sync.
it('should send a "RESPONSE" of "AUTHORIZED" which triggers an initial downstream sync', function(done) {
util.authenticateAndRun(sync, function(client, message) {
validateSocketMessage(message, SyncMessage.response.authz);
done();
});
});
// A checksum request for a path under the sync root is answered with a DIFFS request.
it('should send a "REQUEST" for "DIFFS" containing checksums when requested for checksums for a path under the sync root', function(done) {
var file = {path: '/file', content: 'This is a file'};
var checksumRequest = SyncMessage.request.checksums;
checksumRequest.content = {path: file.path, type: syncTypes.CREATE, sourceList: testUtils.generateSourceList([file])};
util.authenticateAndRun(sync, function(client) {
client.once('message', function(message) {
var expectedMessage = SyncMessage.request.diffs;
expectedMessage.content = {path: file.path, type: syncTypes.CREATE, checksums: FAKE_DATA};
validateSocketMessage(message, expectedMessage, ['checksums']);
done();
});
client.send(checksumRequest.stringify());
});
});
// A checksum request for a path outside the sync root is rejected with a ROOT response.
it('should send a "RESPONSE" of "ROOT" when requested for checksums for a path not under the sync root', function(done) {
var file = {path: '/file', content: 'This is a file'};
var checksumRequest = SyncMessage.request.checksums;
checksumRequest.content = {path: file.path, type: syncTypes.CREATE, sourceList: testUtils.generateSourceList([file])};
fs.mkdir('/dir', function(err) {
if(err) throw err;
fs.setRoot('/dir', function(err) {
if(err) throw err;
util.authenticateAndRun(sync, function(client) {
client.once('message', function(message) {
var expectedMessage = SyncMessage.response.root;
expectedMessage.content = {path: file.path, type: syncTypes.CREATE};
validateSocketMessage(message, expectedMessage);
done();
});
client.send(checksumRequest.stringify());
});
});
});
});
// Diffs are applied locally and acknowledged with PATCH when the file was untouched mid-sync.
it('should patch the file being synced and send a "RESPONSE" of "PATCH" if the file was not changed during the sync', function(done) {
var file = {path: '/file', content: 'This is a file'};
var diffResponse = SyncMessage.response.diffs;
diffResponse.content = {path: file.path, type: syncTypes.CREATE, diffs: diffHelper.serialize(testUtils.generateDiffs([file]))};
var layout = {};
layout[file.path] = file.content;
util.authenticateAndRun(sync, function(client) {
client.once('message', function(message) {
var expectedMessage = SyncMessage.response.patch;
expectedMessage.content = {path: file.path, type: syncTypes.CREATE, checksum: testUtils.generateValidationChecksums([file])};
validateSocketMessage(message, expectedMessage);
testUtils.ensureFilesystem(fs, layout, function(err) {
expect(err).not.to.exist;
done();
});
});
client.send(diffResponse.stringify());
});
});
// If the local file changed mid-sync, the patch is skipped and new checksums are requested instead.
it('should not patch the file being synced and send a "REQUEST" for "DIFFS" with checksums if the file was changed during the sync', function(done) {
var file = {path: '/file', content: 'This is a file'};
var checksumRequest = SyncMessage.request.checksums;
checksumRequest.content = {path: file.path, type: syncTypes.CREATE, sourceList: testUtils.generateSourceList([file])};
var diffResponse = SyncMessage.response.diffs;
diffResponse.content = {path: file.path, type: syncTypes.CREATE, diffs: diffHelper.serialize(testUtils.generateDiffs([file]))};
var layout = {};
layout[file.path] = 'This file was changed';
util.authenticateAndRun(sync, function(client) {
client.once('message', function() {
// Modify the file while the downstream sync is in flight.
fs.writeFile(file.path, layout[file.path], function(err) {
if(err) throw err;
client.once('message', function(message) {
var expectedMessage = SyncMessage.request.diffs;
expectedMessage.content = {path: file.path, type: syncTypes.CREATE, checksums: FAKE_DATA};
validateSocketMessage(message, expectedMessage, ['checksums']);
testUtils.ensureFilesystem(fs, layout, function(err) {
expect(err).not.to.exist;
done();
});
});
client.send(diffResponse.stringify());
});
});
client.send(checksumRequest.stringify());
});
});
// A verification response from the server completes the downstream sync.
it('should emit a completed event on completing a downstream sync', function(done) {
var file = {path: '/file', content: 'This is a file'};
var verificationResponse = SyncMessage.response.verification;
verificationResponse.content = {path: file.path};
util.authenticateAndRun(sync, function(client) {
sync.once('completed', function(path) {
expect(path).to.equal(file.path);
done();
});
client.send(verificationResponse.stringify());
});
});
});
// Tests for client-initiated (upstream) syncs: the client requests a sync,
// sends checksums/diffs and reacts to the server's patch acknowledgements.
describe('Upstream syncs', function() {
var fs;
var sync;
// Spin up a fresh server and an isolated in-memory MakeDrive filesystem per test.
beforeEach(function(done) {
util.run(function(server) {
SocketServer = server;
fs = MakeDrive.fs({forceCreate: true, manual: true, provider: new Filer.FileSystem.providers.Memory(testUtils.username())});
sync = fs.sync;
done();
});
});
// Cleanly disconnect the client so sockets do not leak between tests.
afterEach(function(done){
testUtils.disconnectClient(sync, function(err) {
if(err) throw err;
sync = null;
fs = null;
done();
});
});
// A local write followed by sync.request() should produce a SYNC request on the wire.
it('should send a "REQUEST" for "SYNC" if a sync is requested and there are changes to the filesystem', function(done) {
var file = {path: '/file', content: 'This is a file'};
util.authenticateAndRun(sync, function(client) {
fs.writeFile(file.path, file.content, function(err) {
if(err) throw err;
client.once('message', function(message) {
var expectedMessage = SyncMessage.request.sync;
expectedMessage.content = {path: file.path, type: syncTypes.CREATE};
validateSocketMessage(message, expectedMessage);
done();
});
sync.request();
});
});
});
// A needs-downstream error interrupts the upstream sync and restarts it after the downstream completes.
it('should emit an interrupted and syncing event when an upstream sync is requested for a file that has not been downstreamed', function(done) {
var file = {path: '/file', content: 'This is a file'};
var downstreamError = SyncMessage.error.needsDownstream;
downstreamError.content = {path: file.path, type: syncTypes.CREATE};
var checksumRequest = SyncMessage.request.checksums;
checksumRequest.content = {path: file.path, type: syncTypes.CREATE, sourceList: testUtils.generateSourceList([file])};
var errorEventEmitted = false;
var assertionsCompleted = false;
// Both the error path and the syncing path must run before the test may end.
function endTest() {
if(assertionsCompleted) {
done();
} else {
assertionsCompleted = true;
}
}
util.authenticateAndRun(sync, function(client) {
fs.writeFile(file.path, file.content, function(err) {
if(err) throw err;
client.once('message', function() {
sync.once('error', function(err) {
errorEventEmitted = true;
client.once('message', endTest);
expect(err).to.exist;
expect(err.message).to.equal('Sync interrupted for path ' + file.path);
client.send(checksumRequest.stringify());
});
sync.once('syncing', function(message) {
expect(message).to.equal('Sync started for ' + file.path);
expect(errorEventEmitted).to.be.true;
endTest();
});
client.send(downstreamError.stringify());
});
sync.request();
});
});
});
// Server approval of a sync request triggers the syncing event and a CHECKSUMS request.
it('should trigger a syncing event and send a "REQUEST" for "CHECKSUMS" when the request to sync has been approved', function(done) {
var file = {path: '/file', content: 'This is a file'};
var syncResponse = SyncMessage.response.sync;
syncResponse.content = {path: file.path, type: syncTypes.CREATE};
util.authenticateAndRun(sync, function(client) {
fs.writeFile(file.path, file.content, function(err) {
if(err) throw err;
client.once('message', function(message) {
var expectedMessage = SyncMessage.request.checksums;
expectedMessage.content = {path: file.path, type: syncTypes.CREATE, sourceList: FAKE_DATA};
validateSocketMessage(message, expectedMessage, ['sourceList']);
done();
});
sync.once('syncing', function(message) {
expect(message).to.equal('Sync started for ' + file.path);
});
client.send(syncResponse.stringify());
});
});
});
// A DIFFS request from the server is answered with the serialized local diffs.
it('should send a "RESPONSE" of "DIFFS" when requested for diffs', function(done) {
var file = {path: '/file', content: 'This is a file'};
var diffRequest = SyncMessage.request.diffs;
diffRequest.content = {path: file.path, type: syncTypes.CREATE, checksums: testUtils.generateChecksums([file])};
util.authenticateAndRun(sync, function(client) {
fs.writeFile(file.path, file.content, function(err) {
if(err) throw err;
client.once('message', function(message) {
var expectedMessage = SyncMessage.response.diffs;
expectedMessage.content = {path: file.path, type: syncTypes.CREATE, diffs: FAKE_DATA};
validateSocketMessage(message, expectedMessage, ['diffs']);
done();
});
client.send(diffRequest.stringify());
});
});
});
// A PATCH response finishes the current upstream sync: completed fires per path, synced when the queue drains.
it('should emit a completed and synced event when all upstream syncs are completed', function(done) {
var file = {path: '/file', content: 'This is a file'};
var patchResponse = SyncMessage.response.patch;
patchResponse.content = {path: file.path, type: syncTypes.CREATE};
var completedEventEmitted = false;
util.authenticateAndRun(sync, function(client) {
fs.writeFile(file.path, file.content, function(err) {
if(err) throw err;
sync.once('synced', function() {
expect(completedEventEmitted).to.be.true;
done();
});
sync.once('error', incorrectEvent);
sync.once('completed', function(path) {
expect(path).to.equal(file.path);
completedEventEmitted = true;
});
client.send(patchResponse.stringify());
});
});
});
// Finishing one upstream sync should automatically start the next queued one.
it('should automatically trigger the next upstream sync in the queue once an upstream sync finishes', function(done) {
var file = {path: '/file', content: 'This is a file'};
var file2 = {path: '/file2', content: 'This is another file'};
var patchResponse = SyncMessage.response.patch;
patchResponse.content = {path: file.path, type: syncTypes.CREATE};
util.authenticateAndRun(sync, function(client) {
fs.writeFile(file.path, file.content, function(err) {
if(err) throw err;
fs.writeFile(file2.path, file2.content, function(err) {
if(err) throw err;
client.once('message', function(message) {
var expectedMessage = SyncMessage.request.sync;
expectedMessage.content = {path: file2.path, type: syncTypes.CREATE};
validateSocketMessage(message, expectedMessage);
done();
});
client.send(patchResponse.stringify());
});
});
});
});
// Requesting a sync while one is already running must surface an error.
it('should emit an error event when a sync is requested while another upstream sync is occurring', function(done) {
var file = {path: '/file', content: 'This is a file'};
util.authenticateAndRun(sync, function(client) {
fs.writeFile(file.path, file.content, function(err) {
if(err) throw err;
sync.once('error', function(err) {
expect(err).to.exist;
expect(err.message).to.equal('Sync currently underway');
done();
});
client.once('message', function() {
sync.request();
});
sync.request();
});
});
});
});
});
| {
"pile_set_name": "Github"
} |
const IS_TEST = !!process.env.VUE_LOADER_TEST
const fs = require('fs')
const path = require('path')
exports.lib = file => path.resolve(__dirname, '../', file)
exports.dep = dep => {
if (IS_TEST) {
return dep
} else if (
fs.existsSync(path.resolve(__dirname, '../../node_modules', dep))
) {
// npm 2 or npm linked
return 'vue-loader/node_modules/' + dep
} else {
// npm 3
return dep
}
}
| {
"pile_set_name": "Github"
} |
// highlight.js language definition for Haml templates.
module.exports = // TODO support filter tags like :javascript, support inline HTML
function(hljs) {
return {
case_insensitive: true,
contains: [
// Doctype declarations such as "!!! 5" or "!!! Strict".
{
className: 'meta',
begin: '^!!!( (5|1\\.1|Strict|Frameset|Basic|Mobile|RDFa|XML\\b.*))?$',
relevance: 10
},
// Haml comments: !=#, =#, -# and / at the start of a line.
// FIXME these comments should be allowed to span indented lines
hljs.COMMENT(
'^\\s*(!=#|=#|-#|/).*$',
false,
{
relevance: 0
}
),
// Lines starting with -, = or != embed Ruby code until end of line.
{
begin: '^\\s*(-|=|!=)(?!#)',
starts: {
end: '\\n',
subLanguage: 'ruby'
}
},
// %tag lines, including #id, .class, {ruby attrs} and (html attrs).
{
className: 'tag',
begin: '^\\s*%',
contains: [
{
className: 'selector-tag',
begin: '\\w+'
},
{
className: 'selector-id',
begin: '#[\\w-]+'
},
{
className: 'selector-class',
begin: '\\.[\\w-]+'
},
// Ruby-style attribute hash: { :attr => value, ... }
{
begin: '{\\s*',
end: '\\s*}',
contains: [
{
begin: ':\\w+\\s*=>',
end: ',\\s+',
returnBegin: true,
endsWithParent: true,
contains: [
{
className: 'attr',
begin: ':\\w+'
},
hljs.APOS_STRING_MODE,
hljs.QUOTE_STRING_MODE,
{
begin: '\\w+',
relevance: 0
}
]
}
]
},
// HTML-style attribute list: (attr=value ...)
{
begin: '\\(\\s*',
end: '\\s*\\)',
excludeEnd: true,
contains: [
{
begin: '\\w+\\s*=',
end: '\\s+',
returnBegin: true,
endsWithParent: true,
contains: [
{
className: 'attr',
begin: '\\w+',
relevance: 0
},
hljs.APOS_STRING_MODE,
hljs.QUOTE_STRING_MODE,
{
begin: '\\w+',
relevance: 0
}
]
}
]
}
]
},
// Output/whitespace-preserving markers (= / ~) at line start.
{
begin: '^\\s*[=~]\\s*'
},
// #{...} Ruby interpolation inside plain text.
{
begin: '#{',
starts: {
end: '}',
subLanguage: 'ruby'
}
}
]
};
};
"pile_set_name": "Github"
} |
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef __nsAutoCompleteSimpleResult__
#define __nsAutoCompleteSimpleResult__
#include "nsIAutoCompleteResult.h"
#include "nsIAutoCompleteSimpleResult.h"
#include "nsString.h"
#include "nsCOMPtr.h"
#include "nsTArray.h"
#include "mozilla/Attributes.h"
// Concrete implementation of nsIAutoCompleteSimpleResult: a mutable container
// of autocomplete match rows, stored as parallel arrays of strings.
class nsAutoCompleteSimpleResult MOZ_FINAL : public nsIAutoCompleteSimpleResult
{
public:
nsAutoCompleteSimpleResult();
// Asserts that the parallel per-match arrays have stayed in lock-step.
inline void CheckInvariants() {
NS_ASSERTION(mValues.Length() == mComments.Length(), "Arrays out of sync");
NS_ASSERTION(mValues.Length() == mImages.Length(), "Arrays out of sync");
NS_ASSERTION(mValues.Length() == mStyles.Length(), "Arrays out of sync");
NS_ASSERTION(mValues.Length() == mFinalCompleteValues.Length(), "Arrays out of sync");
}
NS_DECL_ISUPPORTS
NS_DECL_NSIAUTOCOMPLETERESULT
NS_DECL_NSIAUTOCOMPLETESIMPLERESULT
private:
// Private destructor: instances are refcounted via NS_DECL_ISUPPORTS.
~nsAutoCompleteSimpleResult() {}
protected:
// What we really want is an array of structs with value/comment/image/style contents.
// But then we'd either have to use COM or manage object lifetimes ourselves.
// Having four arrays of string simplifies this, but is stupid.
nsTArray<nsString> mValues;
nsTArray<nsString> mComments;
nsTArray<nsString> mImages;
nsTArray<nsString> mStyles;
nsTArray<nsString> mFinalCompleteValues;
// The string that was searched for.
nsString mSearchString;
nsString mErrorDescription;
// Index of the entry to preselect, or -1 for none.
int32_t mDefaultIndex;
uint32_t mSearchResult;
bool mTypeAheadResult;
nsCOMPtr<nsIAutoCompleteSimpleResultListener> mListener;
};
#endif // __nsAutoCompleteSimpleResult__
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<!--
/*
**
** Copyright 2010, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
-->
<merge
xmlns:latin="http://schemas.android.com/apk/res-auto"
>
<Row
latin:keyWidth="10%p"
>
<Key
latin:keyStyle="toSymbolKeyStyle"
latin:keyWidth="15%p" />
<include
latin:keyboardLayout="@xml/key_comma" />
<include
latin:keyXPos="25%p"
latin:keyboardLayout="@xml/key_space_5kw" />
<include
latin:keyboardLayout="@xml/key_period" />
<Key
latin:keyStyle="enterKeyStyle"
latin:keyWidth="fillRight" />
</Row>
</merge>
| {
"pile_set_name": "Github"
} |
package archive
import (
"context"
"fmt"
"io/ioutil"
"os"
"strings"
"github.com/containers/image/v5/directory/explicitfilepath"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/image"
"github.com/containers/image/v5/internal/tmpdir"
"github.com/containers/image/v5/oci/internal"
ocilayout "github.com/containers/image/v5/oci/layout"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/archive"
"github.com/pkg/errors"
)
// init registers the oci-archive transport with the global transports registry.
func init() {
transports.Register(Transport)
}

// Transport is an ImageTransport for OCI archive
// it creates an oci-archive tar file by calling into the OCI transport
// tarring the directory created by oci and deleting the directory
var Transport = ociArchiveTransport{}

type ociArchiveTransport struct{}

// ociArchiveReference is an ImageReference for OCI Archive paths
type ociArchiveReference struct {
// file is the archive path exactly as given by the user.
file string
// resolvedFile is file resolved to a fully explicit path; used for policy identity.
resolvedFile string
// image is the (possibly empty) image name within the archive.
image string
}

// Name returns the transport name used as the reference prefix ("oci-archive:...").
func (t ociArchiveTransport) Name() string {
return "oci-archive"
}
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix
// into an ImageReference.
func (t ociArchiveTransport) ParseReference(reference string) (types.ImageReference, error) {
return ParseReference(reference)
}

// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
func (t ociArchiveTransport) ValidatePolicyConfigurationScope(scope string) error {
return internal.ValidateScope(scope)
}

// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference.
func ParseReference(reference string) (types.ImageReference, error) {
file, image := internal.SplitPathAndImage(reference)
return NewReference(file, image)
}

// NewReference returns an OCI reference for a file and a image.
func NewReference(file, image string) (types.ImageReference, error) {
// Resolve the path first so PolicyConfigurationIdentity is stable across spellings.
resolved, err := explicitfilepath.ResolvePathToFullyExplicit(file)
if err != nil {
return nil, err
}
// Reject paths/names that would be ambiguous within this transport's string syntax.
if err := internal.ValidateOCIPath(file); err != nil {
return nil, err
}
if err := internal.ValidateImageName(image); err != nil {
return nil, err
}
return ociArchiveReference{file: file, resolvedFile: resolved, image: image}, nil
}
// Transport returns the ImageTransport this reference belongs to.
func (ref ociArchiveReference) Transport() types.ImageTransport {
return Transport
}

// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
func (ref ociArchiveReference) StringWithinTransport() string {
return fmt.Sprintf("%s:%s", ref.file, ref.image)
}

// DockerReference returns a Docker reference associated with this reference
// (always nil: oci-archive references carry no Docker-compatible name).
func (ref ociArchiveReference) DockerReference() reference.Named {
return nil
}

// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
func (ref ociArchiveReference) PolicyConfigurationIdentity() string {
// NOTE: ref.image is not a part of the image identity, because "$dir:$someimage" and "$dir:" may mean the
// same image and the two can’t be statically disambiguated. Using at least the repository directory is
// less granular but hopefully still useful.
return ref.resolvedFile
}
// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
// for if explicit configuration for PolicyConfigurationIdentity() is not set.
// The namespaces are the resolved archive path and each of its parent directories,
// from most to least specific.
func (ref ociArchiveReference) PolicyConfigurationNamespaces() []string {
	namespaces := []string{}
	// Walk up the directory tree, recording every prefix except the bare root.
	// Note that we do not include "/"; it is redundant with the default "" global default,
	// and rejected by ociTransport.ValidatePolicyConfigurationScope above.
	for current := ref.resolvedFile; current != "/"; {
		slash := strings.LastIndex(current, "/")
		if slash == -1 {
			break
		}
		namespaces = append(namespaces, current)
		current = current[:slash]
	}
	return namespaces
}
// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
// The caller must call .Close() on the returned ImageCloser.
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref ociArchiveReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
src, err := newImageSource(ctx, sys, ref)
if err != nil {
return nil, err
}
return image.FromSource(ctx, sys, src)
}

// NewImageSource returns a types.ImageSource for this reference.
// The caller must call .Close() on the returned ImageSource.
func (ref ociArchiveReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
return newImageSource(ctx, sys, ref)
}

// NewImageDestination returns a types.ImageDestination for this reference.
// The caller must call .Close() on the returned ImageDestination.
func (ref ociArchiveReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
return newImageDestination(ctx, sys, ref)
}
// DeleteImage deletes the named image from the registry, if supported.
// Deletion is not supported by the oci-archive transport.
func (ref ociArchiveReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
	// FIX: the previous message named the "oci" transport, but this is oci-archive.
	return errors.Errorf("Deleting images not implemented for oci-archive: images")
}
// struct to store the ociReference and temporary directory returned by createOCIRef
type tempDirOCIRef struct {
// tempDirectory is the scratch directory owned by the caller; release via deleteTempDir.
tempDirectory string
// ociRefExtracted is an oci/layout reference pointing into tempDirectory.
ociRefExtracted types.ImageReference
}

// deleteTempDir removes the temporary directory (and everything inside it).
func (t *tempDirOCIRef) deleteTempDir() error {
return os.RemoveAll(t.tempDirectory)
}

// createOCIRef creates the oci reference of the image
// If SystemContext.BigFilesTemporaryDir not "", overrides the temporary directory to use for storing big files
func createOCIRef(sys *types.SystemContext, image string) (tempDirOCIRef, error) {
dir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(sys), "oci")
if err != nil {
return tempDirOCIRef{}, errors.Wrapf(err, "error creating temp directory")
}
// Build an oci/layout reference into the freshly created directory.
ociRef, err := ocilayout.NewReference(dir, image)
if err != nil {
return tempDirOCIRef{}, err
}
tempDirRef := tempDirOCIRef{tempDirectory: dir, ociRefExtracted: ociRef}
return tempDirRef, nil
}
// createUntarTempDir creates a temporary directory and extracts the tarred
// archive contents into it, returning a tempDirOCIRef the caller must clean up
// via deleteTempDir.
func createUntarTempDir(sys *types.SystemContext, ref ociArchiveReference) (tempDirOCIRef, error) {
	tempDirRef, err := createOCIRef(sys, ref.image)
	if err != nil {
		return tempDirOCIRef{}, errors.Wrap(err, "error creating oci reference")
	}
	src := ref.resolvedFile
	dst := tempDirRef.tempDirectory
	// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
	if err := archive.UntarPath(src, dst); err != nil {
		// Best-effort cleanup; a failed cleanup is reported in preference to the untar error,
		// matching the previous behavior, but without shadowing `err`.
		if rmErr := tempDirRef.deleteTempDir(); rmErr != nil {
			return tempDirOCIRef{}, errors.Wrapf(rmErr, "error deleting temp directory %q", tempDirRef.tempDirectory)
		}
		// FIX: report the source archive path; the previous message wrongly named the temp directory.
		return tempDirOCIRef{}, errors.Wrapf(err, "error untarring file %q", src)
	}
	return tempDirRef, nil
}
| {
"pile_set_name": "Github"
} |
UploadCollection
================
.. currentmodule:: oci.log_analytics.models
.. autoclass:: UploadCollection
:show-inheritance:
:special-members: __init__
:members:
:undoc-members:
:inherited-members: | {
"pile_set_name": "Github"
} |
# 树的子结构
## 题目
[牛客网](https://www.nowcoder.com/practice/6e196c44c7004d15b1610b9afca8bd88?tpId=13&tqId=11170&rp=1&ru=%2Fta%2Fcoding-interviews&qru=%2Fta%2Fcoding-interviews%2Fquestion-ranking&tPage=1)
输入两棵二叉树A,B,判断B是不是A的子结构。(ps:我们约定空树不是任意一个树的子结构)
## 解题思路
1. 遍历查找相等根节点
2. 通过递归查找当前根节点下是否包含子树 root2
```java
// Returns true when root2 occurs as a substructure somewhere inside root1.
// By the stated convention, an empty tree is not a substructure of anything.
public boolean HasSubtree(TreeNode root1, TreeNode root2) {
    if (root2 == null) {
        return false;
    }
    // Breadth-first scan of root1 for candidate roots whose value matches root2.
    LinkedList<TreeNode> pipeline = new LinkedList<>();
    pipeline.addLast(root1);
    while (!pipeline.isEmpty()) {
        TreeNode node = pipeline.pop();
        if (node == null) {
            continue;
        }
        pipeline.addLast(node.left);
        pipeline.addLast(node.right);
        // Only run the full structural comparison when the root values match.
        if (node.val == root2.val && isSub(node, root2)) {
            return true;
        }
    }
    return false;
}
// Checks whether root2 matches the top part of the tree rooted at root1:
// every node of root2 must exist in root1 with the same value; root1 may
// carry extra nodes below root2's leaves.
private boolean isSub(TreeNode root1, TreeNode root2) {
    if (root1 == null && root2 == null) {
        return true;
    }
    // root1 exhausted while root2 still has nodes: no match.
    if (root1 == null) {
        return false;
    }
    // root2 exhausted: everything compared so far matched.
    if (root2 == null) {
        return true;
    }
    if (root1.val == root2.val) {
        return isSub(root1.left, root2.left) && isSub(root1.right, root2.right);
    } else {
        return false;
    }
}
```
| {
"pile_set_name": "Github"
} |
package gg.rsmod.game.sync.segment
import gg.rsmod.game.sync.SynchronizationSegment
import gg.rsmod.net.packet.GamePacketBuilder
/**
* @author Tom <[email protected]>
*/
class RemoveLocalPlayerSegment(private val updateTileHash: Boolean) : SynchronizationSegment {
override fun encode(buf: GamePacketBuilder) {
    /*
     * Signal to the client that the player needs to be decoded.
     */
    buf.putBits(1, 1)
    /*
     * Signal to the client that the player does not require
     * [gg.rsmod.game.sync.block.UpdateBlockType] decoding.
     */
    buf.putBits(1, 0)
    /*
     * Signal to the client that the player needs to be removed.
     */
    buf.putBits(2, 0)
    /*
     * Tile hash flag: writes 1 when updateTileHash is set, 0 otherwise.
     * NOTE(review): the original comment claimed the location "does not
     * need to be decoded" unconditionally, which contradicts the
     * conditional bit written here — confirm the intended protocol
     * semantics against the client.
     */
    buf.putBits(1, if (updateTileHash) 1 else 0)
}
} | {
"pile_set_name": "Github"
} |
'\" t
.\" Title: macro::json_output_string_value
.\" Author:
.\" Generator: DocBook XSL Stylesheets v1.78.1 <http://docbook.sf.net/>
.\" Date: March 2016
.\" Manual: JSON Tapset
.\" Source: SystemTap Tapset Reference
.\" Language: English
.\"
.TH "MACRO::JSON_OUTPUT_S" "3stap" "March 2016" "SystemTap Tapset Reference" "JSON Tapset"
.\" -----------------------------------------------------------------
.\" * Define some portability stuff
.\" -----------------------------------------------------------------
.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.\" http://bugs.debian.org/507673
.\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html
.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.ie \n(.g .ds Aq \(aq
.el .ds Aq '
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
.\" disable hyphenation
.nh
.\" disable justification (adjust text to left margin only)
.ad l
.\" -----------------------------------------------------------------
.\" * MAIN CONTENT STARTS HERE *
.\" -----------------------------------------------------------------
.SH "NAME"
macro::json_output_string_value \- Output a string value\&.
.SH "SYNOPSIS"
.sp
.nf
@json_output_string_value(name,value)
.fi
.SH "ARGUMENTS"
.PP
\fIname\fR
.RS 4
The name of the string metric\&.
.RE
.PP
\fIvalue\fR
.RS 4
The string value to output\&.
.RE
.SH "DESCRIPTION"
.PP
The json_output_string_value macro is designed to be called from the \*(Aqjson_data\*(Aq probe in the user\*(Aqs script to output a metric\*(Aqs string value\&. This metric should have been added with
\fBjson_add_string_metric\fR\&.
.SH "SEE ALSO"
.IR tapset::json (3stap)
| {
"pile_set_name": "Github"
} |
/*
* Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef __XFS_IOPS_H__
#define __XFS_IOPS_H__

struct xfs_inode;

/* VFS file-operation tables for XFS regular files and directories. */
extern const struct file_operations xfs_file_operations;
extern const struct file_operations xfs_dir_file_operations;

/* Writes the inode's extended-attribute names into the caller's buffer. */
extern ssize_t xfs_vn_listxattr(struct dentry *, char *data, size_t size);

/*
 * Internal setattr interfaces.
 */
#define XFS_ATTR_NOACL		0x01	/* Don't call posix_acl_chmod */

extern void xfs_setattr_time(struct xfs_inode *ip, struct iattr *iattr);
extern int xfs_setattr_nonsize(struct xfs_inode *ip, struct iattr *vap,
			       int flags);
extern int xfs_setattr_size(struct xfs_inode *ip, struct iattr *vap);

#endif /* __XFS_IOPS_H__ */
| {
"pile_set_name": "Github"
} |
; RUN: opt < %s -newgvn | llvm-dis
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i386-apple-darwin7"
; Regression input for the -newgvn run line above (no FileCheck patterns, so
; the test only requires the pass to produce valid IR): the phi %opt.0 in
; bb82 is both used by the loads in bb41 and fed by %tmp1 defined in bb41,
; forming a cyclic def-use chain across the bb82 <-> bb41 loop.
define i32 @main(i32 %argc, i8** %argv) nounwind {
entry:
  br label %bb84

bb41:   ; preds = %bb82
  %tmp = load i8, i8* %opt.0, align 1   ; <i8> [#uses=0]
  %tmp1 = getelementptr i8, i8* %opt.0, i32 1   ; <i8*> [#uses=2]
  switch i32 0, label %bb81 [
    i32 102, label %bb82
    i32 110, label %bb79
    i32 118, label %bb80
  ]

bb79:   ; preds = %bb41
  br label %bb82

bb80:   ; preds = %bb41
  ret i32 0

bb81:   ; preds = %bb41
  ret i32 1

bb82:   ; preds = %bb84, %bb79, %bb41
  %opt.0 = phi i8* [ %tmp3, %bb84 ], [ %tmp1, %bb79 ], [ %tmp1, %bb41 ]   ; <i8*> [#uses=3]
  %tmp2 = load i8, i8* %opt.0, align 1   ; <i8> [#uses=0]
  br i1 false, label %bb84, label %bb41

bb84:   ; preds = %bb82, %entry
  %tmp3 = getelementptr i8, i8* null, i32 1   ; <i8*> [#uses=1]
  br label %bb82
}
| {
"pile_set_name": "Github"
} |
context
=======
[](https://travis-ci.org/gorilla/context)
gorilla/context is a general purpose registry for global request variables.
> Note: gorilla/context, having been born well before `context.Context` existed, does not play well
> with the shallow copying of the request that [`http.Request.WithContext`](https://golang.org/pkg/net/http/#Request.WithContext) (added to net/http Go 1.7 onwards) performs. You should either use *just* gorilla/context, or moving forward, the new `http.Request.Context()`.
Read the full documentation here: http://www.gorillatoolkit.org/pkg/context
| {
"pile_set_name": "Github"
} |
This directory contains Cython files.
| {
"pile_set_name": "Github"
} |
//===- unittests/Analysis/CloneDetectionTest.cpp - Clone detection tests --===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/Analysis/CloneDetection.h"
#include "clang/Tooling/Tooling.h"
#include "gtest/gtest.h"
namespace clang {
namespace analysis {
namespace {
/// Visitor that feeds every function body in the AST into a CloneDetector.
class CloneDetectionVisitor
    : public RecursiveASTVisitor<CloneDetectionVisitor> {

  CloneDetector &Detector;

public:
  explicit CloneDetectionVisitor(CloneDetector &D) : Detector(D) {}

  bool VisitFunctionDecl(FunctionDecl *D) {
    Detector.analyzeCodeBody(D);
    return true;
  }
};
/// Example constraint for testing purposes.
/// Filters out all statements that are in a function which name starts with
/// "bar".
class NoBarFunctionConstraint {
public:
  void constrain(std::vector<CloneDetector::CloneGroup> &CloneGroups) {
    CloneConstraint::splitCloneGroups(
        CloneGroups, [](const StmtSequence &A, const StmtSequence &B) {
          // Check if one of the sequences is in a function which name starts
          // with "bar".
          // NOTE(review): returning false appears to separate A and B into
          // different clone groups — confirm against splitCloneGroups docs.
          for (const StmtSequence &Arg : {A, B}) {
            if (const auto *D =
                    dyn_cast<const FunctionDecl>(Arg.getContainingDecl())) {
              if (D->getNameAsString().find("bar") == 0)
                return false;
            }
          }
          return true;
        });
  }
};
TEST(CloneDetector, FilterFunctionsByName) {
  // Four clone functions; two ("bar*") should be excludable by constraint.
  auto ASTUnit =
      clang::tooling::buildASTFromCode("void foo1(int &a1) { a1++; }\n"
                                       "void foo2(int &a2) { a2++; }\n"
                                       "void bar1(int &a3) { a3++; }\n"
                                       "void bar2(int &a4) { a4++; }\n");
  auto TU = ASTUnit->getASTContext().getTranslationUnitDecl();

  CloneDetector Detector;
  // Push all the function bodies into the detector.
  CloneDetectionVisitor Visitor(Detector);
  Visitor.TraverseTranslationUnitDecl(TU);

  // Find clones with the usual settings, but we want to filter out
  // all statements from functions which names start with "bar".
  std::vector<CloneDetector::CloneGroup> CloneGroups;
  Detector.findClones(CloneGroups, NoBarFunctionConstraint(),
                      RecursiveCloneTypeIIHashConstraint(),
                      MinComplexityConstraint(2), MinGroupSizeConstraint(2),
                      RecursiveCloneTypeIIVerifyConstraint(),
                      OnlyLargestCloneConstraint());

  // With the filter, only foo1/foo2 remain in a single group.
  ASSERT_EQ(CloneGroups.size(), 1u);
  ASSERT_EQ(CloneGroups.front().size(), 2u);

  for (auto &Clone : CloneGroups.front()) {
    const auto ND = dyn_cast<const FunctionDecl>(Clone.getContainingDecl());
    ASSERT_TRUE(ND != nullptr);
    // Check that no function name starting with "bar" is in the results...
    ASSERT_TRUE(ND->getNameAsString().find("bar") != 0);
  }

  // Retry above's example without the filter...
  CloneGroups.clear();

  Detector.findClones(CloneGroups, RecursiveCloneTypeIIHashConstraint(),
                      MinComplexityConstraint(2), MinGroupSizeConstraint(2),
                      RecursiveCloneTypeIIVerifyConstraint(),
                      OnlyLargestCloneConstraint());

  // Without the filter all four functions form one clone group.
  ASSERT_EQ(CloneGroups.size(), 1u);
  ASSERT_EQ(CloneGroups.front().size(), 4u);

  // Count how many functions with the bar prefix we have in the results.
  int FoundFunctionsWithBarPrefix = 0;
  for (auto &Clone : CloneGroups.front()) {
    const auto ND = dyn_cast<const FunctionDecl>(Clone.getContainingDecl());
    ASSERT_TRUE(ND != nullptr);
    // This time check that we picked up the bar functions from above
    if (ND->getNameAsString().find("bar") == 0) {
      FoundFunctionsWithBarPrefix++;
    }
  }
  // We should have found the two functions bar1 and bar2.
  ASSERT_EQ(FoundFunctionsWithBarPrefix, 2);
}
} // namespace
} // namespace analysis
} // namespace clang
| {
"pile_set_name": "Github"
} |
package com.qihoo.qsql.metadata.extern;
import com.qihoo.qsql.exception.ParseException;
import java.util.Arrays;
import java.util.List;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
public class MetadataOptionParserTest {

    MetadataOptionParser metadataOptionParser;

    @Before
    public void init() {
        metadataOptionParser = new MetadataOptionParser();
    }

    /** A valid dbType/action pair must yield exactly the two parsed properties. */
    @Test
    public void testOptionParse() {
        List<String> args = Arrays.asList("--dbType=mysql", "--action=init");
        metadataOptionParser.parse(args);
        // The original only asserted when the size happened to equal 2, so a
        // wrong property count passed silently; assert the exact count instead.
        Assert.assertEquals(2, metadataOptionParser.getProperties().keySet().size());
    }

    /** An unsupported dbType must be rejected with a ParseException. */
    @Test
    public void testErrorDbType() {
        List<String> args = Arrays.asList("--dbType=oracle", "--action=init");
        try {
            metadataOptionParser.parse(args);
            // The original test passed even when no exception was thrown.
            Assert.fail("Expected ParseException for unsupported dbType 'oracle'");
        } catch (ParseException ex) {
            // expected
        }
    }

    /** An unknown action must be rejected with a ParseException. */
    @Test
    public void testErrorAction() {
        List<String> args = Arrays.asList("--dbType=mysql", "--action=initAndUpdate");
        try {
            metadataOptionParser.parse(args);
            // The original test passed even when no exception was thrown.
            Assert.fail("Expected ParseException for unknown action 'initAndUpdate'");
        } catch (ParseException ex) {
            // expected
        }
    }
}
| {
"pile_set_name": "Github"
} |
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package openpgp
import "hash"
// NewCanonicalTextHash reformats text written to it into the canonical
// form and then applies the hash h. See RFC 4880, section 5.2.1.
func NewCanonicalTextHash(h hash.Hash) hash.Hash {
	return &canonicalTextHash{h, 0}
}

// canonicalTextHash wraps a hash and converts bare '\n' line endings in the
// written stream to "\r\n" before hashing. s is a two-state machine:
// 0 = normal, 1 = the previous byte was '\r' (so a following '\n' is part
// of an already-canonical pair and must not be expanded again). The state
// persists across Write calls, handling "\r\n" split over two buffers.
type canonicalTextHash struct {
	h hash.Hash
	s int
}

var newline = []byte{'\r', '\n'}

func (cth *canonicalTextHash) Write(buf []byte) (int, error) {
	// start marks the beginning of the run of bytes not yet written to h.
	start := 0

	for i, c := range buf {
		switch cth.s {
		case 0:
			if c == '\r' {
				// Remember the '\r'; it is left in place and written as part
				// of the pending run.
				cth.s = 1
			} else if c == '\n' {
				// Bare '\n': flush everything before it, then hash "\r\n"
				// in its place.
				cth.h.Write(buf[start:i])
				cth.h.Write(newline)
				start = i + 1
			}
		case 1:
			// Byte after '\r' (whatever it is) passes through unchanged.
			cth.s = 0
		}
	}

	cth.h.Write(buf[start:])
	return len(buf), nil
}

// Sum appends the wrapped hash's digest to in.
func (cth *canonicalTextHash) Sum(in []byte) []byte {
	return cth.h.Sum(in)
}

// Reset clears both the wrapped hash and the pending-'\r' state.
func (cth *canonicalTextHash) Reset() {
	cth.h.Reset()
	cth.s = 0
}

func (cth *canonicalTextHash) Size() int {
	return cth.h.Size()
}

func (cth *canonicalTextHash) BlockSize() int {
	return cth.h.BlockSize()
}
| {
"pile_set_name": "Github"
} |
/**************************************************************************
* Otter Browser: Web browser controlled by the user, not vice-versa.
* Copyright (C) 2016 - 2019 Michal Dutkiewicz aka Emdek <[email protected]>
* Copyright (C) 2017 Jan Bajer aka bajasoft <[email protected]>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
**************************************************************************/
#include "FilePasswordsStorageBackend.h"
#include "../../../../core/Console.h"
#include "../../../../core/SessionsManager.h"
#include <QtCore/QFile>
#include <QtCore/QJsonArray>
#include <QtCore/QJsonDocument>
#include <QtCore/QJsonObject>
namespace Otter
{
FilePasswordsStorageBackend::FilePasswordsStorageBackend(QObject *parent) : PasswordsStorageBackend(parent),
	m_isInitialized(false)
{
}

// Loads passwords.json from the profile's writable data path into m_passwords.
// The flag is set first, so later calls become no-ops even when loading fails.
void FilePasswordsStorageBackend::initialize()
{
	m_isInitialized = true;

	const QString path(SessionsManager::getWritableDataPath(QLatin1String("passwords.json")));

	if (!QFile::exists(path))
	{
		return;
	}

	QFile file(path);

	if (!file.open(QIODevice::ReadOnly | QIODevice::Text))
	{
		Console::addMessage(tr("Failed to open passwords file: %1").arg(file.errorString()), Console::OtherCategory, Console::ErrorLevel, file.fileName());

		return;
	}

	// The top-level JSON object maps a host name to an array of entries.
	QHash<QString, QVector<PasswordsManager::PasswordInformation> > passwords;
	QJsonObject hostsObject(QJsonDocument::fromJson(file.readAll()).object());
	QJsonObject::const_iterator hostsIterator;

	for (hostsIterator = hostsObject.constBegin(); hostsIterator != hostsObject.constEnd(); ++hostsIterator)
	{
		const QJsonArray hostArray(hostsIterator.value().toArray());
		QVector<PasswordsManager::PasswordInformation> hostPasswords;
		hostPasswords.reserve(hostArray.count());

		for (int i = 0; i < hostArray.count(); ++i)
		{
			const QJsonObject passwordObject(hostArray.at(i).toObject());
			PasswordsManager::PasswordInformation password;
			password.url = QUrl(passwordObject.value(QLatin1String("url")).toString());
			// Timestamps are stored as ISO 8601 and interpreted as UTC.
			password.timeAdded = QDateTime::fromString(passwordObject.value(QLatin1String("timeAdded")).toString(), Qt::ISODate);
			password.timeAdded.setTimeSpec(Qt::UTC);
			password.timeUsed = QDateTime::fromString(passwordObject.value(QLatin1String("timeUsed")).toString(), Qt::ISODate);
			password.timeUsed.setTimeSpec(Qt::UTC);
			// Any value other than "auth" is treated as a form password.
			password.type = ((passwordObject.value(QLatin1String("type")).toString() == QLatin1String("auth")) ? PasswordsManager::AuthPassword : PasswordsManager::FormPassword);

			const QJsonArray fieldsArray(passwordObject.value(QLatin1String("fields")).toArray());
			password.fields.reserve(fieldsArray.count());

			for (int j = 0; j < fieldsArray.count(); ++j)
			{
				const QJsonObject fieldObject(fieldsArray.at(j).toObject());
				PasswordsManager::PasswordInformation::Field field;
				// Falls back to the "key" attribute when "name" is absent.
				field.name = fieldObject.value(fieldObject.contains(QLatin1String("name")) ? QLatin1String("name") : QLatin1String("key")).toString();
				field.value = fieldObject.value(QLatin1String("value")).toString();
				field.type = ((fieldObject.value(QLatin1String("type")).toString() == QLatin1String("password")) ? PasswordsManager::PasswordField : PasswordsManager::TextField);

				password.fields.append(field);
			}

			hostPasswords.append(password);
		}

		passwords[hostsIterator.key()] = hostPasswords;
	}

	m_passwords = passwords;
}
// Serializes m_passwords to passwords.json as compact JSON; hosts that have
// no entries are skipped entirely.
void FilePasswordsStorageBackend::save()
{
	QFile file(SessionsManager::getWritableDataPath(QLatin1String("passwords.json")));

	if (!file.open(QIODevice::WriteOnly | QIODevice::Text))
	{
		Console::addMessage(tr("Failed to save passwords file: %1").arg(file.errorString()), Console::OtherCategory, Console::ErrorLevel, file.fileName());

		return;
	}

	QJsonObject hostsObject;
	QHash<QString, QVector<PasswordsManager::PasswordInformation> >::iterator hostsIterator;

	for (hostsIterator = m_passwords.begin(); hostsIterator != m_passwords.end(); ++hostsIterator)
	{
		if (hostsIterator.value().isEmpty())
		{
			continue;
		}

		QJsonArray hostArray;
		const QVector<PasswordsManager::PasswordInformation> passwords(hostsIterator.value());

		for (int i = 0; i < passwords.count(); ++i)
		{
			QJsonArray fieldsArray;

			for (int j = 0; j < passwords.at(i).fields.count(); ++j)
			{
				fieldsArray.append(QJsonObject({{QLatin1String("name"), passwords.at(i).fields.at(j).name}, {QLatin1String("value"), passwords.at(i).fields.at(j).value}, {QLatin1String("type"), ((passwords.at(i).fields.at(j).type == PasswordsManager::PasswordField) ? QLatin1String("password") : QLatin1String("text"))}}));
			}

			QJsonObject passwordObject({{QLatin1String("url"), passwords.at(i).url.toString()}});

			// Timestamps are optional; only valid ones are persisted.
			if (passwords.at(i).timeAdded.isValid())
			{
				passwordObject.insert(QLatin1String("timeAdded"), passwords.at(i).timeAdded.toString(Qt::ISODate));
			}

			if (passwords.at(i).timeUsed.isValid())
			{
				passwordObject.insert(QLatin1String("timeUsed"), passwords.at(i).timeUsed.toString(Qt::ISODate));
			}

			passwordObject.insert(QLatin1String("type"), ((passwords.at(i).type == PasswordsManager::AuthPassword) ? QLatin1String("auth") : QLatin1String("form")));
			passwordObject.insert(QLatin1String("fields"), fieldsArray);

			hostArray.append(passwordObject);
		}

		hostsObject.insert(hostsIterator.key(), hostArray);
	}

	file.write(QJsonDocument(hostsObject).toJson(QJsonDocument::Compact));
	file.close();
}
// Removes every stored password for the given host and persists the change.
// An empty host is ignored; nothing happens when the host has no entries.
void FilePasswordsStorageBackend::clearPasswords(const QString &host)
{
	if (host.isEmpty())
	{
		return;
	}

	if (!m_isInitialized)
	{
		initialize();
	}

	if (m_passwords.isEmpty() || !m_passwords.contains(host))
	{
		return;
	}

	m_passwords.remove(host);

	emit passwordsModified();

	save();
}
// Clears passwords added within the last (period) hours; a non-positive
// period deletes the passwords file and the whole in-memory store instead.
void FilePasswordsStorageBackend::clearPasswords(int period)
{
	if (period <= 0)
	{
		const QString path(SessionsManager::getWritableDataPath(QLatin1String("passwords.json")));

		if (!QFile::remove(path))
		{
			Console::addMessage(tr("Failed to remove passwords file"), Console::OtherCategory, Console::ErrorLevel, path);
		}
		else if (!m_passwords.isEmpty())
		{
			m_passwords.clear();

			emit passwordsModified();
		}
	}

	// NOTE(review): there is no early return above, so a non-positive period
	// also falls through to the age-based loop below; with period <= 0 the
	// threshold is non-positive and nothing further is removed, but an
	// explicit return would make the intent clearer.
	if (!m_isInitialized)
	{
		initialize();
	}

	if (m_passwords.isEmpty())
	{
		return;
	}

	QHash<QString, QVector<PasswordsManager::PasswordInformation> >::iterator iterator(m_passwords.begin());
	bool wasModified(false);

	while (iterator != m_passwords.end())
	{
		QVector<PasswordsManager::PasswordInformation> passwords(iterator.value());

		// Iterate backwards so removals do not shift pending indices.
		for (int i = (passwords.count() - 1); i >= 0; --i)
		{
			if (passwords.at(i).timeAdded.secsTo(QDateTime::currentDateTimeUtc()) < (period * 3600))
			{
				passwords.removeAt(i);

				wasModified = true;
			}
		}

		// Drop hosts whose entry lists became empty.
		if (passwords.isEmpty())
		{
			iterator = m_passwords.erase(iterator);
		}
		else
		{
			m_passwords[iterator.key()] = passwords;

			++iterator;
		}
	}

	if (wasModified)
	{
		save();

		emit passwordsModified();
	}
}
// Stores a password for its host. A full duplicate is ignored; a partial
// match (same identity, different secret) replaces the existing entry;
// otherwise the password is appended. Persists and notifies on change.
void FilePasswordsStorageBackend::addPassword(const PasswordsManager::PasswordInformation &password)
{
	if (!m_isInitialized)
	{
		initialize();
	}

	const QString host(Utils::extractHost(password.url));

	if (m_passwords.contains(host))
	{
		const QVector<PasswordsManager::PasswordInformation> passwords(m_passwords[host]);

		for (int i = 0; i < passwords.count(); ++i)
		{
			// Compare once per entry; the original evaluated
			// comparePasswords() twice for every partially matching entry.
			const PasswordsManager::PasswordMatch match(comparePasswords(password, passwords.at(i)));

			if (match == PasswordsManager::FullMatch)
			{
				return;
			}

			if (match == PasswordsManager::PartialMatch)
			{
				m_passwords[host].replace(i, password);

				emit passwordsModified();

				save();

				return;
			}
		}
	}
	else
	{
		m_passwords[host] = {};
	}

	m_passwords[host].append(password);

	emit passwordsModified();

	save();
}
// Removes the first stored entry for the password's host that matches it
// (partially or fully); persists and notifies when something was removed.
void FilePasswordsStorageBackend::removePassword(const PasswordsManager::PasswordInformation &password)
{
	if (!m_isInitialized)
	{
		initialize();
	}

	const QString host(Utils::extractHost(password.url));

	if (!m_passwords.contains(host))
	{
		return;
	}

	const QVector<PasswordsManager::PasswordInformation> passwords(m_passwords[host]);

	for (int i = 0; i < passwords.count(); ++i)
	{
		if (comparePasswords(password, passwords.at(i)) != PasswordsManager::NoMatch)
		{
			m_passwords[host].removeAt(i);

			emit passwordsModified();

			save();

			return;
		}
	}
}
// Internal identifier of this backend.
QString FilePasswordsStorageBackend::getName() const
{
	return QLatin1String("file");
}

// Translatable, human-readable title of the backend.
QString FilePasswordsStorageBackend::getTitle() const
{
	return tr("Encrypted File");
}

// Translatable one-line description of the backend.
QString FilePasswordsStorageBackend::getDescription() const
{
	return tr("Stores passwords in AES encrypted file.");
}

QString FilePasswordsStorageBackend::getVersion() const
{
	return QLatin1String("1.0");
}

QUrl FilePasswordsStorageBackend::getHomePage() const
{
	return QUrl(QLatin1String("https://otter-browser.org/"));
}

// Returns all hosts that currently have stored passwords (loads lazily).
QStringList FilePasswordsStorageBackend::getHosts()
{
	if (!m_isInitialized)
	{
		initialize();
	}

	return m_passwords.keys();
}
// Returns the stored passwords for the URL's host, optionally filtered by
// password type; returns an empty vector when the host is unknown.
QVector<PasswordsManager::PasswordInformation> FilePasswordsStorageBackend::getPasswords(const QUrl &url, PasswordsManager::PasswordTypes types)
{
	if (!m_isInitialized)
	{
		initialize();
	}

	const QString host(Utils::extractHost(url));

	if (m_passwords.contains(host))
	{
		// Fast path: no filtering requested.
		if (types == PasswordsManager::AnyPassword)
		{
			return m_passwords[host];
		}

		const QVector<PasswordsManager::PasswordInformation> passwords(m_passwords[host]);
		QVector<PasswordsManager::PasswordInformation> matchingPasswords;

		for (int i = 0; i < passwords.count(); ++i)
		{
			if (types.testFlag(passwords.at(i).type))
			{
				matchingPasswords.append(passwords.at(i));
			}
		}

		return matchingPasswords;
	}

	return {};
}

// Returns the match kind of the first stored entry that matches the given
// password (NoMatch when the host is unknown or nothing matches).
PasswordsManager::PasswordMatch FilePasswordsStorageBackend::hasPassword(const PasswordsManager::PasswordInformation &password)
{
	if (!m_isInitialized)
	{
		initialize();
	}

	const QString host(Utils::extractHost(password.url));

	if (!m_passwords.contains(host))
	{
		return PasswordsManager::NoMatch;
	}

	const QVector<PasswordsManager::PasswordInformation> passwords(m_passwords[host]);

	for (int i = 0; i < passwords.count(); ++i)
	{
		const PasswordsManager::PasswordMatch match(comparePasswords(password, passwords.at(i)));

		if (match != PasswordsManager::NoMatch)
		{
			return match;
		}
	}

	return PasswordsManager::NoMatch;
}

// Tells whether the URL's host has any stored password of the given types.
bool FilePasswordsStorageBackend::hasPasswords(const QUrl &url, PasswordsManager::PasswordTypes types)
{
	if (!m_isInitialized)
	{
		initialize();
	}

	const QString host(Utils::extractHost(url));

	if (types == PasswordsManager::AnyPassword)
	{
		return m_passwords.contains(host);
	}

	if (m_passwords.contains(host))
	{
		const QVector<PasswordsManager::PasswordInformation> passwords(m_passwords[host]);

		for (int i = 0; i < passwords.count(); ++i)
		{
			if (types.testFlag(passwords.at(i).type))
			{
				return true;
			}
		}
	}

	return false;
}
}
| {
"pile_set_name": "Github"
} |
<html>
<head><title>Minified Web Test: {{file}}</title>
<style type="text/css">
body { font: 20px/1.5 "Helvetica Neue", Helvetica, Arial, sans-serif; }
#pageContainer { margin: 0.5em; }
h1, h2, h3, h4 { font-weight: 200; }
#container, #container2, #cloneTest { display: none; }
.x { margin-top: 2px; } /* required for testing computed css in minified-web-getset-test.js */
.hidden { display: none; } /* required for testing show() */
</style>
<link rel="stylesheet" href="mocha.css" />
<script id="minifiedTemplate" type="minified-template">{{'{'+'{a}'+'}'}}</script>
<script type="text/javascript" src="sparkplug.js"></script>
<script type="text/javascript">
var q = window.location.search, undefined;
var isLegacyIE = (!!document.all && ![].map); // remove non-ie versions on old ie
// Logs the stack trace (when a console exists) and throws an Error.
function fail(text) {
	var e = new Error(text);
	if (window.console) console.log(e.stack);
	throw e;
}
// Asserts value == expected; when expected is omitted, asserts that value
// is truthy. Note: omission is detected via undefined, so an expected value
// of undefined cannot be tested for equality with this helper. For long
// string mismatches the first differing character index is logged.
function check(value, expected, extraMsg) {
	var undefined;
	if (expected === undefined) {
		if (!value) {
			if (window.console) console.log("Check for true failed, given:", value, extraMsg?extraMsg:'');
			fail("Value '"+value+"' is not true. " + (extraMsg?extraMsg:''));
		}
	}
	else
	if (value != expected) {
		if (window.console) {
			console.log("Check failed, given:", value, " expected:", expected, extraMsg?extraMsg:'');
			if (typeof value == 'string' && typeof expected == 'string' && value.length+expected.length > 15) {
				// Locate and report the first diverging character position.
				for (var i = 0; i<value.length && i<expected.length; i++)
					if (value.charAt(i) != expected.charAt(i)) {
						console.log("Diff at ", i, ": '", value.charAt(i), "' != '", expected.charAt(i) + "'")
						break;
					}
			}
		}
		fail("Value '"+value+"' given, but expected '"+expected+"'. " + (extraMsg?extraMsg:''));
	}
}
// Returns true when value occurs in list; otherwise calls fail() with a
// descriptive message. checkIdentity: compare with === only (skip the loose
// == fallback). extraMsg: optional text appended to the failure message.
function contains(list, value, checkIdentity, extraMsg) {
	for (var i = 0; i < list.length; i++)
		if (list[i] === value || (!checkIdentity && list[i] == value))
			return true;
	// Parenthesize the conditional: '+' binds tighter than '?:', so the
	// original expression evaluated the whole concatenation as the condition
	// and passed only extraMsg (or '') to fail(), losing the message.
	fail("Can not find '"+value+"' in list. " + (extraMsg ? extraMsg : ''));
}
// Asserts that list has the same length as expectedValueList and that every
// expected value occurs somewhere in list (order-insensitive).
function containsAll(list, expectedValueList, checkIdentity, extraMsg) {
	check(list.length, expectedValueList.length, 'Testing for array length' + (extraMsg?(': '+extraMsg):''));
	for (var i = 0; i < expectedValueList.length; i++)
		contains(list, expectedValueList[i], checkIdentity, 'Checked index='+i+'.' + (extraMsg?(': '+extraMsg):''));
}
// Advances to the next test page when the URL query contains 'forward';
// legacy IE (document.all without Array.prototype.map) uses its own chain.
function forward() {
	if (/forward/.test(q) && (isLegacyIE ? '{{nextFileLegacy}}' : '{{nextFile}}'))
		document.location.href = isLegacyIE ? '{{nextFileLegacy}}' : '{{nextFile}}';
}
</script>
<script type="text/javascript" src="{{file}}"></script>
<script src="mocha.js"></script>
<script>
var MINI = require('minified'), $ = MINI.$, $$=MINI.$$, EE = MINI.EE, HTML = MINI.HTML;
var _ = MINI._;
var isPhantomJS = /PhantomJS/.test(navigator.userAgent);
mocha.setup('bdd');
</script>
<script src="minified-web-selector-test.js"></script>
<script src="minified-web-element-test.js"></script>
<script src="minified-web-class-test.js"></script>
<script src="minified-web-misc-test.js"></script>
<script src="minified-web-event-test.js"></script>
<script src="minified-web-forms-test.js"></script>
<script src="minified-web-getset-test.js"></script>
<script src="minified-web-json-test.js"></script>
<script src="minified-web-lists-test.js"></script>
<script src="minified-web-toggle-test.js"></script>
<script src="minified-web-promises-test.js"></script>
</head>
<body>
<div id="pageContainer">
<h2 id="title">Minified Web Test: {{file}}</h2>
<div><button onclick="document.location.href = 'autorun.html'; return false;">Rerun All Tests</button>
<button onclick="document.location.href = 'vistest.html'; return false;">Vistest</button></div>
<div id="mocha"></div>
<div id="container">
<div id="a" class="x" style="margin-top: 5px;" title="tititi" data-x="bar" data-yy="foo">
<p><b>div id="a" class="x"</b></p>
<div id="a_a" class="y z">
<p>div id="a_a" class="y z"</p>
</div>
<div id="a_b" class="m x z">
<p>div id="a_b" class="m x z"</p>
</div>
</div>
<div id="b" class="n">
<p><b>div id="b" class="n"</b></p>
<div id="b_a" class="x y">
<p>div id="b_a" class="x y"</p>
</div>
<div id="b_b" class="n x">
<p>div id="b_b" class="n x"</p>
</div>
</div>
<div id="c" class="m">
<p><b>div id="b" class="m"</b></p>
<div id="c_a" class="x y r-r">
<p>div id="c_a" class="x y r-r"</p>
</div>
<div id="c_b" class="m z">
<p>div id="c_b" class="m z"</p>
</div>
</div>
</div>
<div id="container2">
</div>
<div id="cloneTest">
<div class="cloneMe">
<p>Test</p>
UglyChar: ß
<a href="http://example.com/" title="Teest">Test</a>
<strong><span class="bla342">abc<code>def</code>ds</span></strong>
<span style="margin: 1px">dd</span>
</div>
<div id="cloneId">nonono</div>
</div>
<div id="playground">
</div>
<div id="formContainer" style="display: none">
<form id="id1" name="form1">
<input id="id1_1" name="i1_1" value="a" />
<input id="id1_2a" name="i1_2" value="b1" />
<input id="id1_2b" name="i1_2" value="b2" />
<input id="id1_2c" name="i1_2" value="b3" />
<input id="id1_3" name="i1_3" value="c" type="hidden" />
<input id="id1_4" name="i1_4" value="d" type="password" />
<input id="id1_5" name="i1_5" value="e" type="checkbox" />
<input id="id1_6a" name="i1_6" value="f" type="checkbox" checked="checked"/>
<input id="id1_6b" name="i1_6" value="g" type="checkbox" checked="checked"/>
<input id="id1_6c" name="i1_6" value="h" type="checkbox" />
<input id="id1_6d" name="i1_6" value="i" type="checkbox" checked="checked"/>
<input id="id1_7" name="i1_7" value="j" type="checkbox" checked="checked"/>
<input id="id1_8a" name="i1_8" value="x" type="radio" />
<input id="id1_8b" name="i1_8" value="y" type="radio" checked="checked" />
<input id="id1_8c" name="i1_8" value="z" type="radio" />
<textarea id="id1_9" name="i1_9">abc</textarea>
<input id="id1_10" name="i1_10" />
</form>
<form id="id2" name="form2">
<input id="id2_1" name="i2_1" value="bb" />
<input id="id2_2" name="i2_2" value="bc" />
</form>
<input id="id3_1" name="i3_1" value="ttt" />
<span>test</span>
</div>
</div>
</body>
<script type="text/javascript">
var failures = 0;
mocha._reporter = function MHTML(runner, base) {
this.html = new Mocha.reporters.HTML(runner, base);
runner.on('fail', function() {
failures++;
});
};
// mocha.checkLeaks(); - disable: does not work with many Chrome extensions
if (!isPhantomJS)
mocha.run(function() {
if (failures == 0)
forward();
document.getElementById('title').style.color = failures ? 'red' : 'black';
});
</script>
</html>
| {
"pile_set_name": "Github"
} |
<?php
// Range lookup table: maps a range start to [range end, code].
// 2130706432-2147483647 is 127.0.0.0-127.255.255.255 (the loopback /8);
// "ZZ" presumably marks reserved/unknown addresses - confirm against the
// consumer of $ranges.
$ranges=Array(
"2130706432" => array("2147483647","ZZ"),
);
?>
"pile_set_name": "Github"
} |
from datetime import datetime
from io import StringIO
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series
import pandas._testing as tm
from pandas.io.common import get_handle
class TestSeriesToCSV:
def read_csv(self, path, **kwargs):
params = dict(squeeze=True, index_col=0, header=None, parse_dates=True)
params.update(**kwargs)
header = params.get("header")
out = pd.read_csv(path, **params)
if header is None:
out.name = out.index.name = None
return out
    def test_from_csv(self, datetime_series, string_series):
        # Round-trip both a datetime-indexed and a string-indexed Series
        # through to_csv/read_csv without headers.
        with tm.ensure_clean() as path:
            datetime_series.to_csv(path, header=False)
            ts = self.read_csv(path)
            tm.assert_series_equal(datetime_series, ts, check_names=False)

            # Without a header, neither the Series nor its index keeps a name.
            assert ts.name is None
            assert ts.index.name is None

            # see gh-10483
            datetime_series.to_csv(path, header=True)
            ts_h = self.read_csv(path, header=0)
            assert ts_h.name == "ts"

            string_series.to_csv(path, header=False)
            series = self.read_csv(path)
            tm.assert_series_equal(string_series, series, check_names=False)

            assert series.name is None
            assert series.index.name is None

            string_series.to_csv(path, header=True)
            series_h = self.read_csv(path, header=0)
            assert series_h.name == "series"

            # Custom separator, with and without date parsing.
            with open(path, "w") as outfile:
                outfile.write("1998-01-01|1.0\n1999-01-01|2.0")

            series = self.read_csv(path, sep="|")
            check_series = Series(
                {datetime(1998, 1, 1): 1.0, datetime(1999, 1, 1): 2.0}
            )
            tm.assert_series_equal(check_series, series)

            series = self.read_csv(path, sep="|", parse_dates=False)
            check_series = Series({"1998-01-01": 1.0, "1999-01-01": 2.0})
            tm.assert_series_equal(check_series, series)
def test_to_csv(self, datetime_series):
import io
with tm.ensure_clean() as path:
datetime_series.to_csv(path, header=False)
with io.open(path, newline=None) as f:
lines = f.readlines()
assert lines[1] != "\n"
datetime_series.to_csv(path, index=False, header=False)
arr = np.loadtxt(path)
tm.assert_almost_equal(arr, datetime_series.values)
    def test_to_csv_unicode_index(self):
        # Round-trip a Series with non-ASCII (Hebrew) values and index.
        buf = StringIO()
        s = Series(["\u05d0", "d2"], index=["\u05d0", "\u05d1"])

        s.to_csv(buf, encoding="UTF-8", header=False)
        buf.seek(0)

        s2 = self.read_csv(buf, index_col=0, encoding="UTF-8")
        tm.assert_series_equal(s, s2)

    def test_to_csv_float_format(self):
        # float_format is applied when writing (here: two decimal places).
        with tm.ensure_clean() as filename:
            ser = Series([0.123456, 0.234567, 0.567567])
            ser.to_csv(filename, float_format="%.2f", header=False)

            rs = self.read_csv(filename)
            xp = Series([0.12, 0.23, 0.57])
            tm.assert_series_equal(rs, xp)

    def test_to_csv_list_entries(self):
        # Cells containing lists (from str.split) must not break the writer.
        s = Series(["jack and jill", "jesse and frank"])

        split = s.str.split(r"\s+and\s+")

        buf = StringIO()
        split.to_csv(buf, header=False)

    def test_to_csv_path_is_none(self):
        # GH 8215
        # Series.to_csv() was returning None, inconsistent with
        # DataFrame.to_csv() which returned string
        s = Series([1, 2, 3])
        csv_str = s.to_csv(path_or_buf=None, header=False)
        assert isinstance(csv_str, str)
@pytest.mark.parametrize(
    "s,encoding",
    [
        (
            Series([0.123456, 0.234567, 0.567567], index=["A", "B", "C"], name="X"),
            None,
        ),
        # GH 21241, 21118
        (Series(["abc", "def", "ghi"], name="X"), "ascii"),
        (Series(["123", "你好", "世界"], name="中文"), "gb2312"),
        (Series(["123", "Γειά σου", "Κόσμε"], name="Ελληνικά"), "cp737"),
    ],
)
def test_to_csv_compression(self, s, encoding, compression):
    """Compressed to_csv round-trips via path, file handle, and raw stream."""
    with tm.ensure_clean() as filename:
        s.to_csv(filename, compression=compression, encoding=encoding, header=True)

        # Round trip 1: write by path, read by path.
        result = pd.read_csv(
            filename,
            compression=compression,
            encoding=encoding,
            index_col=0,
            squeeze=True,
        )
        tm.assert_series_equal(s, result)

        # Round trip 2: write through an explicit file handle.
        f, _handles = get_handle(
            filename, "w", compression=compression, encoding=encoding
        )
        with f:
            s.to_csv(f, encoding=encoding, header=True)
        result = pd.read_csv(
            filename,
            compression=compression,
            encoding=encoding,
            index_col=0,
            squeeze=True,
        )
        tm.assert_series_equal(s, result)

        # Explicitly ensure the file on disk was really compressed.
        with tm.decompress_file(filename, compression) as fh:
            text = fh.read().decode(encoding or "utf8")
            assert s.name in text

        with tm.decompress_file(filename, compression) as fh:
            tm.assert_series_equal(
                s, pd.read_csv(fh, index_col=0, squeeze=True, encoding=encoding)
            )
def test_to_csv_interval_index(self):
    # GH 28210
    ser = Series(["foo", "bar", "baz"], index=pd.interval_range(0, 3))

    with tm.ensure_clean("__tmp_to_csv_interval_index__.csv") as path:
        ser.to_csv(path, header=False)
        result = self.read_csv(path, index_col=0, squeeze=True)

        # can't roundtrip intervalindex via read_csv so check string repr (GH 23595)
        expected = ser.copy()
        expected.index = expected.index.astype(str)
        tm.assert_series_equal(result, expected)
class TestSeriesIO:
    """Conversion round trips for Series: to_frame and pickle."""

    def test_to_frame(self, datetime_series):
        # Nameless series -> unnamed single column.
        datetime_series.name = None
        result = datetime_series.to_frame()
        expected = pd.DataFrame(datetime_series.values, index=datetime_series.index)
        tm.assert_frame_equal(result, expected)

        # Named series -> column named after the series.
        datetime_series.name = "testname"
        result = datetime_series.to_frame()
        expected = pd.DataFrame(
            {"testname": datetime_series.values}, index=datetime_series.index
        )
        tm.assert_frame_equal(result, expected)

        # Explicit name argument overrides the series name.
        result = datetime_series.to_frame(name="testdifferent")
        expected = pd.DataFrame(
            {"testdifferent": datetime_series.values}, index=datetime_series.index
        )
        tm.assert_frame_equal(result, expected)

    def test_timeseries_periodindex(self):
        # GH2891
        from pandas import period_range

        rng = period_range("1/1/2011", "1/1/2012", freq="M")
        ts = Series(np.random.randn(len(rng)), rng)
        pickled = tm.round_trip_pickle(ts)
        assert pickled.index.freq == "M"

    def test_pickle_preserve_name(self):
        for name in [777, 777.0, "name", datetime(2001, 11, 11), (1, 2)]:
            restored = self._pickle_roundtrip_name(tm.makeTimeSeries(name=name))
            assert restored.name == name

    def _pickle_roundtrip_name(self, obj):
        # Write via the public pickle API and read straight back.
        with tm.ensure_clean() as path:
            obj.to_pickle(path)
            return pd.read_pickle(path)

    def test_to_frame_expanddim(self):
        # GH 9762
        class SubclassedSeries(Series):
            @property
            def _constructor_expanddim(self):
                return SubclassedFrame

        class SubclassedFrame(DataFrame):
            pass

        ser = SubclassedSeries([1, 2, 3], name="X")
        result = ser.to_frame()
        assert isinstance(result, SubclassedFrame)
        expected = SubclassedFrame({"X": [1, 2, 3]})
        tm.assert_frame_equal(result, expected)
| {
"pile_set_name": "Github"
} |
import { createDuration } from './create';
// Fold another duration into `duration`, scaled by direction (+1 add, -1 subtract),
// then rebalance the internal fields via _bubble().
function addSubtract (duration, input, value, direction) {
    var delta = createDuration(input, value);
    duration._milliseconds += direction * delta._milliseconds;
    duration._days += direction * delta._days;
    duration._months += direction * delta._months;
    return duration._bubble();
}
// supports only 2.0-style add(1, 's') or add(duration)
// Delegates to addSubtract with direction = +1.
export function add (input, value) {
return addSubtract(this, input, value, 1);
}
// supports only 2.0-style subtract(1, 's') or subtract(duration)
// Delegates to addSubtract with direction = -1.
export function subtract (input, value) {
return addSubtract(this, input, value, -1);
}
| {
"pile_set_name": "Github"
} |
Não Seja um Idiota: A Licença para Software Livre
Última Atualização: 19 de Março de 2015
Última Tradução: 05 de Outubro de 2015
Este software é livre e aberto.
- *Eu* sou o autor do software. *Eu* posso ser um *nós*, mas isto está OK.
- *Você* é o usuário deste software. *Você* também pode ser um *nós*, e isto também está OK.
> Este software é livre. Eu nunca vou te cobrar para usar, licenciar, ou obter este software. Fazendo isso eu estaria sendo um idiota.
> Eu nunca vou desativar ou começar a cobrar pelo que está disponível hoje. Fazendo isso eu estaria sendo um idiota.
> Você pode usar este código (e por "código" eu quero dizer *qualquer coisa* contida neste projeto) para o que você quiser. Uso pessoal, uso educacional, uso corporativo, uso militar, todos estão liberados! Limitar como você pode usar algo que é livre e gratuito faria de mim um idiota.
> Eu não ofereço nenhum tipo de garantia. Eu tentei me assegurar de que não há nenhuma falha de segurança que automaticamente mande o número do seu cartão de crédito para aliens ou apague todos os dados no seu HD, mas isso pode acontecer. Sinto muito. No entanto, eu te avisei. Então você não pode me processar. Processar pessoas por software livre faria de você um idiota.
> Se você achar bugs, seria legal me avisar para que eu possa consertá-los. Você não é obrigado a isso, mas não fazer isso faria de você um idiota.
> Por falar de bugs, eu não sou obrigado a corrigir nada nem a adicionar funcionalidades por você. Se sentir no direito de exigir coisas de um software livre faria de você um idiota.
> Se você adicionar uma nova funcionalidade ou corrigir um bug, seria legal contribuir com o projeto. Você não é obrigado a isso, mas não fazer isso o tornaria um idiota. O repositório/site de onde você conseguiu esse software deve conter uma forma para você entrar em contato comigo. Contribuir com open source faz de você fantástico!
> Se você usa esse software, você não tem que me dar crédito, mas seria legal.
Não seja um idiota.
Aproveite seu software livre!
| {
"pile_set_name": "Github"
} |
{
"rulesDirectory": [
"tslint-no-unused-expression-chai"
],
"rules": {
"align": [
true
],
"array-type": [
true,
"array-simple"
],
"await-promise": true,
"class-name": true,
"curly": true,
"eofline": true,
"member-access": true,
"no-return-await": true,
"no-unused-expression-chai": true,
"no-duplicate-variable": true,
"promise-function-async": true,
"semicolon": true,
"triple-equals": true,
"quotemark": true
}
} | {
"pile_set_name": "Github"
} |
[[
"start",
["punctuation.definition.comment.io","//"],
["comment.line.double-slash.io"," computes factorial of a number"]
],[
"start",
["text","factorial "],
["keyword.operator.io",":="],
["text"," "],
["support.function.io","method"],
["text","(n,"]
],[
"start",
["text"," "],
["keyword.control.io","if"],
["text","(n "],
["keyword.operator.io","=="],
["text"," "],
["constant.numeric.io","0"],
["text",", "],
["keyword.control.io","return"],
["text"," "],
["constant.numeric.io","1"],
["text",")"]
],[
"start",
["text"," res "],
["keyword.operator.io",":="],
["text"," "],
["constant.numeric.io","1"]
],[
"start",
["text"," "],
["support.class.io","Range"],
["text"," "],
["constant.numeric.io","1"],
["text"," "],
["support.function.io","to"],
["text","(n) "],
["keyword.control.io","foreach"],
["text","(i, res "],
["keyword.operator.io","="],
["text"," res "],
["keyword.operator.io","*"],
["text"," i)"]
],[
"start",
["text",")"]
]] | {
"pile_set_name": "Github"
} |
/***
*safeint.h - SafeInt class and free-standing functions used to prevent arithmetic overflows
*
* Copyright (c) Microsoft Corporation. All rights reserved.
*
*Purpose:
*
* The SafeInt class is designed to have as low an overhead as possible
* while still ensuring that all integer operations are conducted safely.
* Nearly every operator has been overloaded, with a very few exceptions.
*
* A usability-safety trade-off has been made to help ensure safety. This
* requires that every operation return either a SafeInt or a bool. If we
* allowed an operator to return a base integer type T, then the following
* can happen:
*
* char i = SafeInt<char>(32) * 2 + SafeInt<char>(16) * 4;
*
* The * operators take precedence, get overloaded, return a char, and then
* you have:
*
* char i = (char)64 + (char)64; //overflow!
*
* This situation would mean that safety would depend on usage, which isn't
* acceptable.
*
* One key operator that is missing is an implicit cast to type T. The reason for
* this is that if there is an implicit cast operator, then we end up with
 * an ambiguous compile-time precedence. Because of this ambiguity, there
* are two methods that are provided:
*
* Casting operators for every native integer type
*
* SafeInt::Ptr() - returns the address of the internal integer
*
* The SafeInt class should be used in any circumstances where ensuring
* integrity of the calculations is more important than performance. See Performance
* Notes below for additional information.
*
* Many of the conditionals will optimize out or be inlined for a release
* build (especially with /Ox), but it does have significantly more overhead,
* especially for signed numbers. If you do not _require_ negative numbers, use
* unsigned integer types - certain types of problems cannot occur, and this class
* performs most efficiently.
*
* Here's an example of when the class should ideally be used -
*
* void* AllocateMemForStructs(int StructSize, int HowMany)
* {
* SafeInt<unsigned long> s(StructSize);
*
* s *= HowMany;
*
* return malloc(s);
*
* }
*
* Here's when it should NOT be used:
*
* void foo()
* {
* int i;
*
* for(i = 0; i < 0xffff; i++)
* ....
* }
*
* Error handling - a SafeInt class will throw exceptions if something
* objectionable happens. The exceptions are SafeIntException classes,
* which contain an enum as a code.
*
* Typical usage might be:
*
* bool foo()
* {
* SafeInt<unsigned long> s; //note that s == 0 unless set
*
* try{
* s *= 23;
* ....
* }
* catch(SafeIntException err)
* {
* //handle errors here
* }
* }
*
* SafeInt accepts an error policy as an optional template parameter.
* We provide two error policy along with SafeInt: SafeIntErrorPolicy_SafeIntException, which
* throws SafeIntException in case of error, and SafeIntErrorPolicy_InvalidParameter, which
* calls _invalid_parameter to terminate the program.
*
* You can replace the error policy class with any class you like. This is accomplished by:
* 1) Create a class that has the following interface:
*
* struct YourSafeIntErrorPolicy
* {
* static __declspec(noreturn) void __stdcall SafeIntOnOverflow()
* {
* throw YourException( YourSafeIntArithmeticOverflowError );
* // or do something else which will terminate the program
* }
*
* static __declspec(noreturn) void __stdcall SafeIntOnDivZero()
* {
* throw YourException( YourSafeIntDivideByZeroError );
* // or do something else which will terminate the program
* }
* };
*
* Note that you don't have to throw C++ exceptions, you can throw Win32 exceptions, or do
* anything you like, just don't return from the call back into the code.
*
* 2) Either explicitly declare SafeInts like so:
* SafeInt< int, YourSafeIntErrorPolicy > si;
* or, before including SafeInt:
* #define _SAFEINT_DEFAULT_ERROR_POLICY ::YourSafeIntErrorPolicy
*
* Performance:
*
* Due to the highly nested nature of this class, you can expect relatively poor
* performance in unoptimized code. In tests of optimized code vs. correct inline checks
* in native code, this class has been found to take approximately 8% more CPU time (this varies),
* most of which is due to exception handling.
*
* Binary Operators:
*
* All of the binary operators have certain assumptions built into the class design.
* This is to ensure correctness. Notes on each class of operator follow:
*
* Arithmetic Operators (*,/,+,-,%)
* There are three possible variants:
* SafeInt< T, E > op SafeInt< T, E >
* SafeInt< T, E > op U
* U op SafeInt< T, E >
*
* The SafeInt< T, E > op SafeInt< U, E > variant is explicitly not supported, and if you try to do
 * this the compiler will throw the following error:
*
* error C2593: 'operator *' is ambiguous
*
* This is because the arithmetic operators are required to return a SafeInt of some type.
* The compiler cannot know whether you'd prefer to get a type T or a type U returned. If
* you need to do this, you need to extract the value contained within one of the two using
* the casting operator. For example:
*
* SafeInt< T, E > t, result;
* SafeInt< U, E > u;
*
* result = t * (U)u;
*
* Comparison Operators:
*
* Because each of these operators return type bool, mixing SafeInts of differing types is
* allowed.
*
* Shift Operators:
*
* Shift operators always return the type on the left hand side of the operator. Mixed type
* operations are allowed because the return type is always known.
*
* Boolean Operators:
*
* Like comparison operators, these overloads always return type bool, and mixed-type SafeInts
* are allowed. Additionally, specific overloads exist for type bool on both sides of the
* operator.
*
* Binary Operators:
*
* Mixed-type operations are discouraged, however some provision has been made in order to
* enable things like:
*
* SafeInt<char> c = 2;
*
* if(c & 0x02)
* ...
*
* The "0x02" is actually an int, and it needs to work.
* In the case of binary operations on integers smaller than 32-bit, or of mixed type, corner
* cases do exist where you could get unexpected results. In any case where SafeInt returns a different
* result than the underlying operator, it will call _ASSERTE(). You should examine your code and cast things
* properly so that you are not programming with side effects.
*
* Comparison Operators and ANSI Conversions:
*
* The comparison operator behavior in this class varies from the ANSI definition.
* As an example, consider the following:
*
* unsigned int l = 0xffffffff;
* char c = -1;
*
* if(c == l)
* printf("Why is -1 equal to 4 billion???\n");
*
* The problem here is that c gets cast to an int, now has a value of 0xffffffff, and then gets
* cast again to an unsigned int, losing the true value. This behavior is despite the fact that
* an __int64 exists, and the following code will yield a different (and intuitively correct)
* answer:
*
* if((__int64)c == (__int64)l))
* printf("Why is -1 equal to 4 billion???\n");
* else
* printf("Why doesn't the compiler upcast to 64-bits when needed?\n");
*
* Note that combinations with smaller integers won't display the problem - if you
* changed "unsigned int" above to "unsigned short", you'd get the right answer.
*
* If you prefer to retain the ANSI standard behavior insert, before including safeint.h:
*
* #define _SAFEINT_ANSI_CONVERSIONS 1
*
* into your source. Behavior differences occur in the following cases:
* 8, 16, and 32-bit signed int, unsigned 32-bit int
* any signed int, unsigned 64-bit int
* Note - the signed int must be negative to show the problem
*
****/
#pragma once
#include <corecrt.h>
#include <crtdbg.h>
#if !defined (_SAFEINT_DEFAULT_ERROR_POLICY)
#define _SAFEINT_DEFAULT_ERROR_POLICY SafeIntErrorPolicy_SafeIntException
#endif /* !defined (_SAFEINT_DEFAULT_ERROR_POLICY) */
#if !defined (_SAFEINT_SHIFT_ASSERT)
#define _SAFEINT_SHIFT_ASSERT(x) _ASSERTE(x)
#endif /* !defined (_SAFEINT_SHIFT_ASSERT) */
#if !defined (_SAFEINT_BINARY_ASSERT)
#define _SAFEINT_BINARY_ASSERT(x) _ASSERTE(x)
#endif /* !defined (_SAFEINT_BINARY_ASSERT) */
#if !defined (_SAFEINT_EXCEPTION_ASSERT)
#define _SAFEINT_EXCEPTION_ASSERT()
#endif /* !defined (_SAFEINT_EXCEPTION_ASSERT) */
// by default, SafeInt will accept negation of an unsigned int;
// if you wish to disable it or assert, you can define the following
// macro to be a static assert or a runtime assert
#if !defined (_SAFEINT_UNSIGNED_NEGATION_BEHAVIOR)
#define _SAFEINT_UNSIGNED_NEGATION_BEHAVIOR()
#endif /* !defined (_SAFEINT_UNSIGNED_NEGATION_BEHAVIOR) */
// See above "Comparison Operators and ANSI Conversions" for an explanation
// of _SAFEINT_USE_ANSI_CONVERSIONS
#if !defined (_SAFEINT_USE_ANSI_CONVERSIONS)
#define _SAFEINT_USE_ANSI_CONVERSIONS 0
#endif /* !defined (_SAFEINT_USE_ANSI_CONVERSIONS) */
#pragma pack(push, _CRT_PACKING)
namespace msl
{

namespace utilities
{

// Error categories reported by the free-standing Safe* helpers and by the
// SafeInt error policies.
enum SafeIntError
{
SafeIntNoError = 0,
SafeIntArithmeticOverflow,
SafeIntDivideByZero
};

} // namespace utilities

} // namespace msl
#include "safeint_internal.h"
namespace msl
{
namespace utilities
{
class SafeIntException
{
public:
SafeIntException() { m_code = SafeIntNoError; }
SafeIntException( SafeIntError code )
{
m_code = code;
}
SafeIntError m_code;
};
// Default error policy: throws a SafeIntException carrying the error code.
// Both callbacks are noreturn - control never comes back to the caller.
struct SafeIntErrorPolicy_SafeIntException
{
static __declspec(noreturn) void SafeIntOnOverflow()
{
_SAFEINT_EXCEPTION_ASSERT();
throw SafeIntException( SafeIntArithmeticOverflow );
}

static __declspec(noreturn) void SafeIntOnDivZero()
{
_SAFEINT_EXCEPTION_ASSERT();
throw SafeIntException( SafeIntDivideByZero );
}
};
// Alternate error policy: terminates the program through the CRT
// invalid-parameter handler instead of throwing a C++ exception.
struct SafeIntErrorPolicy_InvalidParameter
{
static __declspec(noreturn) void SafeIntOnOverflow()
{
_SAFEINT_EXCEPTION_ASSERT();
_CRT_SECURE_INVALID_PARAMETER("SafeInt Arithmetic Overflow");
}

static __declspec(noreturn) void SafeIntOnDivZero()
{
_SAFEINT_EXCEPTION_ASSERT();
_CRT_SECURE_INVALID_PARAMETER("SafeInt Divide By Zero");
}
};
// Free-standing functions that can be used where you only need to check one operation
// non-class helper function so that you can check for a cast's validity
// and handle errors how you like
// Range-checked cast of From into To; returns false instead of throwing
// when the value does not fit in U.
template < typename T, typename U >
inline bool SafeCast( const T From, U& To ) throw()
{
return (details::SafeCastHelper< U, T,
details::SafeIntErrorPolicy_NoThrow >::Cast( From, To ) == SafeIntNoError);
}
// Mixed-type equality that avoids C promotion surprises (see header notes).
template < typename T, typename U >
inline bool SafeEquals( const T t, const U u ) throw()
{
return details::EqualityTest< T, U >::IsEquals( t, u );
}
// Negation of SafeEquals.
template < typename T, typename U >
inline bool SafeNotEquals( const T t, const U u ) throw()
{
return !details::EqualityTest< T, U >::IsEquals( t, u );
}
// Mixed-type t > u without promotion surprises.
template < typename T, typename U >
inline bool SafeGreaterThan( const T t, const U u ) throw()
{
return details::GreaterThanTest< T, U >::GreaterThan( t, u );
}
// t >= u expressed as !(u > t).
template < typename T, typename U >
inline bool SafeGreaterThanEquals( const T t, const U u ) throw()
{
return !details::GreaterThanTest< U, T >::GreaterThan( u, t );
}
// t < u expressed as (u > t).
template < typename T, typename U >
inline bool SafeLessThan( const T t, const U u ) throw()
{
return details::GreaterThanTest< U, T >::GreaterThan( u, t );
}
// t <= u expressed as !(t > u).
template < typename T, typename U >
inline bool SafeLessThanEquals( const T t, const U u ) throw()
{
return !details::GreaterThanTest< T, U >::GreaterThan( t, u );
}
// result = t % u; returns false on divide-by-zero instead of throwing.
template < typename T, typename U >
inline bool SafeModulus( const T& t, const U& u, T& result ) throw()
{
return ( details::ModulusHelper< T, U, details::SafeIntErrorPolicy_NoThrow >::Modulus( t, u, result ) == SafeIntNoError );
}
// result = t * u; returns false on overflow instead of throwing.
template < typename T, typename U >
inline bool SafeMultiply( T t, U u, T& result ) throw()
{
return ( details::MultiplicationHelper< T, U,
details::SafeIntErrorPolicy_NoThrow >::Multiply( t, u, result ) == SafeIntNoError );
}
// result = t / u; returns false on divide-by-zero or overflow.
template < typename T, typename U >
inline bool SafeDivide( T t, U u, T& result ) throw()
{
return ( details::DivisionHelper< T, U,
details::SafeIntErrorPolicy_NoThrow >::Divide( t, u, result ) == SafeIntNoError );
}
// result = t + u; returns false on overflow instead of throwing.
template < typename T, typename U >
inline bool SafeAdd( T t, U u, T& result ) throw()
{
return ( details::AdditionHelper< T, U,
details::SafeIntErrorPolicy_NoThrow >::Addition( t, u, result ) == SafeIntNoError );
}
// result = t - u; returns false on overflow/underflow instead of throwing.
template < typename T, typename U >
inline bool SafeSubtract( T t, U u, T& result ) throw()
{
return ( details::SubtractionHelper< T, U,
details::SafeIntErrorPolicy_NoThrow >::Subtract( t, u, result ) == SafeIntNoError );
}
// SafeInt class
template < typename T, typename E = _SAFEINT_DEFAULT_ERROR_POLICY >
class SafeInt
{
public:
// Constructors. Every integer type gets its own overload so the compiler
// cannot bypass the range checks via an implicit conversion.
SafeInt() throw()
{
static_assert( details::NumericType< T >::isInt , "SafeInt<T>: T needs to be an integer type" );
m_int = 0;
}

// Having a constructor for every type of int
// avoids having the compiler evade our checks when doing implicit casts -
// e.g., SafeInt<char> s = 0x7fffffff;
SafeInt( const T& i ) throw()
{
static_assert( details::NumericType< T >::isInt , "SafeInt<T>: T needs to be an integer type" );
//always safe
m_int = i;
}

// provide explicit boolean converter
SafeInt( bool b ) throw()
{
static_assert( details::NumericType< T >::isInt , "SafeInt<T>: T needs to be an integer type" );
m_int = b ? 1 : 0;
}

// Cross-type SafeInt copy: unboxes to U and runs the range-checked ctor below.
template < typename U >
SafeInt(const SafeInt< U, E >& u)
{
static_assert( details::NumericType< T >::isInt , "SafeInt<T>: T needs to be an integer type" );
*this = SafeInt< T, E >( (U)u );
}

template < typename U >
SafeInt( const U& i )
{
static_assert( details::NumericType< T >::isInt , "SafeInt<T>: T needs to be an integer type" );
// SafeCast will throw exceptions if i won't fit in type T
details::SafeCastHelper< T, U, E >::Cast( i, m_int );
}
// now start overloading operators
// assignment operator
// constructors exist for all int types and will ensure safety
template < typename U >
SafeInt< T, E >& operator =( const U& rhs )
{
// use constructor to test size
// constructor is optimized to do minimal checking based
// on whether T can contain U
// note - do not change this
*this = SafeInt< T, E >( rhs );
return *this;
}

// Same underlying type - always fits, no check needed.
SafeInt< T, E >& operator =( const T& rhs ) throw()
{
m_int = rhs;
return *this;
}

// Cross-type SafeInt assignment - range-checked, invokes E on overflow.
template < typename U >
SafeInt< T, E >& operator =( const SafeInt< U, E >& rhs )
{
details::SafeCastHelper< T, U, E >::Cast( rhs.Ref(), m_int );
return *this;
}

SafeInt< T, E >& operator =( const SafeInt< T, E >& rhs ) throw()
{
m_int = rhs.m_int;
return *this;
}
// Casting operators
// Every cast below is range-checked via SafeCastHelper; the error policy E
// is invoked if m_int does not fit in the destination type. The bool cast
// is the only unchecked one (any nonzero value is true).
operator bool() const throw()
{
return !!m_int;
}

operator char() const
{
char val;
details::SafeCastHelper< char, T, E >::Cast( m_int, val );
return val;
}

operator signed char() const
{
signed char val;
details::SafeCastHelper< signed char, T, E >::Cast( m_int, val );
return val;
}

operator unsigned char() const
{
unsigned char val;
details::SafeCastHelper< unsigned char, T, E >::Cast( m_int, val );
return val;
}

operator __int16() const
{
__int16 val;
details::SafeCastHelper< __int16, T, E >::Cast( m_int, val );
return val;
}

operator unsigned __int16() const
{
unsigned __int16 val;
details::SafeCastHelper< unsigned __int16, T, E >::Cast( m_int, val );
return val;
}

operator __int32() const
{
__int32 val;
details::SafeCastHelper< __int32, T, E >::Cast( m_int, val );
return val;
}

operator unsigned __int32() const
{
unsigned __int32 val;
details::SafeCastHelper< unsigned __int32, T, E >::Cast( m_int, val );
return val;
}

// The compiler knows that int == __int32
// but not that long == __int32
operator long() const
{
long val;
details::SafeCastHelper< long, T, E >::Cast( m_int, val );
return val;
}

operator unsigned long() const
{
unsigned long val;
details::SafeCastHelper< unsigned long, T, E >::Cast( m_int, val );
return val;
}

operator __int64() const
{
__int64 val;
details::SafeCastHelper< __int64, T, E >::Cast( m_int, val );
return val;
}

operator unsigned __int64() const
{
unsigned __int64 val;
details::SafeCastHelper< unsigned __int64, T, E >::Cast( m_int, val );
return val;
}

#ifdef _NATIVE_WCHAR_T_DEFINED
// wchar_t is checked through its unsigned 16-bit representation.
operator wchar_t() const
{
unsigned __int16 val;
details::SafeCastHelper< unsigned __int16, T, E >::Cast( m_int, val );
return val;
}
#endif /* _NATIVE_WCHAR_T_DEFINED */

// If you need a pointer to the data
// this could be dangerous, but allows you to correctly pass
// instances of this class to APIs that take a pointer to an integer
// also see overloaded address-of operator below
T* Ptr() throw() { return &m_int; }
const T* Ptr() const throw() { return &m_int; }
const T& Ref() const throw() { return m_int; }
// Unary operators
bool operator !() const throw() { return (!m_int) ? true : false; }

// operator + (unary)
// note - normally, the '+' and '-' operators will upcast to a signed int
// for T < 32 bits. This class changes behavior to preserve type
const SafeInt< T, E >& operator +() const throw() { return *this; };

//unary -
SafeInt< T, E > operator -() const
{
// Note - unsigned still performs the bitwise manipulation
// will warn at level 2 or higher if the value is 32-bit or larger
T tmp;
details::NegationHelper< T, E, details::IntTraits< T >::isSigned >::Negative( m_int, tmp );
return SafeInt< T, E >( tmp );
}

// prefix increment operator
// Invokes the error policy instead of wrapping at maxInt.
SafeInt< T, E >& operator ++()
{
if( m_int != details::IntTraits< T >::maxInt )
{
++m_int;
return *this;
}
E::SafeIntOnOverflow();
}

// prefix decrement operator
// Invokes the error policy instead of wrapping at minInt.
SafeInt< T, E >& operator --()
{
if( m_int != details::IntTraits< T >::minInt )
{
--m_int;
return *this;
}
E::SafeIntOnOverflow();
}

// note that postfix operators have inherently worse perf
// characteristics

// postfix increment operator
SafeInt< T, E > operator ++( int ) // dummy arg to comply with spec
{
if( m_int != details::IntTraits< T >::maxInt )
{
SafeInt< T, E > tmp( m_int );
m_int++;
return tmp;
}
E::SafeIntOnOverflow();
}

// postfix decrement operator
SafeInt< T, E > operator --( int ) // dummy arg to comply with spec
{
if( m_int != details::IntTraits< T >::minInt )
{
SafeInt< T, E > tmp( m_int );
m_int--;
return tmp;
}
E::SafeIntOnOverflow();
}

// One's complement
// Note - this operator will normally change size to an int
// cast in return improves perf and maintains type
SafeInt< T, E > operator ~() const throw() { return SafeInt< T, E >( (T)~m_int ); }
// Binary operators
//
// arithmetic binary operators
// % modulus
// * multiplication
// / division
// + addition
// - subtraction
//
// For each of the arithmetic operators, you will need to
// use them as follows:
//
// SafeInt<char> c = 2;
// SafeInt<int> i = 3;
//
// SafeInt<int> i2 = i op (char)c;
// OR
// SafeInt<char> i2 = (int)i op c;
//
// The base problem is that if the lhs and rhs inputs are different SafeInt types
// it is not possible in this implementation to determine what type of SafeInt
// should be returned. You have to let the class know which of the two inputs
// need to be the return type by forcing the other value to the base integer type.
//
// Note - as per feedback from Scott Meyers, I'm exploring how to get around this.
// 3.0 update - I'm still thinking about this. It can be done with template metaprogramming,
// but it is tricky, and there's a perf vs. correctness tradeoff where the right answer
// is situational.
//
// The case of:
//
// SafeInt< T, E > i, j, k;
// i = j op k;
//
// works just fine and no unboxing is needed because the return type is not ambiguous.
// Modulus
// Modulus has some convenient properties -
// first, the magnitude of the return can never be
// larger than the lhs operand, and it must be the same sign
// as well. It does, however, suffer from the same promotion
// problems as comparisons, division and other operations
// All three operator families below delegate to the details:: helpers,
// which invoke the error policy E on overflow or divide-by-zero.
template < typename U >
SafeInt< T, E > operator %( U rhs ) const
{
T result;
details::ModulusHelper< T, U, E >::Modulus( m_int, rhs, result );
return SafeInt< T, E >( result );
}

SafeInt< T, E > operator %( SafeInt< T, E > rhs ) const
{
T result;
details::ModulusHelper< T, T, E >::Modulus( m_int, rhs, result );
return SafeInt< T, E >( result );
}

// Modulus assignment
template < typename U >
SafeInt< T, E >& operator %=( U rhs )
{
details::ModulusHelper< T, U, E >::Modulus( m_int, rhs, m_int );
return *this;
}

template < typename U >
SafeInt< T, E >& operator %=( SafeInt< U, E > rhs )
{
details::ModulusHelper< T, U, E >::Modulus( m_int, (U)rhs, m_int );
return *this;
}

// Multiplication
template < typename U >
SafeInt< T, E > operator *( U rhs ) const
{
T ret( 0 );
details::MultiplicationHelper< T, U, E >::Multiply( m_int, rhs, ret );
return SafeInt< T, E >( ret );
}

SafeInt< T, E > operator *( SafeInt< T, E > rhs ) const
{
T ret( 0 );
details::MultiplicationHelper< T, T, E >::Multiply( m_int, (T)rhs, ret );
return SafeInt< T, E >( ret );
}

// Multiplication assignment
SafeInt< T, E >& operator *=( SafeInt< T, E > rhs )
{
details::MultiplicationHelper< T, T, E >::Multiply( m_int, (T)rhs, m_int );
return *this;
}

template < typename U >
SafeInt< T, E >& operator *=( U rhs )
{
details::MultiplicationHelper< T, U, E >::Multiply( m_int, rhs, m_int );
return *this;
}

template < typename U >
SafeInt< T, E >& operator *=( SafeInt< U, E > rhs )
{
details::MultiplicationHelper< T, U, E >::Multiply( m_int, rhs.Ref(), m_int );
return *this;
}

// Division
template < typename U >
SafeInt< T, E > operator /( U rhs ) const
{
T ret( 0 );
details::DivisionHelper< T, U, E >::Divide( m_int, rhs, ret );
return SafeInt< T, E >( ret );
}

SafeInt< T, E > operator /( SafeInt< T, E > rhs ) const
{
T ret( 0 );
details::DivisionHelper< T, T, E >::Divide( m_int, (T)rhs, ret );
return SafeInt< T, E >( ret );
}

// Division assignment
SafeInt< T, E >& operator /=( SafeInt< T, E > i )
{
details::DivisionHelper< T, T, E >::Divide( m_int, (T)i, m_int );
return *this;
}

template < typename U > SafeInt< T, E >& operator /=( U i )
{
details::DivisionHelper< T, U, E >::Divide( m_int, i, m_int );
return *this;
}

template < typename U > SafeInt< T, E >& operator /=( SafeInt< U, E > i )
{
details::DivisionHelper< T, U, E >::Divide( m_int, (U)i, m_int );
return *this;
}
// For addition and subtraction
// Addition
// Delegates to AdditionHelper, which invokes the error policy E on overflow.
SafeInt< T, E > operator +( SafeInt< T, E > rhs ) const
{
T ret( 0 );
details::AdditionHelper< T, T, E >::Addition( m_int, (T)rhs, ret );
return SafeInt< T, E >( ret );
}

template < typename U >
SafeInt< T, E > operator +( U rhs ) const
{
T ret( 0 );
details::AdditionHelper< T, U, E >::Addition( m_int, rhs, ret );
return SafeInt< T, E >( ret );
}

//addition assignment
SafeInt< T, E >& operator +=( SafeInt< T, E > rhs )
{
details::AdditionHelper< T, T, E >::Addition( m_int, (T)rhs, m_int );
return *this;
}

template < typename U >
SafeInt< T, E >& operator +=( U rhs )
{
details::AdditionHelper< T, U, E >::Addition( m_int, rhs, m_int );
return *this;
}

template < typename U >
SafeInt< T, E >& operator +=( SafeInt< U, E > rhs )
{
details::AdditionHelper< T, U, E >::Addition( m_int, (U)rhs, m_int );
return *this;
}

// Subtraction
// Delegates to SubtractionHelper, which invokes E on overflow/underflow.
template < typename U >
SafeInt< T, E > operator -( U rhs ) const
{
T ret( 0 );
details::SubtractionHelper< T, U, E >::Subtract( m_int, rhs, ret );
return SafeInt< T, E >( ret );
}

SafeInt< T, E > operator -(SafeInt< T, E > rhs) const
{
T ret( 0 );
details::SubtractionHelper< T, T, E >::Subtract( m_int, (T)rhs, ret );
return SafeInt< T, E >( ret );
}

// Subtraction assignment
SafeInt< T, E >& operator -=( SafeInt< T, E > rhs )
{
details::SubtractionHelper< T, T, E >::Subtract( m_int, (T)rhs, m_int );
return *this;
}

template < typename U >
SafeInt< T, E >& operator -=( U rhs )
{
details::SubtractionHelper< T, U, E >::Subtract( m_int, rhs, m_int );
return *this;
}

template < typename U >
SafeInt< T, E >& operator -=( SafeInt< U, E > rhs )
{
details::SubtractionHelper< T, U, E >::Subtract( m_int, (U)rhs, m_int );
return *this;
}
// Comparison operators
// Additional overloads defined outside the class
// to allow for cases where the SafeInt is the rhs value
// All mixed-type comparisons go through GreaterThanTest/EqualityTest so the
// result does not depend on C integer promotion (see header notes).

// Less than
template < typename U >
bool operator <( U rhs ) const throw()
{
return details::GreaterThanTest< U, T >::GreaterThan( rhs, m_int );
}

bool operator <( SafeInt< T, E > rhs ) const throw()
{
return m_int < (T)rhs;
}

// Greater than or eq.
template < typename U >
bool operator >=( U rhs ) const throw()
{
return !details::GreaterThanTest< U, T >::GreaterThan( rhs, m_int );
}

bool operator >=( SafeInt< T, E > rhs ) const throw()
{
return m_int >= (T)rhs;
}

// Greater than
template < typename U >
bool operator >( U rhs ) const throw()
{
return details::GreaterThanTest< T, U >::GreaterThan( m_int, rhs );
}

bool operator >( SafeInt< T, E > rhs ) const throw()
{
return m_int > (T)rhs;
}

// Less than or eq.
template < typename U >
bool operator <=( U rhs ) const throw()
{
return !details::GreaterThanTest< T, U >::GreaterThan( m_int, rhs );
}

bool operator <=( SafeInt< T, E > rhs ) const throw()
{
return m_int <= (T)rhs;
}

// Equality
template < typename U >
bool operator ==( U rhs ) const throw()
{
return details::EqualityTest< T, U >::IsEquals( m_int, rhs );
}

// Need an explicit override for type bool
bool operator ==( bool rhs ) const throw()
{
return ( m_int == 0 ? false : true ) == rhs;
}

bool operator ==( SafeInt< T, E > rhs ) const throw() { return m_int == (T)rhs; }

// != operators
template < typename U >
bool operator !=( U rhs ) const throw()
{
return !details::EqualityTest< T, U >::IsEquals( m_int, rhs );
}

bool operator !=( bool b ) const throw()
{
return ( m_int == 0 ? false : true ) != b;
}

bool operator !=( SafeInt< T, E > rhs ) const throw() { return m_int != (T)rhs; }
// Shift operators
// Note - shift operators ALWAYS return the same type as the lhs
// specific version for SafeInt< T, E > not needed -
// code path is exactly the same as for SafeInt< U, E > as rhs
// Left shift
// Also, shifting > bitcount is undefined - trap in debug (check _SAFEINT_SHIFT_ASSERT)
// Shifts are checked only by debug-time asserts (negative or too-large shift
// counts), not by the runtime error policy; the asserts compile away when
// _SAFEINT_SHIFT_ASSERT is disabled.
template < typename U >
SafeInt< T, E > operator <<( U bits ) const throw()
{
_SAFEINT_SHIFT_ASSERT( !details::IntTraits< U >::isSigned || bits >= 0 );
_SAFEINT_SHIFT_ASSERT( bits < (int)details::IntTraits< T >::bitCount );
return SafeInt< T, E >( (T)( m_int << bits ) );
}
// SafeInt-wrapped shift count: unboxed with (U) before shifting.
template < typename U >
SafeInt< T, E > operator <<( SafeInt< U, E > bits ) const throw()
{
_SAFEINT_SHIFT_ASSERT( !details::IntTraits< U >::isSigned || (U)bits >= 0 );
_SAFEINT_SHIFT_ASSERT( (U)bits < (int)details::IntTraits< T >::bitCount );
return SafeInt< T, E >( (T)( m_int << (U)bits ) );
}
// Left shift assignment
template < typename U >
SafeInt< T, E >& operator <<=( U bits ) throw()
{
_SAFEINT_SHIFT_ASSERT( !details::IntTraits< U >::isSigned || bits >= 0 );
_SAFEINT_SHIFT_ASSERT( bits < (int)details::IntTraits< T >::bitCount );
m_int <<= bits;
return *this;
}
template < typename U >
SafeInt< T, E >& operator <<=( SafeInt< U, E > bits ) throw()
{
_SAFEINT_SHIFT_ASSERT( !details::IntTraits< U >::isSigned || (U)bits >= 0 );
_SAFEINT_SHIFT_ASSERT( (U)bits < (int)details::IntTraits< T >::bitCount );
m_int <<= (U)bits;
return *this;
}
// Right shift
template < typename U >
SafeInt< T, E > operator >>( U bits ) const throw()
{
_SAFEINT_SHIFT_ASSERT( !details::IntTraits< U >::isSigned || bits >= 0 );
_SAFEINT_SHIFT_ASSERT( bits < (int)details::IntTraits< T >::bitCount );
return SafeInt< T, E >( (T)( m_int >> bits ) );
}
template < typename U >
SafeInt< T, E > operator >>( SafeInt< U, E > bits ) const throw()
{
_SAFEINT_SHIFT_ASSERT( !details::IntTraits< U >::isSigned || (U)bits >= 0 );
// NOTE(review): this assert compares the SafeInt directly instead of
// unboxing with (U) as every sibling overload does - confirm intentional.
_SAFEINT_SHIFT_ASSERT( bits < (int)details::IntTraits< T >::bitCount );
return SafeInt< T, E >( (T)(m_int >> (U)bits) );
}
// Right shift assignment
template < typename U >
SafeInt< T, E >& operator >>=( U bits ) throw()
{
_SAFEINT_SHIFT_ASSERT( !details::IntTraits< U >::isSigned || bits >= 0 );
_SAFEINT_SHIFT_ASSERT( bits < (int)details::IntTraits< T >::bitCount );
m_int >>= bits;
return *this;
}
template < typename U >
SafeInt< T, E >& operator >>=( SafeInt< U, E > bits ) throw()
{
_SAFEINT_SHIFT_ASSERT( !details::IntTraits< U >::isSigned || (U)bits >= 0 );
_SAFEINT_SHIFT_ASSERT( (U)bits < (int)details::IntTraits< T >::bitCount );
m_int >>= (U)bits;
return *this;
}
// Bitwise operators
// This only makes sense if we're dealing with the same type and size
// demand a type T, or something that fits into a type T
// Mixed-type &, ^ and | are routed through Binary*Helper so that a smaller
// rhs is not silently sign-extended before the operation (see the long
// comment in operator & below). These operators never throw.
// Bitwise &
SafeInt< T, E > operator &( SafeInt< T, E > rhs ) const throw()
{
return SafeInt< T, E >( m_int & (T)rhs );
}
template < typename U >
SafeInt< T, E > operator &( U rhs ) const throw()
{
// we want to avoid setting bits by surprise
// consider the case of lhs = int, value = 0xffffffff
// rhs = char, value = 0xff
//
// programmer intent is to get only the lower 8 bits
// normal behavior is to upcast both sides to an int
// which then sign extends rhs, setting all the bits
// If you land in the assert, this is because the bitwise operator
// was causing unexpected behavior. Fix is to properly cast your inputs
// so that it works like you meant, not unexpectedly
return SafeInt< T, E >( details::BinaryAndHelper< T, U >::And( m_int, rhs ) );
}
// Bitwise & assignment
SafeInt< T, E >& operator &=( SafeInt< T, E > rhs ) throw()
{
m_int &= (T)rhs;
return *this;
}
template < typename U >
SafeInt< T, E >& operator &=( U rhs ) throw()
{
m_int = details::BinaryAndHelper< T, U >::And( m_int, rhs );
return *this;
}
template < typename U >
SafeInt< T, E >& operator &=( SafeInt< U, E > rhs ) throw()
{
m_int = details::BinaryAndHelper< T, U >::And( m_int, (U)rhs );
return *this;
}
// XOR
SafeInt< T, E > operator ^( SafeInt< T, E > rhs ) const throw()
{
return SafeInt< T, E >( (T)( m_int ^ (T)rhs ) );
}
template < typename U >
SafeInt< T, E > operator ^( U rhs ) const throw()
{
// If you land in the assert, this is because the bitwise operator
// was causing unexpected behavior. Fix is to properly cast your inputs
// so that it works like you meant, not unexpectedly
return SafeInt< T, E >( details::BinaryXorHelper< T, U >::Xor( m_int, rhs ) );
}
// XOR assignment
SafeInt< T, E >& operator ^=( SafeInt< T, E > rhs ) throw()
{
m_int ^= (T)rhs;
return *this;
}
template < typename U >
SafeInt< T, E >& operator ^=( U rhs ) throw()
{
m_int = details::BinaryXorHelper< T, U >::Xor( m_int, rhs );
return *this;
}
template < typename U >
SafeInt< T, E >& operator ^=( SafeInt< U, E > rhs ) throw()
{
m_int = details::BinaryXorHelper< T, U >::Xor( m_int, (U)rhs );
return *this;
}
// bitwise OR
SafeInt< T, E > operator |( SafeInt< T, E > rhs ) const throw()
{
return SafeInt< T, E >( (T)( m_int | (T)rhs ) );
}
template < typename U >
SafeInt< T, E > operator |( U rhs ) const throw()
{
return SafeInt< T, E >( details::BinaryOrHelper< T, U >::Or( m_int, rhs ) );
}
// bitwise OR assignment
SafeInt< T, E >& operator |=( SafeInt< T, E > rhs ) throw()
{
m_int |= (T)rhs;
return *this;
}
template < typename U >
SafeInt< T, E >& operator |=( U rhs ) throw()
{
m_int = details::BinaryOrHelper< T, U >::Or( m_int, rhs );
return *this;
}
template < typename U >
SafeInt< T, E >& operator |=( SafeInt< U, E > rhs ) throw()
{
m_int = details::BinaryOrHelper< T, U >::Or( m_int, (U)rhs );
return *this;
}
// Miscellaneous helper functions
// Min: returns the smaller of 'test' and the stored value, but never less
// than 'floor' (defaults to the type's minimum, making the clamp a no-op).
SafeInt< T, E > Min( SafeInt< T, E > test, SafeInt< T, E > floor = SafeInt< T, E >( details::IntTraits< T >::minInt ) ) const throw()
{
T tmp = test < m_int ? test : m_int;
return tmp < floor ? floor : tmp;
}
// Max: returns the larger of 'test' and the stored value, but never more
// than 'upper' (defaults to the type's maximum, making the clamp a no-op).
SafeInt< T, E > Max( SafeInt< T, E > test, SafeInt< T, E > upper = SafeInt< T, E >( details::IntTraits< T >::maxInt ) ) const throw()
{
T tmp = test > m_int ? test : m_int;
return tmp > upper ? upper : tmp;
}
// Exchanges the stored values of two SafeInts; never throws.
void Swap( SafeInt< T, E >& with ) throw()
{
T temp( m_int );
m_int = with.m_int;
with.m_int = temp;
}
// Rounds the stored value UP to the next multiple of 2^bits in place and
// returns *this. Overflow past the type's maximum (result wraps to <= 0)
// is reported through E::SafeIntOnOverflow.
template < int bits >
const SafeInt< T, E >& Align()
{
// Zero is always aligned
if( m_int == 0 )
return *this;
// We don't support aligning negative numbers at this time
// Can't align unsigned numbers on bitCount (e.g., 8 bits = 256, unsigned char max = 255)
// or signed numbers on bitCount-1 (e.g., 7 bits = 128, signed char max = 127).
// Also makes no sense to try to align on negative or no bits.
_SAFEINT_SHIFT_ASSERT( ( ( details::IntTraits<T>::isSigned && bits < (int)details::IntTraits< T >::bitCount - 1 )
|| ( !details::IntTraits<T>::isSigned && bits < (int)details::IntTraits< T >::bitCount ) ) &&
bits >= 0 && ( !details::IntTraits<T>::isSigned || m_int > 0 ) );
// Classic round-up-and-mask: add (2^bits - 1), then clear the low bits.
const T AlignValue = ( (T)1 << bits ) - 1;
m_int = ( m_int + AlignValue ) & ~AlignValue;
if( m_int <= 0 )
E::SafeIntOnOverflow();
return *this;
}
// Commonly needed alignments:
const SafeInt< T, E >& Align2() { return Align< 1 >(); }
const SafeInt< T, E >& Align4() { return Align< 2 >(); }
const SafeInt< T, E >& Align8() { return Align< 3 >(); }
const SafeInt< T, E >& Align16() { return Align< 4 >(); }
const SafeInt< T, E >& Align32() { return Align< 5 >(); }
const SafeInt< T, E >& Align64() { return Align< 6 >(); }
private:
T m_int;
};
// Externally defined functions for the case of U op SafeInt< T, E >
// These mirror the member comparisons so that expressions with a raw
// integer (or a differently-typed SafeInt) on the LEFT still get the
// value-correct mixed signed/unsigned comparison from GreaterThanTest.
template < typename T, typename U, typename E >
bool operator <( U lhs, SafeInt< T, E > rhs ) throw()
{
return details::GreaterThanTest< T, U >::GreaterThan( (T)rhs, lhs );
}
template < typename T, typename U, typename E >
bool operator <( SafeInt< U, E > lhs, SafeInt< T, E > rhs ) throw()
{
return details::GreaterThanTest< T, U >::GreaterThan( (T)rhs, (U)lhs );
}
// Greater than
template < typename T, typename U, typename E >
bool operator >( U lhs, SafeInt< T, E > rhs ) throw()
{
return details::GreaterThanTest< U, T >::GreaterThan( lhs, (T)rhs );
}
template < typename T, typename U, typename E >
bool operator >( SafeInt< T, E > lhs, SafeInt< U, E > rhs ) throw()
{
return details::GreaterThanTest< T, U >::GreaterThan( (T)lhs, (U)rhs );
}
// Greater than or equal
// Implemented as the negation of the corresponding strict comparison.
template < typename T, typename U, typename E >
bool operator >=( U lhs, SafeInt< T, E > rhs ) throw()
{
return !details::GreaterThanTest< T, U >::GreaterThan( (T)rhs, lhs );
}
template < typename T, typename U, typename E >
bool operator >=( SafeInt< T, E > lhs, SafeInt< U, E > rhs ) throw()
{
return !details::GreaterThanTest< U, T >::GreaterThan( (U)rhs, (T)lhs );
}
// Less than or equal
template < typename T, typename U, typename E >
bool operator <=( U lhs, SafeInt< T, E > rhs ) throw()
{
return !details::GreaterThanTest< U, T >::GreaterThan( lhs, (T)rhs );
}
template < typename T, typename U, typename E >
bool operator <=( SafeInt< T, E > lhs, SafeInt< U, E > rhs ) throw()
{
return !details::GreaterThanTest< T, U >::GreaterThan( (T)lhs, (U)rhs );
}
// equality
// explicit overload for bool
// A SafeInt equals true iff its value is nonzero, matching the member
// operator ==( bool ) defined inside the class.
template < typename T, typename E >
bool operator ==( bool lhs, SafeInt< T, E > rhs ) throw()
{
return lhs == ( (T)rhs == 0 ? false : true );
}
template < typename T, typename U, typename E >
bool operator ==( U lhs, SafeInt< T, E > rhs ) throw()
{
return details::EqualityTest< T, U >::IsEquals((T)rhs, lhs);
}
template < typename T, typename U, typename E >
bool operator ==( SafeInt< T, E > lhs, SafeInt< U, E > rhs ) throw()
{
return details::EqualityTest< T, U >::IsEquals( (T)lhs, (U)rhs );
}
//not equals
template < typename T, typename U, typename E >
bool operator !=( U lhs, SafeInt< T, E > rhs ) throw()
{
// NOTE(review): rhs is passed without an explicit (T) unbox here, unlike
// the == overload above - relies on the SafeInt cast operator; confirm.
return !details::EqualityTest< T, U >::IsEquals( rhs, lhs );
}
template < typename T, typename E >
bool operator !=( bool lhs, SafeInt< T, E > rhs ) throw()
{
return ( (T)rhs == 0 ? false : true ) != lhs;
}
template < typename T, typename U, typename E >
bool operator !=( SafeInt< T, E > lhs, SafeInt< U, E > rhs ) throw()
{
return !details::EqualityTest< T, U >::IsEquals( lhs, rhs );
}
// Modulus
// U % SafeInt<T> -> SafeInt<T>. Division by zero is reported through
// E::SafeIntOnDivZero; the signed "x % -1" case is short-circuited to 0
// to avoid the hardware overflow trap on minInt % -1.
template < typename T, typename U, typename E >
SafeInt< T, E > operator %( U lhs, SafeInt< T, E > rhs )
{
// Value of return depends on sign of lhs
// This one may not be safe - bounds check in constructor
// if lhs is negative and rhs is unsigned, this will throw an exception.
// Fast-track the simple case
// same size and same sign
#pragma warning(suppress:4127 6326)
if( sizeof(T) == sizeof(U) && details::IntTraits< T >::isSigned == details::IntTraits< U >::isSigned )
{
if( rhs != 0 )
{
if( details::IntTraits< T >::isSigned && (T)rhs == -1 )
return 0;
return SafeInt< T, E >( (T)( lhs % (T)rhs ) );
}
E::SafeIntOnDivZero();
}
// Mixed size/signedness: wrap lhs in a SafeInt< U > and let the member
// operator % (with its own checks) do the work, then convert to T.
return SafeInt< T, E >( ( SafeInt< U, E >( lhs ) % (T)rhs ) );
}
// Multiplication
// U * SafeInt<T> -> SafeInt<T>. MultiplicationHelper performs the checked
// multiply into a local T, reporting overflow through policy E.
template < typename T, typename U, typename E >
SafeInt< T, E > operator *( U lhs, SafeInt< T, E > rhs )
{
T ret( 0 );
details::MultiplicationHelper< T, U, E >::Multiply( (T)rhs, lhs, ret );
return SafeInt< T, E >(ret);
}
// Division
// U / SafeInt<T> -> SafeInt<T>. Handles two corner cases explicitly:
// (1) unsigned lhs divided by a signed, negative rhs, where the normal
//     usual-arithmetic-conversions would corrupt the result, and
// (2) signed minInt / -1, which overflows.
// Division by zero is reported via E::SafeIntOnDivZero, overflow via
// E::SafeIntOnOverflow.
template < typename T, typename U, typename E > SafeInt< T, E > operator /( U lhs, SafeInt< T, E > rhs )
{
#pragma warning(push)
#pragma warning(disable: 4127 4146 4307 4310 6326)
// Corner case - has to be handled separately
if( details::DivisionMethod< U, T >::method == details::DivisionState_UnsignedSigned )
{
if( (T)rhs > 0 )
return SafeInt< T, E >( lhs/(T)rhs );
// Now rhs is either negative, or zero
if( (T)rhs != 0 )
{
if( sizeof( U ) >= 4 && sizeof( T ) <= sizeof( U ) )
{
// Problem case - normal casting behavior changes meaning
// flip rhs to positive
// any operator casts now do the right thing
U tmp;
if( sizeof(T) == 4 )
tmp = lhs/(U)(unsigned __int32)( -(T)rhs );
else
tmp = lhs/(U)( -(T)rhs );
// Divided by |rhs|, so negate the quotient if it fits in T.
if( tmp <= details::IntTraits< T >::maxInt )
return SafeInt< T, E >( -( (T)tmp ) );
// Corner case
// Note - this warning happens because we're not using partial
// template specialization in this case. For any real cases where
// this block isn't optimized out, the warning won't be present.
// -(maxInt + 1) is exactly minInt, which is still representable.
if( tmp == (U)details::IntTraits< T >::maxInt + 1 )
return SafeInt< T, E >( details::IntTraits< T >::minInt );
E::SafeIntOnOverflow();
}
return SafeInt< T, E >(lhs/(T)rhs);
}
E::SafeIntOnDivZero();
} // method == DivisionState_UnsignedSigned
if( details::SafeIntCompare< T, U >::isBothSigned )
{
if( lhs == details::IntTraits< U >::minInt && (T)rhs == -1 )
{
// corner case of a corner case - lhs = min int, rhs = -1,
// but rhs is the return type, so in essence, we can return -lhs
// if rhs is a larger type than lhs
if( sizeof( U ) < sizeof( T ) )
{
return SafeInt< T, E >( (T)( -(T)details::IntTraits< U >::minInt ) );
}
// If rhs is smaller or the same size int, then -minInt won't work
E::SafeIntOnOverflow();
}
}
// Otherwise normal logic works with addition of bounds check when casting from U->T
U ret;
details::DivisionHelper< U, T, E >::Divide( lhs, (T)rhs, ret );
return SafeInt< T, E >( ret );
#pragma warning(pop)
}
// Addition
// U + SafeInt<T> -> SafeInt<T>; checked by AdditionHelper, errors via E.
template < typename T, typename U, typename E >
SafeInt< T, E > operator +( U lhs, SafeInt< T, E > rhs )
{
T ret( 0 );
details::AdditionHelper< T, U, E >::Addition( (T)rhs, lhs, ret );
return SafeInt< T, E >( ret );
}
// Subtraction
// U - SafeInt<T> -> SafeInt<T>. Uses the SubtractionMethod2 dispatch and
// rhs.Ref() (a reference to the stored value) rather than unboxing by copy.
template < typename T, typename U, typename E >
SafeInt< T, E > operator -( U lhs, SafeInt< T, E > rhs )
{
T ret( 0 );
details::SubtractionHelper< U, T, E, details::SubtractionMethod2< U, T >::method >::Subtract( lhs, rhs.Ref(), ret );
return SafeInt< T, E >( ret );
}
// Overrides designed to deal with cases where a SafeInt is assigned out
// to a normal int - this at least makes the last operation safe
// Each arithmetic override computes the checked result into a local first,
// so lhs is only modified after the operation is known not to overflow.
// The bitwise/shift overrides cannot fail and are marked throw().
// +=
template < typename T, typename U, typename E >
T& operator +=( T& lhs, SafeInt< U, E > rhs )
{
T ret( 0 );
details::AdditionHelper< T, U, E >::Addition( lhs, (U)rhs, ret );
lhs = ret;
return lhs;
}
template < typename T, typename U, typename E >
T& operator -=( T& lhs, SafeInt< U, E > rhs )
{
T ret( 0 );
details::SubtractionHelper< T, U, E >::Subtract( lhs, (U)rhs, ret );
lhs = ret;
return lhs;
}
template < typename T, typename U, typename E >
T& operator *=( T& lhs, SafeInt< U, E > rhs )
{
T ret( 0 );
details::MultiplicationHelper< T, U, E >::Multiply( lhs, (U)rhs, ret );
lhs = ret;
return lhs;
}
template < typename T, typename U, typename E >
T& operator /=( T& lhs, SafeInt< U, E > rhs )
{
T ret( 0 );
details::DivisionHelper< T, U, E >::Divide( lhs, (U)rhs, ret );
lhs = ret;
return lhs;
}
template < typename T, typename U, typename E >
T& operator %=( T& lhs, SafeInt< U, E > rhs )
{
T ret( 0 );
details::ModulusHelper< T, U, E >::Modulus( lhs, (U)rhs, ret );
lhs = ret;
return lhs;
}
template < typename T, typename U, typename E >
T& operator &=( T& lhs, SafeInt< U, E > rhs ) throw()
{
lhs = details::BinaryAndHelper< T, U >::And( lhs, (U)rhs );
return lhs;
}
template < typename T, typename U, typename E >
T& operator ^=( T& lhs, SafeInt< U, E > rhs ) throw()
{
lhs = details::BinaryXorHelper< T, U >::Xor( lhs, (U)rhs );
return lhs;
}
template < typename T, typename U, typename E >
T& operator |=( T& lhs, SafeInt< U, E > rhs ) throw()
{
lhs = details::BinaryOrHelper< T, U >::Or( lhs, (U)rhs );
return lhs;
}
// Shift-assign: round-trip through a SafeInt< T > so the debug shift
// asserts in the member operators still apply.
template < typename T, typename U, typename E >
T& operator <<=( T& lhs, SafeInt< U, E > rhs ) throw()
{
lhs = (T)( SafeInt< T, E >( lhs ) << (U)rhs );
return lhs;
}
template < typename T, typename U, typename E >
T& operator >>=( T& lhs, SafeInt< U, E > rhs ) throw()
{
lhs = (T)( SafeInt< T, E >( lhs ) >> (U)rhs );
return lhs;
}
// Specific pointer overrides
// Note - this function makes no attempt to ensure
// that the resulting pointer is still in the buffer, only
// that no int overflows happened on the way to getting the new pointer
//
// ptr += n: advances lhs by n elements of T with all of the intermediate
// integer arithmetic (n -> ptrdiff_t conversion, * sizeof(T), pointer-value
// addition) overflow-checked through policy E.
template < typename T, typename U, typename E >
T*& operator +=( T*& lhs, SafeInt< U, E > rhs )
{
// Cast the pointer to a number so we can do arithmetic
SafeInt< uintptr_t, E > ptr_val = reinterpret_cast< uintptr_t >( lhs );
// Check first that rhs is valid for the type of ptrdiff_t
// and that multiplying by sizeof( T ) doesn't overflow a ptrdiff_t
// Next, we need to add 2 SafeInts of different types, so unbox the ptr_diff
// Finally, cast the number back to a pointer of the correct type
lhs = reinterpret_cast< T* >( (uintptr_t)( ptr_val + (ptrdiff_t)( SafeInt< ptrdiff_t, E >( rhs ) * sizeof( T ) ) ) );
return lhs;
}
// ptr -= n: the mirror image of += above.
template < typename T, typename U, typename E >
T*& operator -=( T*& lhs, SafeInt< U, E > rhs )
{
// Cast the pointer to a number so we can do arithmetic.
// Fixed: use uintptr_t (the pointer round-trip type) to match the +=
// overload above; the original used SafeInt< size_t, E >, which is not
// guaranteed to be the same type as uintptr_t on every platform.
SafeInt< uintptr_t, E > ptr_val = reinterpret_cast< uintptr_t >( lhs );
// See above for comments
lhs = reinterpret_cast< T* >( (uintptr_t)( ptr_val - (ptrdiff_t)( SafeInt< ptrdiff_t, E >( rhs ) * sizeof( T ) ) ) );
return lhs;
}
// The remaining compound assignments make no sense for pointers.
// Each one fires a static_assert via the always-false DependentFalse< T >,
// so ANY instantiation is a compile-time error; the return statement is
// unreachable and exists only to keep the signature well-formed.
template < typename T, typename U, typename E >
T*& operator *=( T* lhs, SafeInt< U, E > rhs )
{
static_assert( details::DependentFalse< T >::value, "SafeInt<T>: This operator explicitly not supported" );
return lhs;
}
template < typename T, typename U, typename E >
T*& operator /=( T* lhs, SafeInt< U, E > rhs )
{
static_assert( details::DependentFalse< T >::value, "SafeInt<T>: This operator explicitly not supported" );
return lhs;
}
template < typename T, typename U, typename E >
T*& operator %=( T* lhs, SafeInt< U, E > rhs )
{
static_assert( details::DependentFalse< T >::value, "SafeInt<T>: This operator explicitly not supported" );
return lhs;
}
template < typename T, typename U, typename E >
T*& operator &=( T* lhs, SafeInt< U, E > rhs )
{
static_assert( details::DependentFalse< T >::value, "SafeInt<T>: This operator explicitly not supported" );
return lhs;
}
template < typename T, typename U, typename E >
T*& operator ^=( T* lhs, SafeInt< U, E > rhs )
{
static_assert( details::DependentFalse< T >::value, "SafeInt<T>: This operator explicitly not supported" );
return lhs;
}
template < typename T, typename U, typename E >
T*& operator |=( T* lhs, SafeInt< U, E > rhs )
{
static_assert( details::DependentFalse< T >::value, "SafeInt<T>: This operator explicitly not supported" );
return lhs;
}
template < typename T, typename U, typename E >
T*& operator <<=( T* lhs, SafeInt< U, E > rhs )
{
static_assert( details::DependentFalse< T >::value, "SafeInt<T>: This operator explicitly not supported" );
return lhs;
}
template < typename T, typename U, typename E >
T*& operator >>=( T* lhs, SafeInt< U, E > rhs )
{
static_assert( details::DependentFalse< T >::value, "SafeInt<T>: This operator explicitly not supported" );
return lhs;
}
// Shift operators
// NOTE - shift operators always return the type of the lhs argument
// Left shift
// Raw-integer lhs shifted by a SafeInt count; only debug asserts guard the
// shift amount, mirroring the member shift operators.
template < typename T, typename U, typename E >
SafeInt< U, E > operator <<( U lhs, SafeInt< T, E > bits ) throw()
{
_SAFEINT_SHIFT_ASSERT( !details::IntTraits< T >::isSigned || (T)bits >= 0 );
_SAFEINT_SHIFT_ASSERT( (T)bits < (int)details::IntTraits< U >::bitCount );
return SafeInt< U, E >( (U)( lhs << (T)bits ) );
}
// Right shift
template < typename T, typename U, typename E >
SafeInt< U, E > operator >>( U lhs, SafeInt< T, E > bits ) throw()
{
_SAFEINT_SHIFT_ASSERT( !details::IntTraits< T >::isSigned || (T)bits >= 0 );
_SAFEINT_SHIFT_ASSERT( (T)bits < (int)details::IntTraits< U >::bitCount );
return SafeInt< U, E >( (U)( lhs >> (T)bits ) );
}
// Bitwise operators
// This only makes sense if we're dealing with the same type and size
// demand a type T, or something that fits into a type T.
// Raw-integer lhs variants; routed through Binary*Helper to avoid the
// surprise sign-extension described in the member operator & comment.
// Bitwise &
template < typename T, typename U, typename E >
SafeInt< T, E > operator &( U lhs, SafeInt< T, E > rhs ) throw()
{
return SafeInt< T, E >( details::BinaryAndHelper< T, U >::And( (T)rhs, lhs ) );
}
// Bitwise XOR
template < typename T, typename U, typename E >
SafeInt< T, E > operator ^( U lhs, SafeInt< T, E > rhs ) throw()
{
return SafeInt< T, E >(details::BinaryXorHelper< T, U >::Xor( (T)rhs, lhs ) );
}
// Bitwise OR
template < typename T, typename U, typename E >
SafeInt< T, E > operator |( U lhs, SafeInt< T, E > rhs ) throw()
{
return SafeInt< T, E >( details::BinaryOrHelper< T, U >::Or( (T)rhs, lhs ) );
}
} // namespace utilities
} // namespace msl
#pragma pack(pop)
| {
"pile_set_name": "Github"
} |
//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// <unordered_map>
// template <class Key, class T, class Hash = hash<Key>, class Pred = equal_to<Key>,
// class Alloc = allocator<pair<const Key, T>>>
// class unordered_map
// void rehash(size_type n);
#include <unordered_map>
#include <string>
#include <cassert>
// Check that the container holds exactly the four expected key/value
// pairs; bucket count and iteration order are deliberately not examined.
void test(const std::unordered_map<int, std::string>& c)
{
    assert(c.size() == 4);
    const char* expected[] = {"one", "two", "three", "four"};
    for (int key = 1; key <= 4; ++key)
        assert(c.at(key) == expected[key - 1]);
}
int main()
{
    {
        typedef std::unordered_map<int, std::string> C;
        typedef std::pair<int, std::string> P;
        // Six insertions, but keys 1 and 2 repeat, so the map ends up with
        // exactly 4 elements (range-construction keeps the first mapping).
        P a[] =
        {
            P(1, "one"),
            P(2, "two"),
            P(3, "three"),
            P(4, "four"),
            P(1, "four"),
            P(2, "four"),
        };
        C c(a, a + sizeof(a)/sizeof(a[0]));
        test(c);
        assert(c.bucket_count() >= 5);
        // rehash(3) requests fewer buckets than needed; the bucket counts
        // asserted below are the values this library implementation picks,
        // not guarantees of the C++ standard.
        c.rehash(3);
        assert(c.bucket_count() == 5);
        test(c);
        // Raising max_load_factor to 2 lets 4 elements fit in 3 buckets.
        c.max_load_factor(2);
        c.rehash(3);
        assert(c.bucket_count() == 3);
        test(c);
        c.rehash(31);
        assert(c.bucket_count() == 31);
        test(c);
    }
}
| {
"pile_set_name": "Github"
} |
#ifndef __SYS_LOCK_H__
#define __SYS_LOCK_H__
/* dummy lock routines for single-threaded aps */
/* Both lock types are plain ints: there is no real lock state to keep. */
typedef int _LOCK_T;
typedef int _LOCK_RECURSIVE_T;
#include <_ansi.h>
/* The INIT macros still define a variable so code that declares locks
compiles unchanged; every operation below expands to a void-cast no-op. */
#define __LOCK_INIT(class,lock) static int lock = 0;
#define __LOCK_INIT_RECURSIVE(class,lock) static int lock = 0;
#define __lock_init(lock) (_CAST_VOID 0)
#define __lock_init_recursive(lock) (_CAST_VOID 0)
#define __lock_close(lock) (_CAST_VOID 0)
#define __lock_close_recursive(lock) (_CAST_VOID 0)
#define __lock_acquire(lock) (_CAST_VOID 0)
#define __lock_acquire_recursive(lock) (_CAST_VOID 0)
/* NOTE(review): try_acquire also expands to void, so callers cannot observe
a failure result - acceptable only because nothing ever holds the lock. */
#define __lock_try_acquire(lock) (_CAST_VOID 0)
#define __lock_try_acquire_recursive(lock) (_CAST_VOID 0)
#define __lock_release(lock) (_CAST_VOID 0)
#define __lock_release_recursive(lock) (_CAST_VOID 0)
#endif /* __SYS_LOCK_H__ */
| {
"pile_set_name": "Github"
} |
package org.orcid.persistence.constants;
public class SiteConstants {
public static final int URL_MAX_LENGTH = 2000;
public static final int URL_NAME_MAX_LENGTH = 355;
public static final int MAX_LENGTH_255 = 255;
public static final int KEYWORD_MAX_LENGTH = 100;
}
| {
"pile_set_name": "Github"
} |
/* Copyright 1986, 1989, 1990, 1995 by Abacus Research and
* Development, Inc. All rights reserved.
*/
#if !defined (OMIT_RCSID_STRINGS)
char ROMlib_rcsid_windSize[] =
"$Id: windSize.c 63 2004-12-24 18:19:43Z ctm $";
#endif
/* Forward declarations in WindowMgr.h (DO NOT DELETE THIS LINE) */
#include "rsys/common.h"
#include "QuickDraw.h"
#include "CQuickDraw.h"
#include "WindowMgr.h"
#include "EventMgr.h"
#include "OSEvent.h"
#include "ToolboxUtil.h"
#include "MemoryMgr.h"
#include "rsys/cquick.h"
#include "rsys/wind.h"
/*
* Note, the code below probably be rewritten to use XorRgn as much
* as possible and probably have only one CalcVisBehind
*/
/*
 * MoveWindow: relocate window `wp` so its port origin lands at (h, v),
 * optionally bringing it to the front. For a visible window the on-screen
 * pixels are blitted to the new location (movepart), newly exposed areas
 * are repainted (updatepart), and windows behind are repainted/recalculated
 * (behindpart). The CW()/CWC() macros appear to convert between native and
 * big-endian (Mac) byte order - presumably; see rsys/cquick.h.
 */
P4(PUBLIC pascal trap, void, MoveWindow, WindowPtr, wp, INTEGER, h, INTEGER, v,
BOOLEAN, front)
{
GrafPtr gp;
RgnHandle movepart, updatepart, behindpart;
Rect r;
register WindowPeek w;
TRAPBEGIN();
w = (WindowPeek) wp;
gp = thePort;
if (WINDOW_VISIBLE_X (w))
{
/* Draw through the window-manager port, clipped to the screen and to
   the area not covered by windows above this one. */
SetPort (MR (wmgr_port));
ClipRect (&GD_BOUNDS (MR (TheGDevice)));
ClipAbove(w);
movepart = NewRgn();
updatepart = NewRgn();
behindpart = NewRgn();
/* movepart = visible portion of the window's structure region. */
SectRgn(PORT_CLIP_REGION (MR (wmgr_port)),
WINDOW_STRUCT_REGION (w), movepart);
#if 1
/*
* CopyBits does unaligned 32-bit reads from the source, which can
* cause it to read beyond the framebuffer in certain circumstances.
* This is a cheesy way to prevent that from happening here. A
* better fix would be either in CopyBits or to force an extra page
* after the framebuffer.
*/
{
Rect tmpr;
RgnHandle last_three_pixels;
tmpr = GD_BOUNDS (MR (TheGDevice));
tmpr.top = CW (CW (tmpr.bottom) - 1);
tmpr.left = CW (CW (tmpr.right) - 3);
last_three_pixels = NewRgn ();
RectRgn (last_three_pixels, &tmpr);
DiffRgn (movepart, last_three_pixels, movepart);
DisposeRgn (last_three_pixels);
}
#endif
CopyRgn(movepart, behindpart);
r = HxX (WINDOW_STRUCT_REGION (w), rgnBBox);
}
#if !defined(LETGCCWAIL)
else
{
/* Silence "may be used uninitialized" warnings; these are never read
   when the window is invisible. */
movepart = 0;
updatepart = 0;
behindpart = 0;
}
#endif
#if 1
/*
* NOTE: the use of portRect below was introduced by Bill, without comment
* either here or in the rcslog. But taking it out made the MSW5.1
* Picture editting window come up in the wrong place.
* (That could be due to other inconsistencies though, like the
*/
/* Convert (h, v) from the requested origin into a delta in global
   coordinates. */
h += CW (PORT_BOUNDS (w).left) - CW (PORT_RECT (w).left);
v += CW (PORT_BOUNDS (w).top) - CW (PORT_RECT (w).top);
#else
h += CW (PORT_BOUNDS (w).left);
v += CW (PORT_BOUNDS (w).top);
#endif
if (WINDOW_VISIBLE_X (w))
{
/* Blit the window's pixels from the old location to the new one. */
WRAPPER_PIXMAP_FOR_COPY (wrapper);
OffsetRect (&r, h, v);
OffsetRgn (movepart, h, v);
SectRgn (movepart, PORT_CLIP_REGION (MR (wmgr_port)), movepart);
ClipRect (&GD_BOUNDS (MR (TheGDevice)));
WRAPPER_SET_PIXMAP_X (wrapper, GD_PMAP_X (MR (TheGDevice)));
#define NEW_CLIP_HACK
#if defined(NEW_CLIP_HACK)
/*
* This hack appears to be necessary because clipping via the
* clip-region isn't enough to prevent us from reading bits that
* are outside the framebuffer. If there is unmapped memory on
* either side of the framebuffer we can eat flaming death for
* just looking at it. This appears to happen under NT4.0.
*/
{
Rect srcr, dstr;
SectRect (&HxX (WINDOW_STRUCT_REGION (w), rgnBBox),
&GD_BOUNDS (MR (TheGDevice)), &srcr);
dstr = GD_BOUNDS (MR (TheGDevice));
OffsetRect (&dstr, h, v);
SectRect (&dstr, &r, &dstr);
CopyBits (wrapper, wrapper, &srcr, &dstr, srcCopy, movepart);
}
#else
CopyBits (wrapper, wrapper,
&HxX (WINDOW_STRUCT_REGION (w), rgnBBox), &r,
srcCopy, movepart);
#endif
}
/* Shift all of the window's regions and its port origin by the delta.
   This happens whether or not the window is visible. */
OffsetRgn (WINDOW_STRUCT_REGION (w), h, v);
OffsetRgn (WINDOW_CONT_REGION (w), h, v);
OffsetRgn (WINDOW_UPDATE_REGION (w), h, v);
OffsetRect (&PORT_BOUNDS (w), -h, -v);
if (WINDOW_VISIBLE_X (w))
{
ClipRect (&GD_BOUNDS (MR (TheGDevice)));
ClipAbove(w);
/* updatepart = newly exposed area of the moved window that could not be
   blitted; behindpart = area uncovered at the old location. */
DiffRgn (WINDOW_STRUCT_REGION (w), movepart, updatepart);
SectRgn (PORT_CLIP_REGION (MR (wmgr_port)), updatepart, updatepart);
DiffRgn(behindpart, movepart, behindpart);
DiffRgn(behindpart, updatepart, behindpart);
PaintOne(w, updatepart);
PaintBehind (WINDOW_NEXT_WINDOW (w), behindpart);
CalcVisBehind(w, updatepart);
CalcVisBehind (WINDOW_NEXT_WINDOW (w), behindpart);
CalcVisBehind (WINDOW_NEXT_WINDOW (w), movepart);
DisposeRgn(movepart);
DisposeRgn(updatepart);
DisposeRgn(behindpart);
}
if (front)
SelectWindow((WindowPtr) w);
SetPort(gp);
TRAPEND();
}
/*
 * DragWindow: let the user drag window `wp` starting from point `p`,
 * constrained to `rp`, then move it to the drop location. Holding the
 * command key (tested via the event modifiers) drags without bringing the
 * window to the front. DragGrayRgn returns 0x80008000 to signal a
 * cancelled drag, in which case the window is not moved.
 */
P3 (PUBLIC pascal trap, void, DragWindow, WindowPtr, wp, Point, p, Rect *, rp)
{
RgnHandle rh;
LONGINT l;
EventRecord ev;
int cmddown;
Rect r;
THEPORT_SAVE_EXCURSION
(MR (wmgr_port),
{
GetOSEvent (0, &ev);
SetClip (MR (GrayRgn));
cmddown = ev.modifiers & CWC (cmdKey);
if (cmddown)
ClipAbove ((WindowPeek) wp);
/* Drag an outline of the window's structure region. */
rh = NewRgn ();
CopyRgn (WINDOW_STRUCT_REGION (wp), rh);
r = *rp;
/* Keep the drag below the menu bar area (top 24 pixels). */
if (CW (r.top) < 24)
r.top = CWC (24);
l = DragGrayRgn (rh, p, &r, &r, noConstraint, (ProcPtr) 0);
if ((uint32) l != 0x80008000)
/* l packs the drag delta: vertical in the high word, horizontal in
   the low word. Translate into the new origin for MoveWindow. */
MoveWindow(wp,
(- CW (PORT_BOUNDS (wp).left)
+ LoWord (l) + CW (PORT_RECT (wp).left)),
(- CW (PORT_BOUNDS (wp).top)
+ HiWord (l) + CW (PORT_RECT (wp).top)),
!cmddown);
DisposeRgn (rh);
});
}
/* SETUP_PORT: switch to port p and select the gray pattern with notPatXor
pen mode, the usual configuration for drawing a flicker-free rubber-band
outline (drawing twice erases it). */
#define SETUP_PORT(p) \
do \
{ \
SetPort (p); \
PenPat(gray); \
PenMode(notPatXor); \
} \
while (FALSE)
/* RESTORE_PORT: undo SETUP_PORT - restore the default black pen and
patCopy mode, then switch back to port p. */
#define RESTORE_PORT(p) \
do \
{ \
PenPat(black); \
PenMode(patCopy); \
SetPort (p); \
} \
while (FALSE)
/*
 * GrowWindow: track the mouse while the user resizes window `w` from start
 * point `startp`, drawing grow feedback via the window definition function
 * (wGrow messages, XOR pen so each call toggles the outline). `rp` gives
 * the min (left/top) and max (right/bottom) allowed dimensions. Returns
 * the new height/width packed as (height << 16) | width, or 0 if the mouse
 * never moved from the start point.
 */
P3(PUBLIC pascal trap, LONGINT, GrowWindow, WindowPtr, w, Point, startp,
Rect *, rp)
{
EventRecord ev;
GrafPtr gp;
Point p;
Rect r;
Rect pinr;
LONGINT l;
p.h = startp.h;
p.v = startp.v;
#if 0
r.left = CW (- CW (PORT_BOUNDS (w).left));
r.top = CW (- CW (PORT_BOUNDS (w).top));
r.right = CW (CW (r.left) + RECT_WIDTH (&PORT_RECT (w)));
r.bottom = CW (CW (r.top) + RECT_HEIGHT (&PORT_RECT (w)));
#else
/* r = the window's port rectangle in global coordinates. */
r.left = CW (CW (PORT_RECT (w).left) - CW (PORT_BOUNDS (w).left));
r.top = CW (CW (PORT_RECT (w).top) - CW (PORT_BOUNDS (w).top));
r.right = CW (CW (PORT_RECT (w).right) - CW (PORT_BOUNDS (w).left));
r.bottom = CW (CW (PORT_RECT (w).bottom) - CW (PORT_BOUNDS (w).top));
#endif
/* pinr = the rectangle the mouse is pinned to, derived from the size
   limits in *rp. Each edge is clamped to 32767 if the 16-bit addition
   wrapped around (result <= the base despite a positive offset). */
pinr.left = CW(CW(r.left) + CW(rp->left));
if (CW(pinr.left) <= CW(r.left) && CW(rp->left) > 0)
pinr.left = CWC(32767);
pinr.top = CW(CW(r.top) + CW(rp->top));
if (CW(pinr.top) <= CW(r.top) && CW(rp->top) > 0)
pinr.top = CWC(32767);
pinr.right = CW(CW(r.left) + CW(rp->right));
if (CW(pinr.right) <= CW(r.left) && CW(rp->right) > 0)
pinr.right = CWC(32767);
pinr.bottom = CW(CW(r.top) + CW(rp->bottom));
if (CW(pinr.bottom) <= CW(r.top) && CW(rp->bottom) > 0)
pinr.bottom = CWC(32767);
gp = thePort;
SETUP_PORT (MR ((GrafPtr) WMgrPort));
SETUP_PORT (MR (wmgr_port));
ClipRect (&GD_BOUNDS (MR (TheGDevice)));
ClipAbove((WindowPeek) w);
/* Draw the initial grow outline. */
WINDCALL((WindowPtr) w, wGrow, (LONGINT) (long) &r);
while (!GetOSEvent(mUpMask, &ev))
{
ev.where.h = CW(ev.where.h);
ev.where.v = CW(ev.where.v);
l = PinRect (&pinr, ev.where);
ev.where.v = HiWord(l);
ev.where.h = LoWord(l);
if (p.h != ev.where.h || p.v != ev.where.v)
{
/* Mouse moved: erase the old outline (XOR redraw), grow r by the
   mouse delta, and draw the new outline. */
WINDCALL((WindowPtr) w, wGrow, (LONGINT) (long) &r);
r.right = CW(CW(r.right) + (ev.where.h - p.h));
r.bottom = CW(CW(r.bottom) + (ev.where.v - p.v));
WINDCALL((WindowPtr) w, wGrow, (LONGINT) (long) &r);
p.h = ev.where.h;
p.v = ev.where.v;
}
CALLDRAGHOOK();
}
/* Mouse released: erase the final outline and restore pen/port state. */
WINDCALL ((WindowPtr) w, wGrow, (LONGINT) (long) &r);
RESTORE_PORT (MR ((GrafPtr) WMgrPort));
RESTORE_PORT (gp);
if (p.h != startp.h || p.v != startp.v)
/*-->*/ return(((LONGINT)(CW(r.bottom) - CW(r.top)) << 16)|
(unsigned short)(CW(r.right) - CW(r.left)));
else
return(0L);
}
/* #### speedup? bag saveold, drawnew */
/*
 * SizeWindow: resize window `w` to width x height (a zero/zero pair is a
 * no-op). For a visible window the old image is saved first (SaveOld) and
 * the frame is redrawn afterwards (DrawNew); `flag` is passed through to
 * DrawNew - presumably the "update exposed area" flag, confirm against
 * rsys/wind.h. The window definition function recomputes its regions via
 * the wCalcRgns message inside a window-manager-port excursion.
 */
P4 (PUBLIC pascal trap, void, SizeWindow, WindowPtr, w,
INTEGER, width, INTEGER, height, BOOLEAN, flag)
{
if (width || height)
{
if (WINDOW_VISIBLE_X (w))
SaveOld ((WindowPeek) w);
/* Grow/shrink the port rect from its top-left corner. */
PORT_RECT (w).right = CW (CW (PORT_RECT (w).left) + width);
PORT_RECT (w).bottom = CW (CW (PORT_RECT (w).top) + height);
THEPORT_SAVE_EXCURSION
(MR (wmgr_port),
{
WINDCALL (w, wCalcRgns, 0);
if (WINDOW_VISIBLE_X (w))
DrawNew ((WindowPeek) w, flag);
});
}
}
| {
"pile_set_name": "Github"
} |
// CodeMirror, copyright (c) by Marijn Haverbeke and others
// Distributed under an MIT license: http://codemirror.net/LICENSE
(function(mod) {
if (typeof exports == "object" && typeof module == "object") // CommonJS
mod(require("../../lib/codemirror"), require("./xml-hint"));
else if (typeof define == "function" && define.amd) // AMD
define(["../../lib/codemirror", "./xml-hint"], mod);
else // Plain browser env
mod(CodeMirror);
})(function(CodeMirror) {
"use strict";
// ISO 639-1 language codes offered for the lang/hreflang attributes.
var langs = "ab aa af ak sq am ar an hy as av ae ay az bm ba eu be bn bh bi bs br bg my ca ch ce ny zh cv kw co cr hr cs da dv nl dz en eo et ee fo fj fi fr ff gl ka de el gn gu ht ha he hz hi ho hu ia id ie ga ig ik io is it iu ja jv kl kn kr ks kk km ki rw ky kv kg ko ku kj la lb lg li ln lo lt lu lv gv mk mg ms ml mt mi mr mh mn na nv nb nd ne ng nn no ii nr oc oj cu om or os pa pi fa pl ps pt qu rm rn ro ru sa sc sd se sm sg sr gd sn si sk sl so st es su sw ss sv ta te tg th ti bo tk tl tn to tr ts tt tw ty ug uk ur uz ve vi vo wa cy wo fy xh yi yo za zu".split(" ");
var targets = ["_blank", "_self", "_top", "_parent"];
// Fixed: "latin1" was listed twice, producing a duplicate completion entry.
var charsets = ["ascii", "utf-8", "utf-16", "latin1"];
var methods = ["get", "post", "put", "delete"];
var encs = ["application/x-www-form-urlencoded", "multipart/form-data", "text/plain"];
// Fixed: "print" and "screen" each appeared twice in this list.
var media = ["all", "screen", "print", "embossed", "braille", "handheld", "projection", "tty", "tv", "speech",
             "3d-glasses", "resolution [>][<][=] [X]", "device-aspect-ratio: X/Y", "orientation:portrait",
             "orientation:landscape", "device-height: [X]", "device-width: [X]"];
var s = { attrs: {} }; // Simple tag, reused for a whole lot of tags
var data = {
a: {
attrs: {
href: null, ping: null, type: null,
media: media,
target: targets,
hreflang: langs
}
},
abbr: s,
acronym: s,
address: s,
applet: s,
area: {
attrs: {
alt: null, coords: null, href: null, target: null, ping: null,
media: media, hreflang: langs, type: null,
shape: ["default", "rect", "circle", "poly"]
}
},
article: s,
aside: s,
audio: {
attrs: {
src: null, mediagroup: null,
crossorigin: ["anonymous", "use-credentials"],
preload: ["none", "metadata", "auto"],
autoplay: ["", "autoplay"],
loop: ["", "loop"],
controls: ["", "controls"]
}
},
b: s,
base: { attrs: { href: null, target: targets } },
basefont: s,
bdi: s,
bdo: s,
big: s,
blockquote: { attrs: { cite: null } },
body: s,
br: s,
button: {
attrs: {
form: null, formaction: null, name: null, value: null,
autofocus: ["", "autofocus"],
disabled: ["", "autofocus"],
formenctype: encs,
formmethod: methods,
formnovalidate: ["", "novalidate"],
formtarget: targets,
type: ["submit", "reset", "button"]
}
},
canvas: { attrs: { width: null, height: null } },
caption: s,
center: s,
cite: s,
code: s,
col: { attrs: { span: null } },
colgroup: { attrs: { span: null } },
command: {
attrs: {
type: ["command", "checkbox", "radio"],
label: null, icon: null, radiogroup: null, command: null, title: null,
disabled: ["", "disabled"],
checked: ["", "checked"]
}
},
data: { attrs: { value: null } },
datagrid: { attrs: { disabled: ["", "disabled"], multiple: ["", "multiple"] } },
datalist: { attrs: { data: null } },
dd: s,
del: { attrs: { cite: null, datetime: null } },
details: { attrs: { open: ["", "open"] } },
dfn: s,
dir: s,
div: s,
dl: s,
dt: s,
em: s,
embed: { attrs: { src: null, type: null, width: null, height: null } },
eventsource: { attrs: { src: null } },
fieldset: { attrs: { disabled: ["", "disabled"], form: null, name: null } },
figcaption: s,
figure: s,
font: s,
footer: s,
form: {
attrs: {
action: null, name: null,
"accept-charset": charsets,
autocomplete: ["on", "off"],
enctype: encs,
method: methods,
novalidate: ["", "novalidate"],
target: targets
}
},
frame: s,
frameset: s,
h1: s, h2: s, h3: s, h4: s, h5: s, h6: s,
head: {
attrs: {},
children: ["title", "base", "link", "style", "meta", "script", "noscript", "command"]
},
header: s,
hgroup: s,
hr: s,
html: {
attrs: { manifest: null },
children: ["head", "body"]
},
i: s,
iframe: {
attrs: {
src: null, srcdoc: null, name: null, width: null, height: null,
sandbox: ["allow-top-navigation", "allow-same-origin", "allow-forms", "allow-scripts"],
seamless: ["", "seamless"]
}
},
img: {
attrs: {
alt: null, src: null, ismap: null, usemap: null, width: null, height: null,
crossorigin: ["anonymous", "use-credentials"]
}
},
input: {
attrs: {
alt: null, dirname: null, form: null, formaction: null,
height: null, list: null, max: null, maxlength: null, min: null,
name: null, pattern: null, placeholder: null, size: null, src: null,
step: null, value: null, width: null,
accept: ["audio/*", "video/*", "image/*"],
autocomplete: ["on", "off"],
autofocus: ["", "autofocus"],
checked: ["", "checked"],
disabled: ["", "disabled"],
formenctype: encs,
formmethod: methods,
formnovalidate: ["", "novalidate"],
formtarget: targets,
multiple: ["", "multiple"],
readonly: ["", "readonly"],
required: ["", "required"],
type: ["hidden", "text", "search", "tel", "url", "email", "password", "datetime", "date", "month",
"week", "time", "datetime-local", "number", "range", "color", "checkbox", "radio",
"file", "submit", "image", "reset", "button"]
}
},
ins: { attrs: { cite: null, datetime: null } },
kbd: s,
keygen: {
attrs: {
challenge: null, form: null, name: null,
autofocus: ["", "autofocus"],
disabled: ["", "disabled"],
keytype: ["RSA"]
}
},
label: { attrs: { "for": null, form: null } },
legend: s,
li: { attrs: { value: null } },
link: {
attrs: {
href: null, type: null,
hreflang: langs,
media: media,
sizes: ["all", "16x16", "16x16 32x32", "16x16 32x32 64x64"]
}
},
map: { attrs: { name: null } },
mark: s,
menu: { attrs: { label: null, type: ["list", "context", "toolbar"] } },
meta: {
attrs: {
content: null,
charset: charsets,
name: ["viewport", "application-name", "author", "description", "generator", "keywords"],
"http-equiv": ["content-language", "content-type", "default-style", "refresh"]
}
},
meter: { attrs: { value: null, min: null, low: null, high: null, max: null, optimum: null } },
nav: s,
noframes: s,
noscript: s,
object: {
attrs: {
data: null, type: null, name: null, usemap: null, form: null, width: null, height: null,
typemustmatch: ["", "typemustmatch"]
}
},
ol: { attrs: { reversed: ["", "reversed"], start: null, type: ["1", "a", "A", "i", "I"] } },
optgroup: { attrs: { disabled: ["", "disabled"], label: null } },
option: { attrs: { disabled: ["", "disabled"], label: null, selected: ["", "selected"], value: null } },
output: { attrs: { "for": null, form: null, name: null } },
p: s,
param: { attrs: { name: null, value: null } },
pre: s,
progress: { attrs: { value: null, max: null } },
q: { attrs: { cite: null } },
rp: s,
rt: s,
ruby: s,
s: s,
samp: s,
script: {
attrs: {
type: ["text/javascript"],
src: null,
async: ["", "async"],
defer: ["", "defer"],
charset: charsets
}
},
section: s,
select: {
attrs: {
form: null, name: null, size: null,
autofocus: ["", "autofocus"],
disabled: ["", "disabled"],
multiple: ["", "multiple"]
}
},
small: s,
source: { attrs: { src: null, type: null, media: null } },
span: s,
strike: s,
strong: s,
style: {
attrs: {
type: ["text/css"],
media: media,
scoped: null
}
},
sub: s,
summary: s,
sup: s,
table: s,
tbody: s,
td: { attrs: { colspan: null, rowspan: null, headers: null } },
textarea: {
attrs: {
dirname: null, form: null, maxlength: null, name: null, placeholder: null,
rows: null, cols: null,
autofocus: ["", "autofocus"],
disabled: ["", "disabled"],
readonly: ["", "readonly"],
required: ["", "required"],
wrap: ["soft", "hard"]
}
},
tfoot: s,
th: { attrs: { colspan: null, rowspan: null, headers: null, scope: ["row", "col", "rowgroup", "colgroup"] } },
thead: s,
time: { attrs: { datetime: null } },
title: s,
tr: s,
track: {
attrs: {
src: null, label: null, "default": null,
kind: ["subtitles", "captions", "descriptions", "chapters", "metadata"],
srclang: langs
}
},
tt: s,
u: s,
ul: s,
"var": s,
video: {
attrs: {
src: null, poster: null, width: null, height: null,
crossorigin: ["anonymous", "use-credentials"],
preload: ["auto", "metadata", "none"],
autoplay: ["", "autoplay"],
mediagroup: ["movie"],
muted: ["", "muted"],
controls: ["", "controls"]
}
},
wbr: s
};
var globalAttrs = {
accesskey: ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9"],
"class": null,
contenteditable: ["true", "false"],
contextmenu: null,
dir: ["ltr", "rtl", "auto"],
draggable: ["true", "false", "auto"],
dropzone: ["copy", "move", "link", "string:", "file:"],
hidden: ["hidden"],
id: null,
inert: ["inert"],
itemid: null,
itemprop: null,
itemref: null,
itemscope: ["itemscope"],
itemtype: null,
lang: ["en", "es"],
spellcheck: ["true", "false"],
style: null,
tabindex: ["1", "2", "3", "4", "5", "6", "7", "8", "9"],
title: null,
translate: ["yes", "no"],
onclick: null,
rel: ["stylesheet", "alternate", "author", "bookmark", "help", "license", "next", "nofollow", "noreferrer", "prefetch", "prev", "search", "tag"]
};
// Copy every global HTML attribute definition onto a tag's attrs map.
// Object.keys only yields own enumerable properties, matching the
// original for-in + hasOwnProperty guard.
function populate(obj) {
  Object.keys(globalAttrs).forEach(function(attr) {
    obj.attrs[attr] = globalAttrs[attr];
  });
}
populate(s);
for (var tag in data) if (data.hasOwnProperty(tag) && data[tag] != s)
populate(data[tag]);
CodeMirror.htmlSchema = data;
// Completion entry point: delegate to the xml hinter, seeded with the
// HTML schema.  Caller-supplied options override the defaults.
function htmlHint(cm, options) {
  var settings = {schemaInfo: data};
  if (options) {
    for (var key in options) settings[key] = options[key];
  }
  return CodeMirror.hint.xml(cm, settings);
}
CodeMirror.registerHelper("hint", "html", htmlHint);
});
| {
"pile_set_name": "Github"
} |
// Setup
// Fixed-size drawing surface for the map visualisation.
const svgWidth = 1000;
const svgHeight = 700;
// Create the root <svg> element inside the #map container.
const svg = d3.select("#map")
.append("svg")
.attr("width", svgWidth)
.attr("height", svgHeight);
// Start a rectangle at (400, 300).  This is a tutorial exercise: the
// attribute chain is intentionally unfinished — the instructions below
// ask the reader to complete it.
svg.append("rect")
.attr("x", 400)
.attr("y", 300)
/*
Chain another `.attr()` method to add the `"width"` attribute to the rectangle.
Give it a value of `200`.
*/
| {
"pile_set_name": "Github"
} |
package jd.gui.swing.jdgui.views.settings.panels.accountmanager;
import java.awt.event.ActionEvent;
import javax.swing.AbstractAction;
import jd.controlling.AccountController;
import jd.plugins.Account;
import jd.plugins.PluginForHost;
import jd.utils.JDUtilities;
import org.jdownloader.gui.IconKey;
import org.jdownloader.gui.translate._GUI;
import org.jdownloader.images.AbstractIcon;
/**
 * Account-manager table action that opens the renewal/affiliate page for
 * the currently selected account.  When no plugin is installed for the
 * account's host, a plain http link to the host is used instead.
 */
public class RenewAction extends TableBarAction {
    private static final long serialVersionUID = 8346982706972553448L;

    public RenewAction() {
        putValue(NAME, _GUI.T.settings_accountmanager_renew());
        putValue(AbstractAction.SMALL_ICON, new AbstractIcon(IconKey.ICON_RENEW, ActionColumn.SIZE));
    }

    public void actionPerformed(ActionEvent e) {
        final Account account = getAccount();
        if (account == null) {
            return;
        }
        final PluginForHost plugin = JDUtilities.getPluginForHost(account.getHoster());
        // With a plugin present, openAfflink resolves the URL itself; otherwise
        // fall back to a plain link to the hoster.
        final String customURL = (plugin == null) ? ("http://" + account.getHoster()) : null;
        AccountController.openAfflink(plugin, customURL, "RenewAction");
    }
}
| {
"pile_set_name": "Github"
} |
/* Driver for Realtek RTS51xx USB card reader
* Header file
*
* Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Author:
* wwang ([email protected])
* No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
* Maintainer:
* Edwin Rong ([email protected])
* No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
*/
#ifndef __RTS51X_TRACE_H
#define __RTS51X_TRACE_H
#include "debug.h"
#define _MSG_TRACE
#ifdef _MSG_TRACE
/* Return a pointer to the basename component of `path`: the text after
 * the last '/' or '\\' separator.  Returns NULL for a NULL path.  No copy
 * is made; the result points into the caller's buffer. */
static inline char *filename(char *path)
{
	char *base;
	char *cur;

	if (path == NULL)
		return NULL;
	base = path;
	for (cur = path; *cur != '\0'; cur++) {
		/* Accept both separator styles, as the original did. */
		if (*cur == '/' || *cur == '\\')
			base = cur + 1;
	}
	return base;
}
/* TRACE_RET: log file/function/line via RTS51X_DEBUGP, record the event
 * in the chip's circular trace ring (trace_msg[], wrapping at
 * TRACE_ITEM_CNT entries), then `return ret` from the enclosing
 * function.  Must only be used where a `return` is legal. */
#define TRACE_RET(chip, ret) \
do { \
char *_file = filename((char *)__FILE__); \
RTS51X_DEBUGP("[%s][%s]:[%d]\n", _file, __func__, __LINE__); \
(chip)->trace_msg[(chip)->msg_idx].line = (u16)(__LINE__); \
strncpy((chip)->trace_msg[(chip)->msg_idx].func, \
__func__, MSG_FUNC_LEN-1); \
strncpy((chip)->trace_msg[(chip)->msg_idx].file, \
_file, MSG_FILE_LEN-1); \
get_current_time((chip)->trace_msg[(chip)->msg_idx].timeval_buf,\
TIME_VAL_LEN); \
(chip)->trace_msg[(chip)->msg_idx].valid = 1; \
(chip)->msg_idx++; \
if ((chip)->msg_idx >= TRACE_ITEM_CNT) { \
(chip)->msg_idx = 0; \
} \
return ret; \
} while (0)
/* TRACE_GOTO: identical ring-buffer bookkeeping to TRACE_RET, but ends
 * with `goto label` instead of returning. */
#define TRACE_GOTO(chip, label) \
do { \
char *_file = filename((char *)__FILE__); \
RTS51X_DEBUGP("[%s][%s]:[%d]\n", _file, __func__, __LINE__); \
(chip)->trace_msg[(chip)->msg_idx].line = (u16)(__LINE__); \
strncpy((chip)->trace_msg[(chip)->msg_idx].func, \
__func__, MSG_FUNC_LEN-1); \
strncpy((chip)->trace_msg[(chip)->msg_idx].file, \
_file, MSG_FILE_LEN-1); \
get_current_time((chip)->trace_msg[(chip)->msg_idx].timeval_buf,\
TIME_VAL_LEN); \
(chip)->trace_msg[(chip)->msg_idx].valid = 1; \
(chip)->msg_idx++; \
if ((chip)->msg_idx >= TRACE_ITEM_CNT) { \
(chip)->msg_idx = 0; \
} \
goto label; \
} while (0)
#else
/* With _MSG_TRACE disabled the macros reduce to the bare control flow. */
#define TRACE_RET(chip, ret) return (ret)
#define TRACE_GOTO(chip, label) goto label
#endif
#ifdef CONFIG_RTS5139_DEBUG
/* Hex-dump `buf_len` bytes of `buf` to the debug log, 16 bytes per line.
 * A trailing partial line is copied into a zero-filled scratch buffer so
 * the full 16-column format string can be reused (the padding prints the
 * zero fill, not past-the-end memory). */
static inline void rts51x_dump(u8 *buf, int buf_len)
{
int i;
u8 tmp[16] = { 0 };
u8 *_ptr = buf;
for (i = 0; i < ((buf_len) / 16); i++) {
RTS51X_DEBUGP("%02x %02x %02x %02x %02x %02x %02x %02x "
"%02x %02x %02x %02x %02x %02x %02x %02x\n",
_ptr[0], _ptr[1], _ptr[2], _ptr[3], _ptr[4],
_ptr[5], _ptr[6], _ptr[7], _ptr[8], _ptr[9],
_ptr[10], _ptr[11], _ptr[12], _ptr[13], _ptr[14],
_ptr[15]);
_ptr += 16;
}
if ((buf_len) % 16) {
/* Remaining tail (< 16 bytes): stage it in tmp, zero-padded. */
memcpy(tmp, _ptr, (buf_len) % 16);
_ptr = tmp;
RTS51X_DEBUGP("%02x %02x %02x %02x %02x %02x %02x %02x "
"%02x %02x %02x %02x %02x %02x %02x %02x\n",
_ptr[0], _ptr[1], _ptr[2], _ptr[3], _ptr[4],
_ptr[5], _ptr[6], _ptr[7], _ptr[8], _ptr[9],
_ptr[10], _ptr[11], _ptr[12], _ptr[13], _ptr[14],
_ptr[15]);
}
}
#define RTS51X_DUMP(buf, buf_len) \
rts51x_dump((u8 *)(buf), (buf_len))
/* NOTE(review): writing 0x01 to register 0xFC31 presumably arms a
 * hardware/logic-analyzer debug trigger — confirm against the datasheet. */
#define CATCH_TRIGGER(chip) \
do { \
rts51x_ep0_write_register((chip), 0xFC31, 0x01, 0x01); \
RTS51X_DEBUGP("Catch trigger!\n"); \
} while (0)
#else
/* Debug disabled: both helpers compile away to nothing. */
#define RTS51X_DUMP(buf, buf_len)
#define CATCH_TRIGGER(chip)
#endif
#endif /* __RTS51X_TRACE_H */
| {
"pile_set_name": "Github"
} |
ref: replSetReconfig-majority
content: |
Starting in MongoDB 4.4, |reconfig| waits until a majority of voting
replica set members install the new replica configuration before
returning success. A voting member is *any* replica set member where
:rsconf:`members[n].votes` is ``1``, including arbiters.
Replica set members propagate their replica configuration via
:ref:`heartbeats <replica-set-heartbeats>`. Whenever a member learns
of a configuration with a higher :rsconf:`version` and
:rsconf:`term`, it installs the new configuration. The
reconfiguration process has two distinct 'waiting' phases:
\1) Wait for the current configuration to be *committed* before installing the new configuration.
The "current" configuration refers to the replica configuration in
use by the primary at the time |reconfig| is issued.
.. _replSetReconfig-configuration-committed:
A configuration is committed when:
- A majority of voting replica set members have installed the
current configuration, *and*
- All writes which were :writeconcern:`"majority"` committed in
the previous configuration have also replicated to a majority in
the current configuration.
Typically, the current configuration has already been installed on
a majority of voting replica set members. However, the majority
committed writes in the previous configuration may not all be
committed in the current configuration.
:rsconf:`Delayed <members[n].slaveDelay>` members *or* members
that are :method:`lagging behind <rs.printSlaveReplicationInfo()>`
the primary can increase the time spent in this phase.
If the operation was issued with a
|reconfig-maxTimeMS| limit *and*
the operation exceeds the limit while waiting, the operation
returns an error and discards the new configuration. The
limit is *cumulative* and does not reset after proceeding to
the next phase.
\2) Wait for a majority of voting members in the new configuration to install the new configuration.
The "new" configuration refers to the replica configuration
specified to |reconfig|.
The primary installs and begins using the new replica
configuration before propagating the configuration to the
remaining replica set members. The operation only waits for a
majority of voting members to install the new configuration, and
does not require waiting for the new configuration to be
:ref:`committed <replSetReconfig-configuration-committed>`.
If the operation was issued with a |reconfig-maxTimeMS| limit
*and* the operation exceeds the limit while waiting, the operation
returns an error *but* continues using and propagating the new
configuration.
Issuing a |reconfig-force| reconfiguration
immediately installs the new configuration regardless of the
commitment status of the previous configuration. Force
reconfiguration can cause unexpected behavior, such as the
rollback of :writeconcern:`"majority"` committed write operations.
To check the commitment status of the current replica configuration,
issue :dbcommand:`replSetGetConfig` with the
:ref:`commitmentStatus <replSetGetConfig-commitmentStatus>` parameter
on the replica set :term:`primary`.
---
ref: replSetReconfig-single-node
content: |
Starting in MongoDB 4.4, |reconfig| by default allows adding or
removing no more than ``1`` :rsconf:`voting <members[n].votes>`
member at a time. For example, a new configuration can make at most
*one* of the following changes to the cluster :rsconf:`membership
<members[n].votes>`:
- Adding a new voting replica set member.
- Removing an existing voting replica set member.
- Modifying the :rsconf:`~members[n].votes` for an existing replica
set member.
To add or remove multiple voting members, issue a series of
|reconfig| operations to add or remove one member at a
time.
Issuing a |reconfig-force| reconfiguration
immediately installs the new configuration even if it adds or removes
multiple voting members. Force reconfiguration can cause unexpected
behavior, such as the rollback of :writeconcern:`"majority"`
committed write operations.
... | {
"pile_set_name": "Github"
} |
#!/usr/bin/env bash
##############################################################################
##
## Gradle start up script for UN*X
##
##############################################################################
# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
DEFAULT_JVM_OPTS=""
APP_NAME="Gradle"
APP_BASE_NAME=`basename "$0"`
# Use the maximum available, or set MAX_FD != -1 to use that value.
MAX_FD="maximum"
# Print a diagnostic message to stdout without stopping the script.
warn ( ) {
echo "$*"
}
# Print a message framed by blank lines, then abort with exit status 1.
die ( ) {
echo
echo "$*"
echo
exit 1
}
# OS specific support (must be 'true' or 'false').
cygwin=false
msys=false
darwin=false
case "`uname`" in
CYGWIN* )
cygwin=true
;;
Darwin* )
darwin=true
;;
MINGW* )
msys=true
;;
esac
# Attempt to set APP_HOME
# Resolve links: $0 may be a link
PRG="$0"
# Need this for relative symlinks.
while [ -h "$PRG" ] ; do
ls=`ls -ld "$PRG"`
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
PRG="$link"
else
PRG=`dirname "$PRG"`"/$link"
fi
done
SAVED="`pwd`"
cd "`dirname \"$PRG\"`/" >/dev/null
APP_HOME="`pwd -P`"
cd "$SAVED" >/dev/null
CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
# Determine the Java command to use to start the JVM.
if [ -n "$JAVA_HOME" ] ; then
if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
# IBM's JDK on AIX uses strange locations for the executables
JAVACMD="$JAVA_HOME/jre/sh/java"
else
JAVACMD="$JAVA_HOME/bin/java"
fi
if [ ! -x "$JAVACMD" ] ; then
die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
fi
else
JAVACMD="java"
which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
fi
# Increase the maximum file descriptors if we can.
if [ "$cygwin" = "false" -a "$darwin" = "false" ] ; then
MAX_FD_LIMIT=`ulimit -H -n`
if [ $? -eq 0 ] ; then
if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
MAX_FD="$MAX_FD_LIMIT"
fi
ulimit -n $MAX_FD
if [ $? -ne 0 ] ; then
warn "Could not set maximum file descriptor limit: $MAX_FD"
fi
else
warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
fi
fi
# For Darwin, add options to specify how the application appears in the dock
if $darwin; then
GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
fi
# For Cygwin, switch paths to Windows format before running java
if $cygwin ; then
APP_HOME=`cygpath --path --mixed "$APP_HOME"`
CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
JAVACMD=`cygpath --unix "$JAVACMD"`
# We build the pattern for arguments to be converted via cygpath
ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
SEP=""
for dir in $ROOTDIRSRAW ; do
ROOTDIRS="$ROOTDIRS$SEP$dir"
SEP="|"
done
OURCYGPATTERN="(^($ROOTDIRS))"
# Add a user-defined pattern to the cygpath arguments
if [ "$GRADLE_CYGPATTERN" != "" ] ; then
OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
fi
# Now convert the arguments - kludge to limit ourselves to /bin/sh
i=0
for arg in "$@" ; do
CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option
if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition
eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
else
eval `echo args$i`="\"$arg\""
fi
i=$((i+1))
done
case $i in
(0) set -- ;;
(1) set -- "$args0" ;;
(2) set -- "$args0" "$args1" ;;
(3) set -- "$args0" "$args1" "$args2" ;;
(4) set -- "$args0" "$args1" "$args2" "$args3" ;;
(5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
(6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
(7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
(8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
(9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
esac
fi
# Split up the JVM_OPTS And GRADLE_OPTS values into an array, following the shell quoting and substitution rules
# Capture its arguments as the JVM_OPTS array.  Invoked via an unquoted
# `eval` below so that quoting/substitution inside the *_OPTS variables
# is honoured while splitting them into words.
function splitJvmOpts() {
JVM_OPTS=("$@")
}
eval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS
JVM_OPTS[${#JVM_OPTS[*]}]="-Dorg.gradle.appname=$APP_BASE_NAME"
exec "$JAVACMD" "${JVM_OPTS[@]}" -classpath "$CLASSPATH" org.gradle.wrapper.GradleWrapperMain "$@"
| {
"pile_set_name": "Github"
} |
/*
* This definitions of the PIC18F2523 MCU.
*
* This file is part of the GNU PIC library for SDCC, originally
* created by Molnar Karoly <[email protected]> 2016.
*
* This file is generated automatically by the cinc2h.pl, 2016-04-13 17:23:44 UTC.
*
* SDCC is licensed under the GNU Public license (GPL) v2. Note that
* this license covers the code to the compiler and other executables,
* but explicitly does not cover any code or objects generated by sdcc.
*
* For pic device libraries and header files which are derived from
* Microchip header (.inc) and linker script (.lkr) files Microchip
* requires that "The header files should state that they are only to be
* used with authentic Microchip devices" which makes them incompatible
* with the GPL. Pic device libraries and header files are located at
* non-free/lib and non-free/include directories respectively.
* Sdcc should be run with the --use-non-free command line option in
* order to include non-free header files and libraries.
*
* See http://sdcc.sourceforge.net/ for the latest information on sdcc.
*/
#include <pic18f2523.h>
//==============================================================================
__at(0x0F80) __sfr PORTA;
__at(0x0F80) volatile __PORTAbits_t PORTAbits;
__at(0x0F81) __sfr PORTB;
__at(0x0F81) volatile __PORTBbits_t PORTBbits;
__at(0x0F82) __sfr PORTC;
__at(0x0F82) volatile __PORTCbits_t PORTCbits;
__at(0x0F84) __sfr PORTE;
__at(0x0F84) volatile __PORTEbits_t PORTEbits;
__at(0x0F89) __sfr LATA;
__at(0x0F89) volatile __LATAbits_t LATAbits;
__at(0x0F8A) __sfr LATB;
__at(0x0F8A) volatile __LATBbits_t LATBbits;
__at(0x0F8B) __sfr LATC;
__at(0x0F8B) volatile __LATCbits_t LATCbits;
__at(0x0F92) __sfr DDRA;
__at(0x0F92) volatile __DDRAbits_t DDRAbits;
__at(0x0F92) __sfr TRISA;
__at(0x0F92) volatile __TRISAbits_t TRISAbits;
__at(0x0F93) __sfr DDRB;
__at(0x0F93) volatile __DDRBbits_t DDRBbits;
__at(0x0F93) __sfr TRISB;
__at(0x0F93) volatile __TRISBbits_t TRISBbits;
__at(0x0F94) __sfr DDRC;
__at(0x0F94) volatile __DDRCbits_t DDRCbits;
__at(0x0F94) __sfr TRISC;
__at(0x0F94) volatile __TRISCbits_t TRISCbits;
__at(0x0F9B) __sfr OSCTUNE;
__at(0x0F9B) volatile __OSCTUNEbits_t OSCTUNEbits;
__at(0x0F9D) __sfr PIE1;
__at(0x0F9D) volatile __PIE1bits_t PIE1bits;
__at(0x0F9E) __sfr PIR1;
__at(0x0F9E) volatile __PIR1bits_t PIR1bits;
__at(0x0F9F) __sfr IPR1;
__at(0x0F9F) volatile __IPR1bits_t IPR1bits;
__at(0x0FA0) __sfr PIE2;
__at(0x0FA0) volatile __PIE2bits_t PIE2bits;
__at(0x0FA1) __sfr PIR2;
__at(0x0FA1) volatile __PIR2bits_t PIR2bits;
__at(0x0FA2) __sfr IPR2;
__at(0x0FA2) volatile __IPR2bits_t IPR2bits;
__at(0x0FA6) __sfr EECON1;
__at(0x0FA6) volatile __EECON1bits_t EECON1bits;
__at(0x0FA7) __sfr EECON2;
__at(0x0FA8) __sfr EEDATA;
__at(0x0FA9) __sfr EEADR;
__at(0x0FAB) __sfr RCSTA;
__at(0x0FAB) volatile __RCSTAbits_t RCSTAbits;
__at(0x0FAB) __sfr RCSTA1;
__at(0x0FAB) volatile __RCSTA1bits_t RCSTA1bits;
__at(0x0FAC) __sfr TXSTA;
__at(0x0FAC) volatile __TXSTAbits_t TXSTAbits;
__at(0x0FAC) __sfr TXSTA1;
__at(0x0FAC) volatile __TXSTA1bits_t TXSTA1bits;
__at(0x0FAD) __sfr TXREG;
__at(0x0FAD) __sfr TXREG1;
__at(0x0FAE) __sfr RCREG;
__at(0x0FAE) __sfr RCREG1;
__at(0x0FAF) __sfr SPBRG;
__at(0x0FAF) __sfr SPBTG1;
__at(0x0FB0) __sfr SPBRGH;
__at(0x0FB1) __sfr T3CON;
__at(0x0FB1) volatile __T3CONbits_t T3CONbits;
__at(0x0FB2) __sfr TMR3;
__at(0x0FB2) __sfr TMR3L;
__at(0x0FB3) __sfr TMR3H;
__at(0x0FB4) __sfr CMCON;
__at(0x0FB4) volatile __CMCONbits_t CMCONbits;
__at(0x0FB5) __sfr CVRCON;
__at(0x0FB5) volatile __CVRCONbits_t CVRCONbits;
__at(0x0FB6) __sfr ECCP1AS;
__at(0x0FB6) volatile __ECCP1ASbits_t ECCP1ASbits;
__at(0x0FB6) __sfr ECCPAS;
__at(0x0FB6) volatile __ECCPASbits_t ECCPASbits;
__at(0x0FB7) __sfr ECCP1DEL;
__at(0x0FB7) volatile __ECCP1DELbits_t ECCP1DELbits;
__at(0x0FB7) __sfr PWM1CON;
__at(0x0FB7) volatile __PWM1CONbits_t PWM1CONbits;
__at(0x0FB8) __sfr BAUDCON;
__at(0x0FB8) volatile __BAUDCONbits_t BAUDCONbits;
__at(0x0FB8) __sfr BAUDCTL;
__at(0x0FB8) volatile __BAUDCTLbits_t BAUDCTLbits;
__at(0x0FBA) __sfr CCP2CON;
__at(0x0FBA) volatile __CCP2CONbits_t CCP2CONbits;
__at(0x0FBB) __sfr CCPR2;
__at(0x0FBB) __sfr CCPR2L;
__at(0x0FBC) __sfr CCPR2H;
__at(0x0FBD) __sfr CCP1CON;
__at(0x0FBD) volatile __CCP1CONbits_t CCP1CONbits;
__at(0x0FBE) __sfr CCPR1;
__at(0x0FBE) __sfr CCPR1L;
__at(0x0FBF) __sfr CCPR1H;
__at(0x0FC0) __sfr ADCON2;
__at(0x0FC0) volatile __ADCON2bits_t ADCON2bits;
__at(0x0FC1) __sfr ADCON1;
__at(0x0FC1) volatile __ADCON1bits_t ADCON1bits;
__at(0x0FC2) __sfr ADCON0;
__at(0x0FC2) volatile __ADCON0bits_t ADCON0bits;
__at(0x0FC3) __sfr ADRES;
__at(0x0FC3) __sfr ADRESL;
__at(0x0FC4) __sfr ADRESH;
__at(0x0FC5) __sfr SSPCON2;
__at(0x0FC5) volatile __SSPCON2bits_t SSPCON2bits;
__at(0x0FC6) __sfr SSPCON1;
__at(0x0FC6) volatile __SSPCON1bits_t SSPCON1bits;
__at(0x0FC7) __sfr SSPSTAT;
__at(0x0FC7) volatile __SSPSTATbits_t SSPSTATbits;
__at(0x0FC8) __sfr SSPADD;
__at(0x0FC9) __sfr SSPBUF;
__at(0x0FCA) __sfr T2CON;
__at(0x0FCA) volatile __T2CONbits_t T2CONbits;
__at(0x0FCB) __sfr PR2;
__at(0x0FCC) __sfr TMR2;
__at(0x0FCD) __sfr T1CON;
__at(0x0FCD) volatile __T1CONbits_t T1CONbits;
__at(0x0FCE) __sfr TMR1;
__at(0x0FCE) __sfr TMR1L;
__at(0x0FCF) __sfr TMR1H;
__at(0x0FD0) __sfr RCON;
__at(0x0FD0) volatile __RCONbits_t RCONbits;
__at(0x0FD1) __sfr WDTCON;
__at(0x0FD1) volatile __WDTCONbits_t WDTCONbits;
__at(0x0FD2) __sfr HLVDCON;
__at(0x0FD2) volatile __HLVDCONbits_t HLVDCONbits;
__at(0x0FD2) __sfr LVDCON;
__at(0x0FD2) volatile __LVDCONbits_t LVDCONbits;
__at(0x0FD3) __sfr OSCCON;
__at(0x0FD3) volatile __OSCCONbits_t OSCCONbits;
__at(0x0FD5) __sfr T0CON;
__at(0x0FD5) volatile __T0CONbits_t T0CONbits;
__at(0x0FD6) __sfr TMR0;
__at(0x0FD6) __sfr TMR0L;
__at(0x0FD7) __sfr TMR0H;
__at(0x0FD8) __sfr STATUS;
__at(0x0FD8) volatile __STATUSbits_t STATUSbits;
__at(0x0FD9) __sfr FSR2L;
__at(0x0FDA) __sfr FSR2H;
__at(0x0FDB) __sfr PLUSW2;
__at(0x0FDC) __sfr PREINC2;
__at(0x0FDD) __sfr POSTDEC2;
__at(0x0FDE) __sfr POSTINC2;
__at(0x0FDF) __sfr INDF2;
__at(0x0FE0) __sfr BSR;
__at(0x0FE1) __sfr FSR1L;
__at(0x0FE2) __sfr FSR1H;
__at(0x0FE3) __sfr PLUSW1;
__at(0x0FE4) __sfr PREINC1;
__at(0x0FE5) __sfr POSTDEC1;
__at(0x0FE6) __sfr POSTINC1;
__at(0x0FE7) __sfr INDF1;
__at(0x0FE8) __sfr WREG;
__at(0x0FE9) __sfr FSR0L;
__at(0x0FEA) __sfr FSR0H;
__at(0x0FEB) __sfr PLUSW0;
__at(0x0FEC) __sfr PREINC0;
__at(0x0FED) __sfr POSTDEC0;
__at(0x0FEE) __sfr POSTINC0;
__at(0x0FEF) __sfr INDF0;
__at(0x0FF0) __sfr INTCON3;
__at(0x0FF0) volatile __INTCON3bits_t INTCON3bits;
__at(0x0FF1) __sfr INTCON2;
__at(0x0FF1) volatile __INTCON2bits_t INTCON2bits;
__at(0x0FF2) __sfr INTCON;
__at(0x0FF2) volatile __INTCONbits_t INTCONbits;
__at(0x0FF3) __sfr PROD;
__at(0x0FF3) __sfr PRODL;
__at(0x0FF4) __sfr PRODH;
__at(0x0FF5) __sfr TABLAT;
__at(0x0FF6) __sfr TBLPTR;
__at(0x0FF6) __sfr TBLPTRL;
__at(0x0FF7) __sfr TBLPTRH;
__at(0x0FF8) __sfr TBLPTRU;
__at(0x0FF9) __sfr PC;
__at(0x0FF9) __sfr PCL;
__at(0x0FFA) __sfr PCLATH;
__at(0x0FFB) __sfr PCLATU;
__at(0x0FFC) __sfr STKPTR;
__at(0x0FFC) volatile __STKPTRbits_t STKPTRbits;
__at(0x0FFD) __sfr TOS;
__at(0x0FFD) __sfr TOSL;
__at(0x0FFE) __sfr TOSH;
__at(0x0FFF) __sfr TOSU;
| {
"pile_set_name": "Github"
} |
We define a harmonious array as an array where the difference between its maximum value and its minimum value is exactly 1.
Now, given an integer array, you need to find the length of its longest harmonious subsequence among all its possible subsequences.
Example 1:
Input: [1,3,2,2,5,2,3,7]
Output: 5
Explanation: The longest harmonious subsequence is [3,2,2,2,3].
Note:
The length of the input array will not exceed 20,000.
| {
"pile_set_name": "Github"
} |
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_KRONECKER_PRODUCT_MODULE_H
#define EIGEN_KRONECKER_PRODUCT_MODULE_H
#include "../../Eigen/Core"
#include "../../Eigen/src/Core/util/DisableStupidWarnings.h"
#include "../../Eigen/src/SparseCore/SparseUtil.h"
namespace Eigen {
/**
* \defgroup KroneckerProduct_Module KroneckerProduct module
*
* This module contains an experimental Kronecker product implementation.
*
* \code
* #include <Eigen/KroneckerProduct>
* \endcode
*/
} // namespace Eigen
#include "src/KroneckerProduct/KroneckerTensorProduct.h"
#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h"
#endif // EIGEN_KRONECKER_PRODUCT_MODULE_H
| {
"pile_set_name": "Github"
} |
<?php
namespace AlgoliaSearch\Tests;
use AlgoliaSearch\AlgoliaException;
use AlgoliaSearch\Client;
use AlgoliaSearch\Index;
/**
 * Integration tests for the Algolia query-rules API
 * (save/get/delete/search/batch/clear).
 */
class RulesTest extends AlgoliaSearchTestCase
{
    /** @var Client */
    private $client;

    /** @var Index */
    private $index;

    /**
     * Create a fresh client/index pair and make sure the index starts
     * without records or query rules.
     */
    protected function setUp()
    {
        $this->client = new Client(getenv('ALGOLIA_APPLICATION_ID'), getenv('ALGOLIA_API_KEY'));
        $this->index = $this->client->initIndex($this->safe_name('àlgol?à-php'));
        $this->index->clearIndex();
        $this->index->clearRules();
    }

    /**
     * Best-effort cleanup of the test index.
     */
    protected function tearDown()
    {
        try {
            $this->client->deleteIndex($this->safe_name('àlgol?à-php'));
        } catch (AlgoliaException $e) {
            // Intentionally ignored: the index may already be gone.
        }
    }

    /**
     * A saved rule can be fetched back unchanged.
     */
    public function testSaveAndGetRule()
    {
        $rule = $this->getRuleStub();

        $response = $this->index->saveRule('my-rule', $rule);
        $this->index->waitTask($response['taskID']);

        $this->assertEquals($rule, $this->index->getRule('my-rule'));
    }

    /**
     * Fetching a deleted rule raises "ObjectID does not exist".
     *
     * @depends testSaveAndGetRule
     * @expectedException \AlgoliaSearch\AlgoliaException
     * @expectedExceptionMessage ObjectID does not exist
     */
    public function testDeleteRule()
    {
        $rule = $this->getRuleStub();

        $response = $this->index->saveRule('my-rule', $rule);
        $this->index->waitTask($response['taskID']);

        $response = $this->index->deleteRule('my-rule');
        $this->index->waitTask($response['taskID']);

        // Expected to throw: the rule no longer exists.
        $this->index->getRule('my-rule');
    }

    /**
     * searchRules() reports every saved rule.
     *
     * @depends testSaveAndGetRule
     */
    public function testSearchRules()
    {
        $rule = $this->getRuleStub();
        $rule2 = $this->getRuleStub('my-second-rule');

        $response = $this->index->saveRule('my-rule', $rule);
        $this->index->waitTask($response['taskID']);
        $response = $this->index->saveRule('my-second-rule', $rule2);
        $this->index->waitTask($response['taskID']);

        $rules = $this->index->searchRules();
        $this->assertEquals(2, $rules['nbHits']);
    }

    /**
     * batchRules() saves several rules at once and clearRules() removes
     * them all.
     *
     * @depends testSaveAndGetRule
     * @depends testSearchRules
     */
    public function testBatchAndClearRules()
    {
        $rule = $this->getRuleStub();
        $rule2 = $this->getRuleStub('my-second-rule');

        $response = $this->index->batchRules(array($rule, $rule2));
        $this->index->waitTask($response['taskID']);

        $this->assertEquals($rule, $this->index->getRule('my-rule'));
        $this->assertEquals($rule2, $this->index->getRule('my-second-rule'));

        $response = $this->index->clearRules();
        $this->index->waitTask($response['taskID']);

        $rules = $this->index->searchRules();
        $this->assertEquals(0, $rules['nbHits']);
    }

    /**
     * batchRules() with clearExistingRules=true replaces the previous
     * rule set instead of merging into it.
     *
     * @depends testBatchAndClearRules
     */
    public function testBatchClearExisting()
    {
        $rule = $this->getRuleStub();
        $rule2 = $this->getRuleStub('my-second-rule');
        $rule3 = $this->getRuleStub('my-second-rule-3');
        $rule4 = $this->getRuleStub('my-second-rule-4');

        $response = $this->index->batchRules(array($rule, $rule2));
        $this->index->waitTask($response['taskID']);

        $response = $this->index->batchRules(array($rule3, $rule4), false, true);
        $this->index->waitTask($response['taskID']);

        $rules = $this->index->searchRules();
        $this->assertEquals(2, $rules['nbHits']);

        // Drop the highlight metadata before comparing raw rule payloads.
        unset($rules['hits'][0]['_highlightResult']);
        unset($rules['hits'][1]['_highlightResult']);
        $this->assertEquals(array($rule4, $rule3), $rules['hits']);
    }

    /**
     * Build a minimal query-rule fixture with the given objectID.
     *
     * @param string $objectID
     * @return array
     */
    private function getRuleStub($objectID = 'my-rule')
    {
        // Fixed: dropped a useless `$rule =` assignment in the return statement.
        return array(
            'objectID' => $objectID,
            'condition' => array(
                'pattern' => 'some text',
                'anchoring' => 'is'
            ),
            'consequence' => array(
                'params' => array(
                    'query' => 'other text'
                )
            )
        );
    }
}
| {
"pile_set_name": "Github"
} |
// @flow strict
import { WidgetActions } from 'views/stores/WidgetStore';
import AggregationWidgetConfig from 'views/logic/aggregationbuilder/AggregationWidgetConfig';
import Widget from 'views/logic/widgets/Widget';
import AggregationWidget from 'views/logic/aggregationbuilder/AggregationWidget';
import Series from 'views/logic/aggregationbuilder/Series';
import { TitlesActions, TitleTypes } from 'views/stores/TitlesStore';
import type { FieldActionHandler } from './FieldActionHandler';
import duplicateCommonWidgetSettings from './DuplicateCommonWidgetSettings';
const NUMERIC_FIELD_SERIES = ['count', 'sum', 'avg', 'min', 'max', 'stddev', 'variance', 'card', 'percentile'];
const NONNUMERIC_FIELD_SERIES = ['count', 'card'];
const handler: FieldActionHandler = ({ field, type, contexts: { widget: origWidget = Widget.empty() } }) => {
const series = ((type && type.isNumeric()) ? NUMERIC_FIELD_SERIES : NONNUMERIC_FIELD_SERIES)
.map((f) => {
if (f === 'percentile') {
return `${f}(${field},95)`;
}
return `${f}(${field})`;
})
.map(Series.forFunction);
const config = AggregationWidgetConfig.builder()
.series(series)
.visualization('table')
.rollup(true)
.build();
const widgetBuilder = AggregationWidget.builder()
.newId()
.config(config);
const widget = duplicateCommonWidgetSettings(widgetBuilder, origWidget).build();
return WidgetActions.create(widget).then((newWidget) => TitlesActions.set(TitleTypes.Widget, newWidget.id, `Field Statistics for ${field}`));
};
export default handler;
| {
"pile_set_name": "Github"
} |
Royal Oak Mines Inc is reshaping its operations to concentrate on its low-cost gold deposits in the Timmins area and British Columbia, the company president said Wednesday.
"We have refocused our strategy to bring on very low-cost production," Peggy Witte said in an analysts' conference call. "It's very clear that's where Royal Oak's future lies."
The Kirkland, Washington-based gold producer said Wednesday it will close down its mine in Hope Brook, Newfoundland, and write down the reserves at its Colomac mine in the Northwest Territories.
Royal Oak expects to take C$37.4 million in charges because of the Hope Brook and Colomac decisions. After tax, the charges will amount to a decrease in net income of about C$27 million or C$0.19 a share in the fourth quarter of 1996.
The company plans to move most of the mining and mill assets from Hope Brook to its Matachewan project in northern Ontario.
"It's a very positive story for us being able to redeploy those assets, and we did not want to spread our senior management too thin," Witte said, explaining the decision.
Hope Brook will shut down in the third quarter of 1997, while Matachewan is expected to start production in the second half of 1998, Royal Oak said.
The company recently increased its gold reserves at its Timmins and Matachewan properties, decided to expand its Pamour Mill and is putting together a feasibility study for an expanded open pit in the area.
The northern Ontario mines, along with production expected in 1998 from the Kemess copper and gold mine in British Columbia, will boost production and decrease costs significantly for Royal Oak, said chief financial officer Jim Wood.
The company expects to produce 375,000 ounces at a cash cost of US$325 an ounce in 1997. In 1998, production will rise to 500,000 ounces at a cost of US$250 or US$260 an ounce. By 1999, the company expects up to 750,000 ounces at about US$240 an ounce, and in 2000, production should be almost one million ounces with costs in the low US$200s, Wood said.
"Although we have closed out Hope Brook and cash costs are somewhat disappointing at Colomac, the future of the company, with the Pamour expansion, looks very bright for us in terms of production and our average cash costs," he said.
The company reported third quarter profits of C$10.2 million or C$0.07 a share, up from C$6.2 million or C$0.05 a share a year earlier.
Gold production rose 13 percent in the quarter to 104,012 ounces from 92,159 ounces during the third quarter last year.
The increase came from the Pamour mine in Northern Ontario, which produced higher grade ore.
Revenue was 48 percent higher in the third quarter, mainly because of a successful hedging program. The company realized a gold price of US$543 per ounce, compared to US$418 an ounce at the same time last year.
Royal Oak expects to produce about 400,000 ounces of gold in 1996 at a cash cost of US$335 an ounce. The lower production level and increase in cash cost from previous estimates are a result of lower grade ore at Colomac.
-- Reuters Toronto Bureau 416 941-8100
| {
"pile_set_name": "Github"
} |
--------------------------------
-- NOTE(review): this appears to be an auto-generated LDoc-style API stub
-- (annotation comments only) for a natively registered binding; the module
-- table itself is created by the host engine, hence the bare `return nil`
-- at the bottom — TODO confirm against the binding generator.
--------------------------------
-- @module TableViewCell
-- @extend Node
--------------------------------
-- Resets the cell for reuse.
-- @function [parent=#TableViewCell] reset
-- @param self
--------------------------------
-- Returns the cell's row index within its table view.
-- @function [parent=#TableViewCell] getIdx
-- @param self
-- @return long#long ret (return value: long)
--------------------------------
-- Sets the cell's row index within its table view.
-- @function [parent=#TableViewCell] setIdx
-- @param self
-- @param #long long
--------------------------------
-- Creates a new cell instance.
-- @function [parent=#TableViewCell] create
-- @param self
-- @return TableViewCell#TableViewCell ret (return value: cc.TableViewCell)
--------------------------------
-- Constructor.
-- @function [parent=#TableViewCell] TableViewCell
-- @param self
return nil
| {
"pile_set_name": "Github"
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.