unique_id (int64, 13–189k) | target (int64, 0–1) | code (stringlengths 20–241k) | __index_level_0__ (int64, 0–18.9k) |
---|---|---|---|
81,878 | 0 | int wc_ecc_export_x963_ex(ecc_key* key, byte* out, word32* outLen,
int compressed)
{
if (compressed == 0)
return wc_ecc_export_x963(key, out, outLen);
#ifdef HAVE_COMP_KEY
else
return wc_ecc_export_x963_compressed(key, out, outLen);
#else
return NOT_COMPILED_IN;
#endif
}
| 13,900 |
57,886 | 0 | void sync_mm_rss(struct mm_struct *mm)
{
int i;
for (i = 0; i < NR_MM_COUNTERS; i++) {
if (current->rss_stat.count[i]) {
add_mm_counter(mm, i, current->rss_stat.count[i]);
current->rss_stat.count[i] = 0;
}
}
current->rss_stat.events = 0;
}
| 13,901 |
37,326 | 0 | struct sctp_chunkhdr *sctp_inq_peek(struct sctp_inq *queue)
{
struct sctp_chunk *chunk;
sctp_chunkhdr_t *ch = NULL;
chunk = queue->in_progress;
/* If there is no more chunks in this packet, say so */
if (chunk->singleton ||
chunk->end_of_packet ||
chunk->pdiscard)
return NULL;
ch = (sctp_chunkhdr_t *)chunk->chunk_end;
return ch;
}
| 13,902 |
135,487 | 0 | MockSyncService() {}
| 13,903 |
100,313 | 0 | static std::string WrapWithTH(std::string text) {
return "<th>" + text + "</th>";
}
| 13,904 |
60,734 | 0 | dup_dh_params(const DH *src)
{
const BIGNUM *oldp, *oldq, *oldg;
BIGNUM *p = NULL, *q = NULL, *g = NULL;
DH *dh;
DH_get0_pqg(src, &oldp, &oldq, &oldg);
p = BN_dup(oldp);
q = BN_dup(oldq);
g = BN_dup(oldg);
dh = DH_new();
if (p == NULL || q == NULL || g == NULL || dh == NULL) {
BN_free(p);
BN_free(q);
BN_free(g);
DH_free(dh);
return NULL;
}
DH_set0_pqg(dh, p, q, g);
return dh;
}
| 13,905 |
109,953 | 0 | void H264DPB::GetShortTermRefPicsAppending(H264Picture::PtrVector& out) {
for (size_t i = 0; i < pics_.size(); ++i) {
H264Picture* pic = pics_[i];
if (pic->ref && !pic->long_term)
out.push_back(pic);
}
}
| 13,906 |
86,760 | 0 | TIFFOpenW(const wchar_t* name, const char* mode)
{
static const char module[] = "TIFFOpenW";
int m, fd;
int mbsize;
char *mbname;
TIFF* tif;
m = _TIFFgetMode(mode, module);
if (m == -1)
return ((TIFF*)0);
/* for cygwin and mingw */
#ifdef O_BINARY
m |= O_BINARY;
#endif
fd = _wopen(name, m, 0666);
if (fd < 0) {
TIFFErrorExt(0, module, "%s: Cannot open", name);
return ((TIFF *)0);
}
mbname = NULL;
mbsize = WideCharToMultiByte(CP_ACP, 0, name, -1, NULL, 0, NULL, NULL);
if (mbsize > 0) {
mbname = _TIFFmalloc(mbsize);
if (!mbname) {
TIFFErrorExt(0, module,
"Can't allocate space for filename conversion buffer");
return ((TIFF*)0);
}
WideCharToMultiByte(CP_ACP, 0, name, -1, mbname, mbsize,
NULL, NULL);
}
tif = TIFFFdOpen((int)fd, (mbname != NULL) ? mbname : "<unknown>",
mode);
_TIFFfree(mbname);
if(!tif)
close(fd);
return tif;
}
| 13,907 |
39,297 | 0 | static int security_sid_to_context_core(u32 sid, char **scontext,
u32 *scontext_len, int force)
{
struct context *context;
int rc = 0;
if (scontext)
*scontext = NULL;
*scontext_len = 0;
if (!ss_initialized) {
if (sid <= SECINITSID_NUM) {
char *scontextp;
*scontext_len = strlen(initial_sid_to_string[sid]) + 1;
if (!scontext)
goto out;
scontextp = kmalloc(*scontext_len, GFP_ATOMIC);
if (!scontextp) {
rc = -ENOMEM;
goto out;
}
strcpy(scontextp, initial_sid_to_string[sid]);
*scontext = scontextp;
goto out;
}
printk(KERN_ERR "SELinux: %s: called before initial "
"load_policy on unknown SID %d\n", __func__, sid);
rc = -EINVAL;
goto out;
}
read_lock(&policy_rwlock);
if (force)
context = sidtab_search_force(&sidtab, sid);
else
context = sidtab_search(&sidtab, sid);
if (!context) {
printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
__func__, sid);
rc = -EINVAL;
goto out_unlock;
}
rc = context_struct_to_string(context, scontext, scontext_len);
out_unlock:
read_unlock(&policy_rwlock);
out:
return rc;
}
| 13,908 |
33,860 | 0 | cib_ipc_destroy(qb_ipcs_connection_t *c)
{
cib_client_t *cib_client = qb_ipcs_context_get(c);
CRM_ASSERT(cib_client != NULL);
CRM_ASSERT(cib_client->id != NULL);
/* In case we arrive here without a call to cib_ipc_close() */
g_hash_table_remove(client_list, cib_client->id);
crm_trace("Destroying %s (%p)", cib_client->name, c);
free(cib_client->name);
free(cib_client->callback_id);
free(cib_client->id);
free(cib_client->user);
free(cib_client);
crm_trace("Freed the cib client");
if (cib_shutdown_flag) {
cib_shutdown(0);
}
}
| 13,909 |
166,355 | 0 | void SpeechRecognitionManagerImpl::StartSession(int session_id) {
DCHECK_CURRENTLY_ON(BrowserThread::IO);
if (!SessionExists(session_id))
return;
if (primary_session_id_ != kSessionIDInvalid &&
primary_session_id_ != session_id) {
AbortSession(primary_session_id_);
}
primary_session_id_ = session_id;
if (delegate_) {
delegate_->CheckRecognitionIsAllowed(
session_id,
base::BindOnce(
&SpeechRecognitionManagerImpl::RecognitionAllowedCallback,
weak_factory_.GetWeakPtr(), session_id));
}
}
| 13,910 |
21,553 | 0 | static void kernel_shutdown_prepare(enum system_states state)
{
blocking_notifier_call_chain(&reboot_notifier_list,
(state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL);
system_state = state;
usermodehelper_disable();
device_shutdown();
}
| 13,911 |
10,813 | 0 | int tls1_check_chain(SSL *s, X509 *x, EVP_PKEY *pk, STACK_OF(X509) *chain,
int idx)
{
int i;
int rv = 0;
int check_flags = 0, strict_mode;
CERT_PKEY *cpk = NULL;
CERT *c = s->cert;
unsigned int suiteb_flags = tls1_suiteb(s);
/* idx == -1 means checking server chains */
if (idx != -1)
{
/* idx == -2 means checking client certificate chains */
if (idx == -2)
{
cpk = c->key;
idx = cpk - c->pkeys;
}
else
cpk = c->pkeys + idx;
x = cpk->x509;
pk = cpk->privatekey;
chain = cpk->chain;
strict_mode = c->cert_flags & SSL_CERT_FLAGS_CHECK_TLS_STRICT;
/* If no cert or key, forget it */
if (!x || !pk)
goto end;
#ifdef OPENSSL_SSL_DEBUG_BROKEN_PROTOCOL
/* Allow any certificate to pass test */
if (s->cert->cert_flags & SSL_CERT_FLAG_BROKEN_PROTOCOL)
{
rv = CERT_PKEY_STRICT_FLAGS|CERT_PKEY_EXPLICIT_SIGN|CERT_PKEY_VALID|CERT_PKEY_SIGN;
cpk->valid_flags = rv;
return rv;
}
#endif
}
else
{
if (!x || !pk)
goto end;
idx = ssl_cert_type(x, pk);
if (idx == -1)
goto end;
cpk = c->pkeys + idx;
if (c->cert_flags & SSL_CERT_FLAGS_CHECK_TLS_STRICT)
check_flags = CERT_PKEY_STRICT_FLAGS;
else
check_flags = CERT_PKEY_VALID_FLAGS;
strict_mode = 1;
}
if (suiteb_flags)
{
int ok;
if (check_flags)
check_flags |= CERT_PKEY_SUITEB;
ok = X509_chain_check_suiteb(NULL, x, chain, suiteb_flags);
if (ok != X509_V_OK)
{
if (check_flags)
rv |= CERT_PKEY_SUITEB;
else
goto end;
}
}
/* Check all signature algorithms are consistent with
* signature algorithms extension if TLS 1.2 or later
* and strict mode.
*/
if (TLS1_get_version(s) >= TLS1_2_VERSION && strict_mode)
{
int default_nid;
unsigned char rsign = 0;
if (c->peer_sigalgs)
default_nid = 0;
/* If no sigalgs extension use defaults from RFC5246 */
else
{
switch(idx)
{
case SSL_PKEY_RSA_ENC:
case SSL_PKEY_RSA_SIGN:
case SSL_PKEY_DH_RSA:
rsign = TLSEXT_signature_rsa;
default_nid = NID_sha1WithRSAEncryption;
break;
case SSL_PKEY_DSA_SIGN:
case SSL_PKEY_DH_DSA:
rsign = TLSEXT_signature_dsa;
default_nid = NID_dsaWithSHA1;
break;
case SSL_PKEY_ECC:
rsign = TLSEXT_signature_ecdsa;
default_nid = NID_ecdsa_with_SHA1;
break;
default:
default_nid = -1;
break;
}
}
/* If peer sent no signature algorithms extension and we
* have set preferred signature algorithms check we support
* sha1.
*/
if (default_nid > 0 && c->conf_sigalgs)
{
size_t j;
const unsigned char *p = c->conf_sigalgs;
for (j = 0; j < c->conf_sigalgslen; j += 2, p += 2)
{
if (p[0] == TLSEXT_hash_sha1 && p[1] == rsign)
break;
}
if (j == c->conf_sigalgslen)
{
if (check_flags)
goto skip_sigs;
else
goto end;
}
}
/* Check signature algorithm of each cert in chain */
if (!tls1_check_sig_alg(c, x, default_nid))
{
if (!check_flags) goto end;
}
else
rv |= CERT_PKEY_EE_SIGNATURE;
rv |= CERT_PKEY_CA_SIGNATURE;
for (i = 0; i < sk_X509_num(chain); i++)
{
if (!tls1_check_sig_alg(c, sk_X509_value(chain, i),
default_nid))
{
if (check_flags)
{
rv &= ~CERT_PKEY_CA_SIGNATURE;
break;
}
else
goto end;
}
}
}
/* Else not TLS 1.2, so mark EE and CA signing algorithms OK */
else if(check_flags)
rv |= CERT_PKEY_EE_SIGNATURE|CERT_PKEY_CA_SIGNATURE;
skip_sigs:
/* Check cert parameters are consistent */
if (tls1_check_cert_param(s, x, check_flags ? 1 : 2))
rv |= CERT_PKEY_EE_PARAM;
else if (!check_flags)
goto end;
if (!s->server)
rv |= CERT_PKEY_CA_PARAM;
/* In strict mode check rest of chain too */
else if (strict_mode)
{
rv |= CERT_PKEY_CA_PARAM;
for (i = 0; i < sk_X509_num(chain); i++)
{
X509 *ca = sk_X509_value(chain, i);
if (!tls1_check_cert_param(s, ca, 0))
{
if (check_flags)
{
rv &= ~CERT_PKEY_CA_PARAM;
break;
}
else
goto end;
}
}
}
if (!s->server && strict_mode)
{
STACK_OF(X509_NAME) *ca_dn;
int check_type = 0;
switch (pk->type)
{
case EVP_PKEY_RSA:
check_type = TLS_CT_RSA_SIGN;
break;
case EVP_PKEY_DSA:
check_type = TLS_CT_DSS_SIGN;
break;
case EVP_PKEY_EC:
check_type = TLS_CT_ECDSA_SIGN;
break;
case EVP_PKEY_DH:
case EVP_PKEY_DHX:
{
int cert_type = X509_certificate_type(x, pk);
if (cert_type & EVP_PKS_RSA)
check_type = TLS_CT_RSA_FIXED_DH;
if (cert_type & EVP_PKS_DSA)
check_type = TLS_CT_DSS_FIXED_DH;
}
}
if (check_type)
{
const unsigned char *ctypes;
int ctypelen;
if (c->ctypes)
{
ctypes = c->ctypes;
ctypelen = (int)c->ctype_num;
}
else
{
ctypes = (unsigned char *)s->s3->tmp.ctype;
ctypelen = s->s3->tmp.ctype_num;
}
for (i = 0; i < ctypelen; i++)
{
if (ctypes[i] == check_type)
{
rv |= CERT_PKEY_CERT_TYPE;
break;
}
}
if (!(rv & CERT_PKEY_CERT_TYPE) && !check_flags)
goto end;
}
else
rv |= CERT_PKEY_CERT_TYPE;
ca_dn = s->s3->tmp.ca_names;
if (!sk_X509_NAME_num(ca_dn))
rv |= CERT_PKEY_ISSUER_NAME;
if (!(rv & CERT_PKEY_ISSUER_NAME))
{
if (ssl_check_ca_name(ca_dn, x))
rv |= CERT_PKEY_ISSUER_NAME;
}
if (!(rv & CERT_PKEY_ISSUER_NAME))
{
for (i = 0; i < sk_X509_num(chain); i++)
{
X509 *xtmp = sk_X509_value(chain, i);
if (ssl_check_ca_name(ca_dn, xtmp))
{
rv |= CERT_PKEY_ISSUER_NAME;
break;
}
}
}
if (!check_flags && !(rv & CERT_PKEY_ISSUER_NAME))
goto end;
}
else
rv |= CERT_PKEY_ISSUER_NAME|CERT_PKEY_CERT_TYPE;
if (!check_flags || (rv & check_flags) == check_flags)
rv |= CERT_PKEY_VALID;
end:
if (TLS1_get_version(s) >= TLS1_2_VERSION)
{
if (cpk->valid_flags & CERT_PKEY_EXPLICIT_SIGN)
rv |= CERT_PKEY_EXPLICIT_SIGN|CERT_PKEY_SIGN;
else if (cpk->digest)
rv |= CERT_PKEY_SIGN;
}
else
rv |= CERT_PKEY_SIGN|CERT_PKEY_EXPLICIT_SIGN;
/* When checking a CERT_PKEY structure all flags are irrelevant
* if the chain is invalid.
*/
if (!check_flags)
{
if (rv & CERT_PKEY_VALID)
cpk->valid_flags = rv;
else
{
/* Preserve explicit sign flag, clear rest */
cpk->valid_flags &= CERT_PKEY_EXPLICIT_SIGN;
return 0;
}
}
return rv;
}
| 13,912 |
107,380 | 0 | string16 GetMatchCountText() {
return GetFindBarMatchCountTextForBrowser(browser());
}
| 13,913 |
153,484 | 0 | bool TabStrip::SupportsMultipleSelection() {
return touch_layout_ == nullptr;
}
| 13,914 |
104,421 | 0 | GLvoid StubGLClearStencil(GLint s) {
glClearStencil(s);
}
| 13,915 |
76,905 | 0 | encode_STACK_PUSH(const struct ofpact_stack *stack,
enum ofp_version ofp_version OVS_UNUSED, struct ofpbuf *out)
{
encode_STACK_op(stack, put_NXAST_STACK_PUSH(out));
}
| 13,916 |
165,772 | 0 | void SVGElement::ClearAnimatedAttribute(const QualifiedName& attribute) {
ForSelfAndInstances(this, [&attribute](SVGElement* element) {
if (SVGAnimatedPropertyBase* animated_property =
element->PropertyFromAttribute(attribute))
animated_property->AnimationEnded();
});
}
| 13,917 |
149,545 | 0 | void CheckForWaitingLoop() {
switch (wait_event_) {
case WaitEvent::kNone:
return;
case WaitEvent::kDns:
if (!HasHostBeenLookedUp(waiting_on_dns_))
return;
waiting_on_dns_ = std::string();
break;
case WaitEvent::kProxy:
if (!HasProxyBeenLookedUp(waiting_on_proxy_))
return;
waiting_on_proxy_ = GURL();
break;
}
DCHECK(run_loop_);
run_loop_->Quit();
run_loop_ = nullptr;
wait_event_ = WaitEvent::kNone;
}
| 13,918 |
92,690 | 0 | static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
{
u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
/* if there's a quota refresh soon don't bother with slack */
if (runtime_refresh_within(cfs_b, min_left))
return;
hrtimer_start(&cfs_b->slack_timer,
ns_to_ktime(cfs_bandwidth_slack_period),
HRTIMER_MODE_REL);
}
| 13,919 |
43,748 | 0 | static int set_pid_priority(pid_t pid,int policy,int priority,char *message,char *name){
struct sched_param par={0};
par.sched_priority=priority;
if((sched_setscheduler(pid,policy,&par)!=0)){
print_error(stderr,message,pid,name,strerror(errno));
return 0;
}
return 1;
}
| 13,920 |
6,621 | 0 | filter_open(const char *file_access, uint buffer_size, ref * pfile,
const stream_procs * procs, const stream_template * templat,
const stream_state * st, gs_memory_t *mem)
{
stream *s;
uint ssize = gs_struct_type_size(templat->stype);
stream_state *sst = 0;
int code;
if (templat->stype != &st_stream_state) {
sst = s_alloc_state(mem, templat->stype, "filter_open(stream_state)");
if (sst == 0)
return_error(gs_error_VMerror);
}
code = file_open_stream((char *)0, 0, file_access, buffer_size, &s,
(gx_io_device *)0, (iodev_proc_fopen_t)0, mem);
if (code < 0) {
gs_free_object(mem, sst, "filter_open(stream_state)");
return code;
}
s_std_init(s, s->cbuf, s->bsize, procs,
(*file_access == 'r' ? s_mode_read : s_mode_write));
s->procs.process = templat->process;
s->save_close = s->procs.close;
s->procs.close = file_close_file;
if (sst == 0) {
/* This stream doesn't have any state of its own. */
/* Hack: use the stream itself as the state. */
sst = (stream_state *) s;
} else if (st != 0) /* might not have client parameters */
memcpy(sst, st, ssize);
s->state = sst;
s_init_state(sst, templat, mem);
sst->report_error = filter_report_error;
if (templat->init != 0) {
code = (*templat->init)(sst);
if (code < 0) {
gs_free_object(mem, sst, "filter_open(stream_state)");
gs_free_object(mem, s->cbuf, "filter_open(buffer)");
return code;
}
}
make_stream_file(pfile, s, file_access);
return 0;
}
| 13,921 |
32,646 | 0 | static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
struct tg3_hw_stats *sp = tp->hw_stats;
if (!tp->link_up)
return;
TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
(sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
u32 val;
val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
tg3_flag_clear(tp, 5719_RDMA_BUG);
}
TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
} else {
u32 val = tr32(HOSTCC_FLOW_ATTN);
val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
if (val) {
tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
sp->rx_discards.low += val;
if (sp->rx_discards.low < val)
sp->rx_discards.high += 1;
}
sp->mbuf_lwm_thresh_hit = sp->rx_discards;
}
TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
| 13,922 |
182,979 | 1 | rdpsnddbg_process(STREAM s)
{
unsigned int pkglen;
static char *rest = NULL;
char *buf;
pkglen = s->end - s->p;
/* str_handle_lines requires null terminated strings */
buf = (char *) xmalloc(pkglen + 1);
STRNCPY(buf, (char *) s->p, pkglen + 1);
str_handle_lines(buf, &rest, rdpsnddbg_line_handler, NULL);
xfree(buf);
}
| 13,923 |
126,608 | 0 | TabStripGtk::~TabStripGtk() {
model_->RemoveObserver(this);
tabstrip_.Destroy();
std::vector<TabData>::iterator iterator = tab_data_.begin();
for (; iterator < tab_data_.end(); iterator++) {
delete iterator->tab;
}
tab_data_.clear();
RemoveMessageLoopObserver();
}
| 13,924 |
160,272 | 0 | void MimeHandlerViewContainer::DidResizeElement(const gfx::Size& new_size) {
element_size_ = new_size;
CreateMimeHandlerViewGuestIfNecessary();
if (!guest_created_)
return;
render_frame()->Send(new ExtensionsGuestViewHostMsg_ResizeGuest(
render_frame()->GetRoutingID(), element_instance_id(), new_size));
}
| 13,925 |
3,332 | 0 | static uint64_t fdctrl_read_mem (void *opaque, hwaddr reg,
unsigned ize)
{
return fdctrl_read(opaque, (uint32_t)reg);
}
| 13,926 |
21,015 | 0 | __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
{
struct mem_cgroup *memcg = NULL;
unsigned int nr_pages = 1;
struct page_cgroup *pc;
if (mem_cgroup_disabled())
return NULL;
if (PageSwapCache(page))
return NULL;
if (PageTransHuge(page)) {
nr_pages <<= compound_order(page);
VM_BUG_ON(!PageTransHuge(page));
}
/*
* Check if our page_cgroup is valid
*/
pc = lookup_page_cgroup(page);
if (unlikely(!PageCgroupUsed(pc)))
return NULL;
lock_page_cgroup(pc);
memcg = pc->mem_cgroup;
if (!PageCgroupUsed(pc))
goto unlock_out;
switch (ctype) {
case MEM_CGROUP_CHARGE_TYPE_MAPPED:
case MEM_CGROUP_CHARGE_TYPE_DROP:
/* See mem_cgroup_prepare_migration() */
if (page_mapped(page) || PageCgroupMigration(pc))
goto unlock_out;
break;
case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
if (!PageAnon(page)) { /* Shared memory */
if (page->mapping && !page_is_file_cache(page))
goto unlock_out;
} else if (page_mapped(page)) /* Anon */
goto unlock_out;
break;
default:
break;
}
mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -nr_pages);
ClearPageCgroupUsed(pc);
/*
* pc->mem_cgroup is not cleared here. It will be accessed when it's
* freed from LRU. This is safe because uncharged page is expected not
* to be reused (freed soon). Exception is SwapCache, it's handled by
* special functions.
*/
unlock_page_cgroup(pc);
/*
* even after unlock, we have memcg->res.usage here and this memcg
* will never be freed.
*/
memcg_check_events(memcg, page);
if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) {
mem_cgroup_swap_statistics(memcg, true);
mem_cgroup_get(memcg);
}
if (!mem_cgroup_is_root(memcg))
mem_cgroup_do_uncharge(memcg, nr_pages, ctype);
return memcg;
unlock_out:
unlock_page_cgroup(pc);
return NULL;
}
| 13,927 |
122,392 | 0 | bool HTMLSelectElement::anonymousIndexedSetterRemove(unsigned index, ExceptionState& exceptionState)
{
remove(index);
return true;
}
| 13,928 |
44,414 | 0 | static int lxcfs_opendir(const char *path, struct fuse_file_info *fi)
{
if (strcmp(path, "/") == 0)
return 0;
if (strncmp(path, "/cgroup", 7) == 0) {
return cg_opendir(path, fi);
}
if (strcmp(path, "/proc") == 0)
return 0;
return -ENOENT;
}
| 13,929 |
48,741 | 0 | static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
{
unsigned int old_flags = dev->flags;
kuid_t uid;
kgid_t gid;
ASSERT_RTNL();
dev->flags |= IFF_PROMISC;
dev->promiscuity += inc;
if (dev->promiscuity == 0) {
/*
* Avoid overflow.
* If inc causes overflow, untouch promisc and return error.
*/
if (inc < 0)
dev->flags &= ~IFF_PROMISC;
else {
dev->promiscuity -= inc;
pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
dev->name);
return -EOVERFLOW;
}
}
if (dev->flags != old_flags) {
pr_info("device %s %s promiscuous mode\n",
dev->name,
dev->flags & IFF_PROMISC ? "entered" : "left");
if (audit_enabled) {
current_uid_gid(&uid, &gid);
audit_log(current->audit_context, GFP_ATOMIC,
AUDIT_ANOM_PROMISCUOUS,
"dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
dev->name, (dev->flags & IFF_PROMISC),
(old_flags & IFF_PROMISC),
from_kuid(&init_user_ns, audit_get_loginuid(current)),
from_kuid(&init_user_ns, uid),
from_kgid(&init_user_ns, gid),
audit_get_sessionid(current));
}
dev_change_rx_flags(dev, IFF_PROMISC);
}
if (notify)
__dev_notify_flags(dev, old_flags, IFF_PROMISC);
return 0;
}
| 13,930 |
6,849 | 0 | http_res_get_intercept_rule(struct proxy *px, struct list *rules, struct stream *s)
{
struct session *sess = strm_sess(s);
struct http_txn *txn = s->txn;
struct connection *cli_conn;
struct act_rule *rule;
struct hdr_ctx ctx;
int act_flags = 0;
/* If "the current_rule_list" match the executed rule list, we are in
* resume condition. If a resume is needed it is always in the action
* and never in the ACL or converters. In this case, we initialise the
* current rule, and go to the action execution point.
*/
if (s->current_rule) {
rule = s->current_rule;
s->current_rule = NULL;
if (s->current_rule_list == rules)
goto resume_execution;
}
s->current_rule_list = rules;
list_for_each_entry(rule, rules, list) {
/* check optional condition */
if (rule->cond) {
int ret;
ret = acl_exec_cond(rule->cond, px, sess, s, SMP_OPT_DIR_RES|SMP_OPT_FINAL);
ret = acl_pass(ret);
if (rule->cond->pol == ACL_COND_UNLESS)
ret = !ret;
if (!ret) /* condition not matched */
continue;
}
act_flags |= ACT_FLAG_FIRST;
resume_execution:
switch (rule->action) {
case ACT_ACTION_ALLOW:
return HTTP_RULE_RES_STOP; /* "allow" rules are OK */
case ACT_ACTION_DENY:
txn->flags |= TX_SVDENY;
return HTTP_RULE_RES_STOP;
case ACT_HTTP_SET_NICE:
s->task->nice = rule->arg.nice;
break;
case ACT_HTTP_SET_TOS:
if ((cli_conn = objt_conn(sess->origin)) && conn_ctrl_ready(cli_conn))
inet_set_tos(cli_conn->handle.fd, &cli_conn->addr.from, rule->arg.tos);
break;
case ACT_HTTP_SET_MARK:
#ifdef SO_MARK
if ((cli_conn = objt_conn(sess->origin)) && conn_ctrl_ready(cli_conn))
setsockopt(cli_conn->handle.fd, SOL_SOCKET, SO_MARK, &rule->arg.mark, sizeof(rule->arg.mark));
#endif
break;
case ACT_HTTP_SET_LOGL:
s->logs.level = rule->arg.loglevel;
break;
case ACT_HTTP_REPLACE_HDR:
case ACT_HTTP_REPLACE_VAL:
if (http_transform_header(s, &txn->rsp, rule->arg.hdr_add.name,
rule->arg.hdr_add.name_len,
&rule->arg.hdr_add.fmt,
&rule->arg.hdr_add.re, rule->action))
return HTTP_RULE_RES_BADREQ;
break;
case ACT_HTTP_DEL_HDR:
ctx.idx = 0;
/* remove all occurrences of the header */
while (http_find_header2(rule->arg.hdr_add.name, rule->arg.hdr_add.name_len,
txn->rsp.chn->buf->p, &txn->hdr_idx, &ctx)) {
http_remove_header2(&txn->rsp, &txn->hdr_idx, &ctx);
}
break;
case ACT_HTTP_SET_HDR:
case ACT_HTTP_ADD_HDR: {
struct chunk *replace;
replace = alloc_trash_chunk();
if (!replace)
return HTTP_RULE_RES_BADREQ;
chunk_printf(replace, "%s: ", rule->arg.hdr_add.name);
memcpy(replace->str, rule->arg.hdr_add.name, rule->arg.hdr_add.name_len);
replace->len = rule->arg.hdr_add.name_len;
replace->str[replace->len++] = ':';
replace->str[replace->len++] = ' ';
replace->len += build_logline(s, replace->str + replace->len, replace->size - replace->len,
&rule->arg.hdr_add.fmt);
if (rule->action == ACT_HTTP_SET_HDR) {
/* remove all occurrences of the header */
ctx.idx = 0;
while (http_find_header2(rule->arg.hdr_add.name, rule->arg.hdr_add.name_len,
txn->rsp.chn->buf->p, &txn->hdr_idx, &ctx)) {
http_remove_header2(&txn->rsp, &txn->hdr_idx, &ctx);
}
}
http_header_add_tail2(&txn->rsp, &txn->hdr_idx, replace->str, replace->len);
free_trash_chunk(replace);
break;
}
case ACT_HTTP_DEL_ACL:
case ACT_HTTP_DEL_MAP: {
struct pat_ref *ref;
struct chunk *key;
/* collect reference */
ref = pat_ref_lookup(rule->arg.map.ref);
if (!ref)
continue;
/* allocate key */
key = alloc_trash_chunk();
if (!key)
return HTTP_RULE_RES_BADREQ;
/* collect key */
key->len = build_logline(s, key->str, key->size, &rule->arg.map.key);
key->str[key->len] = '\0';
/* perform update */
/* returned code: 1=ok, 0=ko */
HA_SPIN_LOCK(PATREF_LOCK, &ref->lock);
pat_ref_delete(ref, key->str);
HA_SPIN_UNLOCK(PATREF_LOCK, &ref->lock);
free_trash_chunk(key);
break;
}
case ACT_HTTP_ADD_ACL: {
struct pat_ref *ref;
struct chunk *key;
/* collect reference */
ref = pat_ref_lookup(rule->arg.map.ref);
if (!ref)
continue;
/* allocate key */
key = alloc_trash_chunk();
if (!key)
return HTTP_RULE_RES_BADREQ;
/* collect key */
key->len = build_logline(s, key->str, key->size, &rule->arg.map.key);
key->str[key->len] = '\0';
/* perform update */
/* check if the entry already exists */
if (pat_ref_find_elt(ref, key->str) == NULL)
pat_ref_add(ref, key->str, NULL, NULL);
free_trash_chunk(key);
break;
}
case ACT_HTTP_SET_MAP: {
struct pat_ref *ref;
struct chunk *key, *value;
/* collect reference */
ref = pat_ref_lookup(rule->arg.map.ref);
if (!ref)
continue;
/* allocate key */
key = alloc_trash_chunk();
if (!key)
return HTTP_RULE_RES_BADREQ;
/* allocate value */
value = alloc_trash_chunk();
if (!value) {
free_trash_chunk(key);
return HTTP_RULE_RES_BADREQ;
}
/* collect key */
key->len = build_logline(s, key->str, key->size, &rule->arg.map.key);
key->str[key->len] = '\0';
/* collect value */
value->len = build_logline(s, value->str, value->size, &rule->arg.map.value);
value->str[value->len] = '\0';
/* perform update */
HA_SPIN_LOCK(PATREF_LOCK, &ref->lock);
if (pat_ref_find_elt(ref, key->str) != NULL)
/* update entry if it exists */
pat_ref_set(ref, key->str, value->str, NULL);
else
/* insert a new entry */
pat_ref_add(ref, key->str, value->str, NULL);
HA_SPIN_UNLOCK(PATREF_LOCK, &ref->lock);
free_trash_chunk(key);
free_trash_chunk(value);
break;
}
case ACT_HTTP_REDIR:
if (!http_apply_redirect_rule(rule->arg.redir, s, txn))
return HTTP_RULE_RES_BADREQ;
return HTTP_RULE_RES_DONE;
case ACT_ACTION_TRK_SC0 ... ACT_ACTION_TRK_SCMAX:
/* Note: only the first valid tracking parameter of each
* applies.
*/
if (stkctr_entry(&s->stkctr[trk_idx(rule->action)]) == NULL) {
struct stktable *t;
struct stksess *ts;
struct stktable_key *key;
void *ptr;
t = rule->arg.trk_ctr.table.t;
key = stktable_fetch_key(t, s->be, sess, s, SMP_OPT_DIR_RES | SMP_OPT_FINAL, rule->arg.trk_ctr.expr, NULL);
if (key && (ts = stktable_get_entry(t, key))) {
stream_track_stkctr(&s->stkctr[trk_idx(rule->action)], t, ts);
HA_RWLOCK_WRLOCK(STK_SESS_LOCK, &ts->lock);
/* let's count a new HTTP request as it's the first time we do it */
ptr = stktable_data_ptr(t, ts, STKTABLE_DT_HTTP_REQ_CNT);
if (ptr)
stktable_data_cast(ptr, http_req_cnt)++;
ptr = stktable_data_ptr(t, ts, STKTABLE_DT_HTTP_REQ_RATE);
if (ptr)
update_freq_ctr_period(&stktable_data_cast(ptr, http_req_rate),
t->data_arg[STKTABLE_DT_HTTP_REQ_RATE].u, 1);
/* When the client triggers a 4xx from the server, it's most often due
* to a missing object or permission. These events should be tracked
* because if they happen often, it may indicate a brute force or a
* vulnerability scan. Normally this is done when receiving the response
* but here we're tracking after this ought to have been done so we have
* to do it on purpose.
*/
if ((unsigned)(txn->status - 400) < 100) {
ptr = stktable_data_ptr(t, ts, STKTABLE_DT_HTTP_ERR_CNT);
if (ptr)
stktable_data_cast(ptr, http_err_cnt)++;
ptr = stktable_data_ptr(t, ts, STKTABLE_DT_HTTP_ERR_RATE);
if (ptr)
update_freq_ctr_period(&stktable_data_cast(ptr, http_err_rate),
t->data_arg[STKTABLE_DT_HTTP_ERR_RATE].u, 1);
}
HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock);
/* If data was modified, we need to touch to re-schedule sync */
stktable_touch_local(t, ts, 0);
stkctr_set_flags(&s->stkctr[trk_idx(rule->action)], STKCTR_TRACK_CONTENT);
if (sess->fe != s->be)
stkctr_set_flags(&s->stkctr[trk_idx(rule->action)], STKCTR_TRACK_BACKEND);
}
}
break;
case ACT_CUSTOM:
if ((s->req.flags & CF_READ_ERROR) ||
((s->req.flags & (CF_SHUTR|CF_READ_NULL)) &&
!(s->si[0].flags & SI_FL_CLEAN_ABRT) &&
(px->options & PR_O_ABRT_CLOSE)))
act_flags |= ACT_FLAG_FINAL;
switch (rule->action_ptr(rule, px, s->sess, s, act_flags)) {
case ACT_RET_ERR:
case ACT_RET_CONT:
break;
case ACT_RET_STOP:
return HTTP_RULE_RES_STOP;
case ACT_RET_YIELD:
s->current_rule = rule;
return HTTP_RULE_RES_YIELD;
}
break;
/* other flags exists, but normaly, they never be matched. */
default:
break;
}
}
/* we reached the end of the rules, nothing to report */
return HTTP_RULE_RES_CONT;
}
| 13,931 |
46,443 | 0 | xfs_file_splice_read(
struct file *infilp,
loff_t *ppos,
struct pipe_inode_info *pipe,
size_t count,
unsigned int flags)
{
struct xfs_inode *ip = XFS_I(infilp->f_mapping->host);
int ioflags = 0;
ssize_t ret;
XFS_STATS_INC(xs_read_calls);
if (infilp->f_mode & FMODE_NOCMTIME)
ioflags |= IO_INVIS;
if (XFS_FORCED_SHUTDOWN(ip->i_mount))
return -EIO;
xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
trace_xfs_file_splice_read(ip, count, *ppos, ioflags);
ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
if (ret > 0)
XFS_STATS_ADD(xs_read_bytes, ret);
xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
return ret;
}
| 13,932 |
29,985 | 0 | static inline size_t rtnl_mdb_nlmsg_size(void)
{
return NLMSG_ALIGN(sizeof(struct br_port_msg))
+ nla_total_size(sizeof(struct br_mdb_entry));
}
| 13,933 |
51,860 | 0 | dissect_dch_timing_advance(proto_tree *tree, packet_info *pinfo,
tvbuff_t *tvb, int offset, struct fp_info *p_fp_info)
{
guint8 cfn;
guint16 timing_advance;
proto_item *timing_advance_ti;
/* CFN control */
cfn = tvb_get_guint8(tvb, offset);
proto_tree_add_item(tree, hf_fp_cfn_control, tvb, offset, 1, ENC_BIG_ENDIAN);
offset++;
/* Timing Advance */
timing_advance = (tvb_get_guint8(tvb, offset) & 0x3f) * 4;
timing_advance_ti = proto_tree_add_uint(tree, hf_fp_timing_advance, tvb, offset, 1, timing_advance);
offset++;
if ((p_fp_info->release == 7) &&
(tvb_reported_length_remaining(tvb, offset) > 0)) {
/* New IE flags */
guint8 flags = tvb_get_guint8(tvb, offset);
guint8 extended_bits = flags & 0x01;
offset++;
if (extended_bits) {
guint8 extra_bit = tvb_get_guint8(tvb, offset) & 0x01;
proto_item_append_text(timing_advance_ti, " (extended to %u)",
(timing_advance << 1) | extra_bit);
}
offset++;
}
col_append_fstr(pinfo->cinfo, COL_INFO, " CFN = %u, TA = %u",
cfn, timing_advance);
return offset;
}
| 13,934 |
173,470 | 0 | OMX_ERRORTYPE omx_vdec::is_video_session_supported()
{
if (!strncmp(drv_ctx.kind, "OMX.qcom.video.decoder.avc",
OMX_MAX_STRINGNAME_SIZE) &&
(m_profile == HIGH_PROFILE || m_profile == MAIN_PROFILE)) {
m_decoder_capability.max_width = 1280;
m_decoder_capability.max_height = 720;
DEBUG_PRINT_HIGH("Set max_width=1280 & max_height=720 for H264 HP/MP");
}
if ((drv_ctx.video_resolution.frame_width *
drv_ctx.video_resolution.frame_height >
m_decoder_capability.max_width *
m_decoder_capability.max_height) ||
(drv_ctx.video_resolution.frame_width*
drv_ctx.video_resolution.frame_height <
m_decoder_capability.min_width *
m_decoder_capability.min_height)) {
DEBUG_PRINT_ERROR(
"Unsupported WxH = (%u)x(%u) supported range is min(%u)x(%u) - max(%u)x(%u)",
drv_ctx.video_resolution.frame_width,
drv_ctx.video_resolution.frame_height,
m_decoder_capability.min_width,
m_decoder_capability.min_height,
m_decoder_capability.max_width,
m_decoder_capability.max_height);
return OMX_ErrorUnsupportedSetting;
}
DEBUG_PRINT_HIGH("video session supported");
return OMX_ErrorNone;
}
| 13,935 |
180,642 | 1 | static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
{
unsigned long val;
void *ptr = NULL;
if (!atomic_pool) {
WARN(1, "coherent pool not initialised!\n");
return NULL;
}
val = gen_pool_alloc(atomic_pool, size);
if (val) {
phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);
*ret_page = phys_to_page(phys);
ptr = (void *)val;
if (flags & __GFP_ZERO)
memset(ptr, 0, size);
}
return ptr;
}
| 13,936 |
176,899 | 0 | void InputDispatcher::addWindowTargetLocked(const sp<InputWindowHandle>& windowHandle,
int32_t targetFlags, BitSet32 pointerIds, Vector<InputTarget>& inputTargets) {
inputTargets.push();
const InputWindowInfo* windowInfo = windowHandle->getInfo();
InputTarget& target = inputTargets.editTop();
target.inputChannel = windowInfo->inputChannel;
target.flags = targetFlags;
target.xOffset = - windowInfo->frameLeft;
target.yOffset = - windowInfo->frameTop;
target.scaleFactor = windowInfo->scaleFactor;
target.pointerIds = pointerIds;
}
| 13,937 |
116,315 | 0 | void QQuickWebViewExperimental::setUserAgent(const QString& userAgent)
{
Q_D(QQuickWebView);
if (userAgent == QString(d->webPageProxy->userAgent()))
return;
d->webPageProxy->setUserAgent(userAgent);
emit userAgentChanged();
}
| 13,938 |
32,839 | 0 | static struct wdm_device *wdm_find_device_by_minor(int minor)
{
struct wdm_device *desc;
spin_lock(&wdm_device_list_lock);
list_for_each_entry(desc, &wdm_device_list, device_list)
if (desc->intf->minor == minor)
goto found;
desc = NULL;
found:
spin_unlock(&wdm_device_list_lock);
return desc;
}
| 13,939 |
41,656 | 0 | static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
u64 start, u64 len)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_map *em;
struct btrfs_key ins;
u64 alloc_hint;
int ret;
alloc_hint = get_extent_allocation_hint(inode, start, len);
ret = btrfs_reserve_extent(root, len, root->sectorsize, 0,
alloc_hint, &ins, 1, 1);
if (ret)
return ERR_PTR(ret);
em = create_pinned_em(inode, start, ins.offset, start, ins.objectid,
ins.offset, ins.offset, ins.offset, 0);
if (IS_ERR(em)) {
btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
return em;
}
ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
ins.offset, ins.offset, 0);
if (ret) {
btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
free_extent_map(em);
return ERR_PTR(ret);
}
return em;
}
| 13,940 |
19,257 | 0 | static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
{
struct nl_seq_iter *iter = seq->private;
int i, j;
struct sock *s;
struct hlist_node *node;
loff_t off = 0;
for (i = 0; i < MAX_LINKS; i++) {
struct nl_pid_hash *hash = &nl_table[i].hash;
for (j = 0; j <= hash->mask; j++) {
sk_for_each(s, node, &hash->table[j]) {
if (sock_net(s) != seq_file_net(seq))
continue;
if (off == pos) {
iter->link = i;
iter->hash_idx = j;
return s;
}
++off;
}
}
}
return NULL;
}
| 13,941 |
42,722 | 0 | static void setup_msrs(struct vcpu_vmx *vmx)
{
int save_nmsrs, index;
save_nmsrs = 0;
#ifdef CONFIG_X86_64
if (is_long_mode(&vmx->vcpu)) {
index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
if (index >= 0)
move_msr_up(vmx, index, save_nmsrs++);
index = __find_msr_index(vmx, MSR_LSTAR);
if (index >= 0)
move_msr_up(vmx, index, save_nmsrs++);
index = __find_msr_index(vmx, MSR_CSTAR);
if (index >= 0)
move_msr_up(vmx, index, save_nmsrs++);
index = __find_msr_index(vmx, MSR_TSC_AUX);
if (index >= 0 && guest_cpuid_has_rdtscp(&vmx->vcpu))
move_msr_up(vmx, index, save_nmsrs++);
/*
* MSR_STAR is only needed on long mode guests, and only
* if efer.sce is enabled.
*/
index = __find_msr_index(vmx, MSR_STAR);
if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE))
move_msr_up(vmx, index, save_nmsrs++);
}
#endif
index = __find_msr_index(vmx, MSR_EFER);
if (index >= 0 && update_transition_efer(vmx, index))
move_msr_up(vmx, index, save_nmsrs++);
vmx->save_nmsrs = save_nmsrs;
if (cpu_has_vmx_msr_bitmap())
vmx_set_msr_bitmap(&vmx->vcpu);
}
| 13,942 |
103,918 | 0 | bool RenderView::runModalPromptDialog(
WebFrame* frame, const WebString& message, const WebString& default_value,
WebString* actual_value) {
string16 result;
bool ok = RunJavaScriptMessage(ui::MessageBoxFlags::kIsJavascriptPrompt,
message,
default_value,
frame->document().url(),
&result);
if (ok)
actual_value->assign(result);
return ok;
}
| 13,943 |
35,437 | 0 | ppp_if_print(netdissect_options *ndo,
const struct pcap_pkthdr *h, register const u_char *p)
{
register u_int length = h->len;
register u_int caplen = h->caplen;
if (caplen < PPP_HDRLEN) {
ND_PRINT((ndo, "[|ppp]"));
return (caplen);
}
#if 0
/*
* XXX: seems to assume that there are 2 octets prepended to an
* actual PPP frame. The 1st octet looks like Input/Output flag
* while 2nd octet is unknown, at least to me
* ([email protected]).
*
* That was what the original tcpdump code did.
*
* FreeBSD's "if_ppp.c" *does* set the first octet to 1 for outbound
* packets and 0 for inbound packets - but only if the
* protocol field has the 0x8000 bit set (i.e., it's a network
* control protocol); it does so before running the packet through
* "bpf_filter" to see if it should be discarded, and to see
* if we should update the time we sent the most recent packet...
*
* ...but it puts the original address field back after doing
* so.
*
* NetBSD's "if_ppp.c" doesn't set the first octet in that fashion.
*
* I don't know if any PPP implementation handed up to a BPF
* device packets with the first octet being 1 for outbound and
* 0 for inbound packets, so I ([email protected]) don't know
* whether that ever needs to be checked or not.
*
* Note that NetBSD has a DLT_PPP_SERIAL, which it uses for PPP,
* and its tcpdump appears to assume that the frame always
* begins with an address field and a control field, and that
* the address field might be 0x0f or 0x8f, for Cisco
* point-to-point with HDLC framing as per section 4.3.1 of RFC
* 1547, as well as 0xff, for PPP in HDLC-like framing as per
* RFC 1662.
*
* (Is the Cisco framing in question what DLT_C_HDLC, in
* BSD/OS, is?)
*/
if (ndo->ndo_eflag)
ND_PRINT((ndo, "%c %4d %02x ", p[0] ? 'O' : 'I', length, p[1]));
#endif
ppp_print(ndo, p, length);
return (0);
}
| 13,944 |
141,244 | 0 | void Document::SetURL(const KURL& url) {
const KURL& new_url = url.IsEmpty() ? BlankURL() : url;
if (new_url == url_)
return;
url_ = new_url;
access_entry_from_url_ = nullptr;
UpdateBaseURL();
GetContextFeatures().UrlDidChange(this);
if (ukm_recorder_ && IsInMainFrame())
ukm_recorder_->UpdateSourceURL(ukm_source_id_, url_);
}
| 13,945 |
41,843 | 0 | static inline size_t inet6_ifla6_size(void)
{
return nla_total_size(4) /* IFLA_INET6_FLAGS */
+ nla_total_size(sizeof(struct ifla_cacheinfo))
+ nla_total_size(DEVCONF_MAX * 4) /* IFLA_INET6_CONF */
+ nla_total_size(IPSTATS_MIB_MAX * 8) /* IFLA_INET6_STATS */
+ nla_total_size(ICMP6_MIB_MAX * 8) /* IFLA_INET6_ICMP6STATS */
+ nla_total_size(sizeof(struct in6_addr)); /* IFLA_INET6_TOKEN */
}
| 13,946 |
161,072 | 0 | void ServerWrapper::OnWebSocketRequest(
int connection_id,
const net::HttpServerRequestInfo& request) {
BrowserThread::PostTask(
BrowserThread::UI, FROM_HERE,
base::BindOnce(&DevToolsHttpHandler::OnWebSocketRequest, handler_,
connection_id, request));
}
| 13,947 |
29,670 | 0 | static void do_metadata(struct work_struct *work)
{
struct mdata_req *req = container_of(work, struct mdata_req, work);
req->result = dm_io(req->io_req, 1, req->where, NULL);
}
| 13,948 |
50,488 | 0 | static void perf_event_mmap_output(struct perf_event *event,
void *data)
{
struct perf_mmap_event *mmap_event = data;
struct perf_output_handle handle;
struct perf_sample_data sample;
int size = mmap_event->event_id.header.size;
int ret;
if (!perf_event_mmap_match(event, data))
return;
if (event->attr.mmap2) {
mmap_event->event_id.header.type = PERF_RECORD_MMAP2;
mmap_event->event_id.header.size += sizeof(mmap_event->maj);
mmap_event->event_id.header.size += sizeof(mmap_event->min);
mmap_event->event_id.header.size += sizeof(mmap_event->ino);
mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation);
mmap_event->event_id.header.size += sizeof(mmap_event->prot);
mmap_event->event_id.header.size += sizeof(mmap_event->flags);
}
perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
ret = perf_output_begin(&handle, event,
mmap_event->event_id.header.size);
if (ret)
goto out;
mmap_event->event_id.pid = perf_event_pid(event, current);
mmap_event->event_id.tid = perf_event_tid(event, current);
perf_output_put(&handle, mmap_event->event_id);
if (event->attr.mmap2) {
perf_output_put(&handle, mmap_event->maj);
perf_output_put(&handle, mmap_event->min);
perf_output_put(&handle, mmap_event->ino);
perf_output_put(&handle, mmap_event->ino_generation);
perf_output_put(&handle, mmap_event->prot);
perf_output_put(&handle, mmap_event->flags);
}
__output_copy(&handle, mmap_event->file_name,
mmap_event->file_size);
perf_event__output_id_sample(event, &handle, &sample);
perf_output_end(&handle);
out:
mmap_event->event_id.header.size = size;
}
| 13,949 |
102,089 | 0 | void SyncBackendHost::Core::OnClearServerDataFailed() {
host_->frontend_loop_->PostTask(FROM_HERE, NewRunnableMethod(this,
&Core::HandleClearServerDataFailedOnFrontendLoop));
}
| 13,950 |
33,300 | 0 | int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
struct page *page;
struct kvm *kvm;
int r;
BUG_ON(vcpu->kvm == NULL);
kvm = vcpu->kvm;
vcpu->arch.emulate_ctxt.ops = &emulate_ops;
if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
else
vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page) {
r = -ENOMEM;
goto fail;
}
vcpu->arch.pio_data = page_address(page);
kvm_set_tsc_khz(vcpu, max_tsc_khz);
r = kvm_mmu_create(vcpu);
if (r < 0)
goto fail_free_pio_data;
if (irqchip_in_kernel(kvm)) {
r = kvm_create_lapic(vcpu);
if (r < 0)
goto fail_mmu_destroy;
} else
static_key_slow_inc(&kvm_no_apic_vcpu);
vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
GFP_KERNEL);
if (!vcpu->arch.mce_banks) {
r = -ENOMEM;
goto fail_free_lapic;
}
vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL))
goto fail_free_mce_banks;
r = fx_init(vcpu);
if (r)
goto fail_free_wbinvd_dirty_mask;
vcpu->arch.ia32_tsc_adjust_msr = 0x0;
kvm_async_pf_hash_reset(vcpu);
kvm_pmu_init(vcpu);
return 0;
fail_free_wbinvd_dirty_mask:
free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
fail_free_mce_banks:
kfree(vcpu->arch.mce_banks);
fail_free_lapic:
kvm_free_lapic(vcpu);
fail_mmu_destroy:
kvm_mmu_destroy(vcpu);
fail_free_pio_data:
free_page((unsigned long)vcpu->arch.pio_data);
fail:
return r;
}
| 13,951 |
128,572 | 0 | pp::Instance* Instance::GetPluginInstance() {
return GetInstance();
}
| 13,952 |
180,329 | 1 | AirPDcapDecryptWPABroadcastKey(const EAPOL_RSN_KEY *pEAPKey, guint8 *decryption_key, PAIRPDCAP_SEC_ASSOCIATION sa, guint eapol_len)
{
guint8 key_version;
guint8 *key_data;
guint8 *szEncryptedKey;
guint16 key_bytes_len = 0; /* Length of the total key data field */
guint16 key_len; /* Actual group key length */
static AIRPDCAP_KEY_ITEM dummy_key; /* needed in case AirPDcapRsnaMng() wants the key structure */
AIRPDCAP_SEC_ASSOCIATION *tmp_sa;
/* We skip verifying the MIC of the key. If we were implementing a WPA supplicant we'd want to verify, but for a sniffer it's not needed. */
/* Preparation for decrypting the group key - determine group key data length */
/* depending on whether the pairwise key is TKIP or AES encryption key */
key_version = AIRPDCAP_EAP_KEY_DESCR_VER(pEAPKey->key_information[1]);
if (key_version == AIRPDCAP_WPA_KEY_VER_NOT_CCMP){
/* TKIP */
key_bytes_len = pntoh16(pEAPKey->key_length);
}else if (key_version == AIRPDCAP_WPA_KEY_VER_AES_CCMP){
/* AES */
key_bytes_len = pntoh16(pEAPKey->key_data_len);
/* AES keys must be at least 128 bits = 16 bytes. */
if (key_bytes_len < 16) {
return AIRPDCAP_RET_NO_VALID_HANDSHAKE;
}
}
if (key_bytes_len < GROUP_KEY_MIN_LEN || key_bytes_len > eapol_len - sizeof(EAPOL_RSN_KEY)) {
return AIRPDCAP_RET_NO_VALID_HANDSHAKE;
}
/* Encrypted key is in the information element field of the EAPOL key packet */
key_data = (guint8 *)pEAPKey + sizeof(EAPOL_RSN_KEY);
szEncryptedKey = (guint8 *)g_memdup(key_data, key_bytes_len);
DEBUG_DUMP("Encrypted Broadcast key:", szEncryptedKey, key_bytes_len);
DEBUG_DUMP("KeyIV:", pEAPKey->key_iv, 16);
DEBUG_DUMP("decryption_key:", decryption_key, 16);
/* We are rekeying, save old sa */
tmp_sa=(AIRPDCAP_SEC_ASSOCIATION *)g_malloc(sizeof(AIRPDCAP_SEC_ASSOCIATION));
memcpy(tmp_sa, sa, sizeof(AIRPDCAP_SEC_ASSOCIATION));
sa->next=tmp_sa;
/* As we have no concept of the prior association request at this point, we need to deduce the */
/* group key cipher from the length of the key bytes. In WPA this is straightforward as the */
/* keybytes just contain the GTK, and the GTK is only in the group handshake, NOT the M3. */
/* In WPA2 its a little more tricky as the M3 keybytes contain an RSN_IE, but the group handshake */
/* does not. Also there are other (variable length) items in the keybytes which we need to account */
/* for to determine the true key length, and thus the group cipher. */
if (key_version == AIRPDCAP_WPA_KEY_VER_NOT_CCMP){
guint8 new_key[32];
guint8 dummy[256];
/* TKIP key */
/* Per 802.11i, Draft 3.0 spec, section 8.5.2, p. 97, line 4-8, */
/* group key is decrypted using RC4. Concatenate the IV with the 16 byte EK (PTK+16) to get the decryption key */
rc4_state_struct rc4_state;
/* The WPA group key just contains the GTK bytes so deducing the type is straightforward */
/* Note - WPA M3 doesn't contain a group key so we'll only be here for the group handshake */
sa->wpa.key_ver = (key_bytes_len >=TKIP_GROUP_KEY_LEN)?AIRPDCAP_WPA_KEY_VER_NOT_CCMP:AIRPDCAP_WPA_KEY_VER_AES_CCMP;
/* Build the full decryption key based on the IV and part of the pairwise key */
memcpy(new_key, pEAPKey->key_iv, 16);
memcpy(new_key+16, decryption_key, 16);
DEBUG_DUMP("FullDecrKey:", new_key, 32);
crypt_rc4_init(&rc4_state, new_key, sizeof(new_key));
/* Do dummy 256 iterations of the RC4 algorithm (per 802.11i, Draft 3.0, p. 97 line 6) */
crypt_rc4(&rc4_state, dummy, 256);
crypt_rc4(&rc4_state, szEncryptedKey, key_bytes_len);
} else if (key_version == AIRPDCAP_WPA_KEY_VER_AES_CCMP){
/* AES CCMP key */
guint8 key_found;
guint8 key_length;
guint16 key_index;
guint8 *decrypted_data;
/* Unwrap the key; the result is key_bytes_len in length */
decrypted_data = AES_unwrap(decryption_key, 16, szEncryptedKey, key_bytes_len);
/* With WPA2 what we get after Broadcast Key decryption is an actual RSN structure.
The key itself is stored as a GTK KDE
WPA2 IE (1 byte) id = 0xdd, length (1 byte), GTK OUI (4 bytes), key index (1 byte) and 1 reserved byte. Thus we have to
pass pointer to the actual key with 8 bytes offset */
key_found = FALSE;
key_index = 0;
/* Parse Key data until we found GTK KDE */
/* GTK KDE = 00-0F-AC 01 */
while(key_index < (key_bytes_len - 6) && !key_found){
guint8 rsn_id;
guint32 type;
/* Get RSN ID */
rsn_id = decrypted_data[key_index];
type = ((decrypted_data[key_index + 2] << 24) +
(decrypted_data[key_index + 3] << 16) +
(decrypted_data[key_index + 4] << 8) +
(decrypted_data[key_index + 5]));
if (rsn_id == 0xdd && type == 0x000fac01) {
key_found = TRUE;
} else {
key_index += decrypted_data[key_index+1]+2;
}
}
if (key_found){
key_length = decrypted_data[key_index+1] - 6;
if (key_index+8 >= key_bytes_len ||
key_length > key_bytes_len - key_index - 8) {
g_free(decrypted_data);
g_free(szEncryptedKey);
return AIRPDCAP_RET_NO_VALID_HANDSHAKE;
}
/* Skip over the GTK header info, and don't copy past the end of the encrypted data */
memcpy(szEncryptedKey, decrypted_data+key_index+8, key_length);
} else {
g_free(decrypted_data);
g_free(szEncryptedKey);
return AIRPDCAP_RET_NO_VALID_HANDSHAKE;
}
if (key_length == TKIP_GROUP_KEY_LEN)
sa->wpa.key_ver = AIRPDCAP_WPA_KEY_VER_NOT_CCMP;
else
sa->wpa.key_ver = AIRPDCAP_WPA_KEY_VER_AES_CCMP;
g_free(decrypted_data);
}
key_len = (sa->wpa.key_ver==AIRPDCAP_WPA_KEY_VER_NOT_CCMP)?TKIP_GROUP_KEY_LEN:CCMP_GROUP_KEY_LEN;
if (key_len > key_bytes_len) {
/* the key required for this protocol is longer than the key that we just calculated */
g_free(szEncryptedKey);
return AIRPDCAP_RET_NO_VALID_HANDSHAKE;
}
/* Decrypted key is now in szEncryptedKey with len of key_len */
DEBUG_DUMP("Broadcast key:", szEncryptedKey, key_len);
/* Load the proper key material info into the SA */
sa->key = &dummy_key; /* we just need key to be not null because it is checked in AirPDcapRsnaMng(). The WPA key materials are actually in the .wpa structure */
sa->validKey = TRUE;
/* Since this is a GTK and its size is only 32 bytes (vs. the 64 byte size of a PTK), we fake it and put it in at a 32-byte offset so the */
/* AirPDcapRsnaMng() function will extract the right piece of the GTK for decryption. (The first 16 bytes of the GTK are used for decryption.) */
memset(sa->wpa.ptk, 0, sizeof(sa->wpa.ptk));
memcpy(sa->wpa.ptk+32, szEncryptedKey, key_len);
g_free(szEncryptedKey);
return AIRPDCAP_RET_SUCCESS_HANDSHAKE;
}
| 13,953 |
46,964 | 0 | static void des3_ede_x86_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
des3_ede_dec_blk(crypto_tfm_ctx(tfm), dst, src);
}
| 13,954 |
4,965 | 0 | qtdemux_parse_redirects (GstQTDemux * qtdemux)
{
GNode *rmra, *rmda, *rdrf;
rmra = qtdemux_tree_get_child_by_type (qtdemux->moov_node, FOURCC_rmra);
if (rmra) {
GList *redirects = NULL;
rmda = qtdemux_tree_get_child_by_type (rmra, FOURCC_rmda);
while (rmda) {
GstQtReference ref = { NULL, NULL, 0, 0 };
GNode *rmdr, *rmvc;
if ((rmdr = qtdemux_tree_get_child_by_type (rmda, FOURCC_rmdr))) {
ref.min_req_bitrate = QT_UINT32 ((guint8 *) rmdr->data + 12);
GST_LOG_OBJECT (qtdemux, "data rate atom, required bitrate = %u",
ref.min_req_bitrate);
}
if ((rmvc = qtdemux_tree_get_child_by_type (rmda, FOURCC_rmvc))) {
guint32 package = QT_FOURCC ((guint8 *) rmvc->data + 12);
guint version = QT_UINT32 ((guint8 *) rmvc->data + 16);
#ifndef GST_DISABLE_GST_DEBUG
guint bitmask = QT_UINT32 ((guint8 *) rmvc->data + 20);
#endif
guint check_type = QT_UINT16 ((guint8 *) rmvc->data + 24);
GST_LOG_OBJECT (qtdemux,
"version check atom [%" GST_FOURCC_FORMAT "], version=0x%08x"
", mask=%08x, check_type=%u", GST_FOURCC_ARGS (package), version,
bitmask, check_type);
if (package == FOURCC_qtim && check_type == 0) {
ref.min_req_qt_version = version;
}
}
rdrf = qtdemux_tree_get_child_by_type (rmda, FOURCC_rdrf);
if (rdrf) {
guint32 ref_type;
guint8 *ref_data;
ref_type = QT_FOURCC ((guint8 *) rdrf->data + 12);
ref_data = (guint8 *) rdrf->data + 20;
if (ref_type == FOURCC_alis) {
guint record_len, record_version, fn_len;
/* MacOSX alias record, google for alias-layout.txt */
record_len = QT_UINT16 (ref_data + 4);
record_version = QT_UINT16 (ref_data + 4 + 2);
fn_len = QT_UINT8 (ref_data + 50);
if (record_len > 50 && record_version == 2 && fn_len > 0) {
ref.location = g_strndup ((gchar *) ref_data + 51, fn_len);
}
} else if (ref_type == FOURCC_url_) {
ref.location = g_strdup ((gchar *) ref_data);
} else {
GST_DEBUG_OBJECT (qtdemux,
"unknown rdrf reference type %" GST_FOURCC_FORMAT,
GST_FOURCC_ARGS (ref_type));
}
if (ref.location != NULL) {
GST_INFO_OBJECT (qtdemux, "New location: %s", ref.location);
redirects = g_list_prepend (redirects, g_memdup (&ref, sizeof (ref)));
} else {
GST_WARNING_OBJECT (qtdemux,
"Failed to extract redirect location from rdrf atom");
}
}
/* look for others */
rmda = qtdemux_tree_get_sibling_by_type (rmda, FOURCC_rmda);
}
if (redirects != NULL) {
qtdemux_process_redirects (qtdemux, redirects);
}
}
return TRUE;
}
| 13,955 |
51,230 | 0 | static char *hiddev_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
}
| 13,956 |
38,029 | 0 | const char *regs_query_register_name(unsigned int offset)
{
if (offset >= NUM_GPRS)
return NULL;
return gpr_names[offset];
}
| 13,957 |
171,694 | 0 | void bta_hl_co_get_tx_data (UINT8 app_id, tBTA_HL_MDL_HANDLE mdl_handle,
UINT16 buf_size, UINT8 *p_buf, UINT16 evt)
{
UINT8 app_idx, mcl_idx, mdl_idx;
btif_hl_mdl_cb_t *p_dcb;
tBTA_HL_STATUS status = BTA_HL_STATUS_FAIL;
BTIF_TRACE_DEBUG("%s app_id=%d mdl_handle=0x%x buf_size=%d",
__FUNCTION__, app_id, mdl_handle, buf_size);
if (btif_hl_find_mdl_idx_using_handle(mdl_handle, &app_idx, &mcl_idx, &mdl_idx))
{
p_dcb = BTIF_HL_GET_MDL_CB_PTR(app_idx, mcl_idx, mdl_idx);
if (p_dcb->tx_size <= buf_size )
{
memcpy(p_buf, p_dcb->p_tx_pkt, p_dcb->tx_size);
btif_hl_free_buf((void **) &p_dcb->p_tx_pkt);
p_dcb->tx_size = 0;
status = BTA_HL_STATUS_OK;
}
}
bta_hl_ci_get_tx_data(mdl_handle, status, evt);
}
| 13,958 |
109,013 | 0 | void RenderViewImpl::PpapiPluginCancelComposition() {
Send(new ViewHostMsg_ImeCancelComposition(routing_id()));
const ui::Range range(ui::Range::InvalidRange());
const std::vector<gfx::Rect> empty_bounds;
UpdateCompositionInfo(range, empty_bounds);
}
| 13,959 |
57,483 | 0 | static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
int blocks_to_boundary)
{
unsigned int count = 0;
/*
* Simple case, [t,d]Indirect block(s) has not allocated yet
* then it's clear blocks on that path have not allocated
*/
if (k > 0) {
/* right now we don't handle cross boundary allocation */
if (blks < blocks_to_boundary + 1)
count += blks;
else
count += blocks_to_boundary + 1;
return count;
}
count++;
while (count < blks && count <= blocks_to_boundary &&
le32_to_cpu(*(branch[0].p + count)) == 0) {
count++;
}
return count;
}
| 13,960 |
68,290 | 0 | static void __perf_event_enable(struct perf_event *event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx,
void *info)
{
struct perf_event *leader = event->group_leader;
struct perf_event_context *task_ctx;
if (event->state >= PERF_EVENT_STATE_INACTIVE ||
event->state <= PERF_EVENT_STATE_ERROR)
return;
if (ctx->is_active)
ctx_sched_out(ctx, cpuctx, EVENT_TIME);
__perf_event_mark_enabled(event);
if (!ctx->is_active)
return;
if (!event_filter_match(event)) {
if (is_cgroup_event(event))
perf_cgroup_defer_enabled(event);
ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
return;
}
/*
* If the event is in a group and isn't the group leader,
* then don't put it on unless the group is on.
*/
if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) {
ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
return;
}
task_ctx = cpuctx->task_ctx;
if (ctx->task)
WARN_ON_ONCE(task_ctx != ctx);
ctx_resched(cpuctx, task_ctx);
}
| 13,961 |
126,899 | 0 | void BrowserTabStripController::CreateNewTab() {
model_->delegate()->AddBlankTabAt(-1, true);
}
| 13,962 |
109,657 | 0 | Element* Document::elementFromPoint(int x, int y) const
{
if (!renderer())
return 0;
return TreeScope::elementFromPoint(x, y);
}
| 13,963 |
59,954 | 0 | int snd_seq_device_new(struct snd_card *card, int device, const char *id,
int argsize, struct snd_seq_device **result)
{
struct snd_seq_device *dev;
int err;
static struct snd_device_ops dops = {
.dev_free = snd_seq_device_dev_free,
.dev_register = snd_seq_device_dev_register,
.dev_disconnect = snd_seq_device_dev_disconnect,
};
if (result)
*result = NULL;
if (snd_BUG_ON(!id))
return -EINVAL;
dev = kzalloc(sizeof(*dev) + argsize, GFP_KERNEL);
if (!dev)
return -ENOMEM;
/* set up device info */
dev->card = card;
dev->device = device;
dev->id = id;
dev->argsize = argsize;
device_initialize(&dev->dev);
dev->dev.parent = &card->card_dev;
dev->dev.bus = &snd_seq_bus_type;
dev->dev.release = snd_seq_dev_release;
dev_set_name(&dev->dev, "%s-%d-%d", dev->id, card->number, device);
/* add this device to the list */
err = snd_device_new(card, SNDRV_DEV_SEQUENCER, dev, &dops);
if (err < 0) {
put_device(&dev->dev);
return err;
}
if (result)
*result = dev;
return 0;
}
| 13,964 |
90,516 | 0 | SYSCALL_DEFINE1(userfaultfd, int, flags)
{
struct userfaultfd_ctx *ctx;
int fd;
BUG_ON(!current->mm);
/* Check the UFFD_* constants for consistency. */
BUILD_BUG_ON(UFFD_CLOEXEC != O_CLOEXEC);
BUILD_BUG_ON(UFFD_NONBLOCK != O_NONBLOCK);
if (flags & ~UFFD_SHARED_FCNTL_FLAGS)
return -EINVAL;
ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
if (!ctx)
return -ENOMEM;
refcount_set(&ctx->refcount, 1);
ctx->flags = flags;
ctx->features = 0;
ctx->state = UFFD_STATE_WAIT_API;
ctx->released = false;
ctx->mmap_changing = false;
ctx->mm = current->mm;
/* prevent the mm struct to be freed */
mmgrab(ctx->mm);
fd = anon_inode_getfd("[userfaultfd]", &userfaultfd_fops, ctx,
O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS));
if (fd < 0) {
mmdrop(ctx->mm);
kmem_cache_free(userfaultfd_ctx_cachep, ctx);
}
return fd;
}
| 13,965 |
170,402 | 0 | static jboolean Region_isEmpty(JNIEnv* env, jobject region) {
bool result = GetSkRegion(env, region)->isEmpty();
return boolTojboolean(result);
}
| 13,966 |
46,711 | 0 | static int sha1_export(struct shash_desc *desc, void *out)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
struct sha1_state *octx = out;
octx->count = sctx->count;
memcpy(octx->state, sctx->state, sizeof(octx->state));
memcpy(octx->buffer, sctx->buf, sizeof(octx->buffer));
return 0;
}
| 13,967 |
105,311 | 0 | void AutofillDownloadManager::SetNegativeUploadRate(double rate) {
if (rate == negative_upload_rate_)
return;
negative_upload_rate_ = rate;
DCHECK_GE(rate, 0.0);
DCHECK_LE(rate, 1.0);
DCHECK(profile_);
PrefService* preferences = profile_->GetPrefs();
preferences->SetDouble(prefs::kAutofillNegativeUploadRate, rate);
}
| 13,968 |
13,743 | 0 | ZEND_API int add_next_index_null(zval *arg) /* {{{ */
{
zval *tmp;
MAKE_STD_ZVAL(tmp);
ZVAL_NULL(tmp);
return zend_hash_next_index_insert(Z_ARRVAL_P(arg), &tmp, sizeof(zval *), NULL);
}
/* }}} */
| 13,969 |
35,690 | 0 | static int lua_auth_checker_harness_first(request_rec *r)
{
return lua_request_rec_hook_harness(r, "auth_checker", AP_LUA_HOOK_FIRST);
}
| 13,970 |
91,676 | 0 | static unsigned int encap_hash_key_make(void *p)
{
const struct bgp_attr_encap_subtlv *encap = p;
return jhash(encap->value, encap->length, 0);
}
| 13,971 |
133,449 | 0 | ErrorScreen* OobeUI::GetErrorScreen() {
return error_screen_.get();
}
| 13,972 |
20,648 | 0 | static inline void vti_set_rr6(unsigned long rr6)
{
ia64_set_rr(RR6, rr6);
ia64_srlz_i();
}
| 13,973 |
3,669 | 0 | int test_sqr(BIO *bp, BN_CTX *ctx)
{
BIGNUM *a, *c, *d, *e;
int i, ret = 0;
a = BN_new();
c = BN_new();
d = BN_new();
e = BN_new();
if (a == NULL || c == NULL || d == NULL || e == NULL) {
goto err;
}
for (i = 0; i < num0; i++) {
BN_bntest_rand(a, 40 + i * 10, 0, 0);
a->neg = rand_neg();
BN_sqr(c, a, ctx);
if (bp != NULL) {
if (!results) {
BN_print(bp, a);
BIO_puts(bp, " * ");
BN_print(bp, a);
BIO_puts(bp, " - ");
}
BN_print(bp, c);
BIO_puts(bp, "\n");
}
BN_div(d, e, c, a, ctx);
BN_sub(d, d, a);
if (!BN_is_zero(d) || !BN_is_zero(e)) {
fprintf(stderr, "Square test failed!\n");
goto err;
}
}
/* Regression test for a BN_sqr overflow bug. */
BN_hex2bn(&a,
"80000000000000008000000000000001"
"FFFFFFFFFFFFFFFE0000000000000000");
BN_sqr(c, a, ctx);
if (bp != NULL) {
if (!results) {
BN_print(bp, a);
BIO_puts(bp, " * ");
BN_print(bp, a);
BIO_puts(bp, " - ");
}
BN_print(bp, c);
BIO_puts(bp, "\n");
}
BN_mul(d, a, a, ctx);
if (BN_cmp(c, d)) {
fprintf(stderr, "Square test failed: BN_sqr and BN_mul produce "
"different results!\n");
goto err;
}
/* Regression test for a BN_sqr overflow bug. */
BN_hex2bn(&a,
"80000000000000000000000080000001"
"FFFFFFFE000000000000000000000000");
BN_sqr(c, a, ctx);
if (bp != NULL) {
if (!results) {
BN_print(bp, a);
BIO_puts(bp, " * ");
BN_print(bp, a);
BIO_puts(bp, " - ");
}
BN_print(bp, c);
BIO_puts(bp, "\n");
}
BN_mul(d, a, a, ctx);
if (BN_cmp(c, d)) {
fprintf(stderr, "Square test failed: BN_sqr and BN_mul produce "
"different results!\n");
goto err;
}
ret = 1;
err:
if (a != NULL)
BN_free(a);
if (c != NULL)
BN_free(c);
if (d != NULL)
BN_free(d);
if (e != NULL)
BN_free(e);
return ret;
}
| 13,974 |
131,853 | 0 | static void voidCallbackFunctionAttributeAttributeSetterCallback(v8::Local<v8::String>, v8::Local<v8::Value> jsValue, const v8::PropertyCallbackInfo<void>& info)
{
TRACE_EVENT_SET_SAMPLING_STATE("Blink", "DOMSetter");
TestObjectPythonV8Internal::voidCallbackFunctionAttributeAttributeSetter(jsValue, info);
TRACE_EVENT_SET_SAMPLING_STATE("V8", "V8Execution");
}
| 13,975 |
40,930 | 0 | double json_real_value(const json_t *json)
{
if(!json_is_real(json))
return 0;
return json_to_real(json)->value;
}
| 13,976 |
136,207 | 0 | static const QualifiedName& frameOwnerURLAttributeName(const HTMLFrameOwnerElement& frameOwner)
{
return isHTMLObjectElement(frameOwner) ? HTMLNames::dataAttr : HTMLNames::srcAttr;
}
| 13,977 |
144,656 | 0 | void WebContentsImpl::ResumeLoadingCreatedWebContents() {
if (delayed_open_url_params_.get()) {
OpenURL(*delayed_open_url_params_.get());
delayed_open_url_params_.reset(nullptr);
return;
}
if (is_resume_pending_) {
is_resume_pending_ = false;
GetRenderViewHost()->GetWidget()->Init();
GetMainFrame()->Init();
}
}
| 13,978 |
94,922 | 0 | void usbnet_tx_timeout (struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
unlink_urbs (dev, &dev->txq);
tasklet_schedule (&dev->bh);
/* this needs to be handled individually because the generic layer
* doesn't know what is sufficient and could not restore private
* information if a remedy of an unconditional reset were used.
*/
if (dev->driver_info->recover)
(dev->driver_info->recover)(dev);
}
| 13,979 |
178,257 | 1 | hash_foreach_mangle_dict_of_strings (gpointer key, gpointer val, gpointer user_data)
{
GHashTable *out = (GHashTable*) user_data;
GHashTable *in_dict = (GHashTable *) val;
HashAndString *data = g_new0 (HashAndString, 1);
data->string = (gchar*) key;
data->hash = g_hash_table_new_full (g_str_hash, g_str_equal,
g_free, g_free);
g_hash_table_foreach (in_dict, hash_foreach_prepend_string, data);
g_hash_table_insert(out, g_strdup ((gchar*) key), data->hash);
}
| 13,980 |
145,526 | 0 | void ResourceDispatcherHostImpl::RemovePendingLoader(
const LoaderMap::iterator& iter) {
ResourceRequestInfoImpl* info = iter->second->GetRequestInfo();
IncrementOutstandingRequestsMemory(-1, *info);
pending_loaders_.erase(iter);
}
| 13,981 |
144,020 | 0 | png_read_end(png_structp png_ptr, png_infop info_ptr)
{
png_debug(1, "in png_read_end");
if (png_ptr == NULL)
return;
png_crc_finish(png_ptr, 0); /* Finish off CRC from last IDAT chunk */
do
{
#ifdef PNG_USE_LOCAL_ARRAYS
PNG_CONST PNG_IHDR;
PNG_CONST PNG_IDAT;
PNG_CONST PNG_IEND;
PNG_CONST PNG_PLTE;
#ifdef PNG_READ_bKGD_SUPPORTED
PNG_CONST PNG_bKGD;
#endif
#ifdef PNG_READ_cHRM_SUPPORTED
PNG_CONST PNG_cHRM;
#endif
#ifdef PNG_READ_gAMA_SUPPORTED
PNG_CONST PNG_gAMA;
#endif
#ifdef PNG_READ_hIST_SUPPORTED
PNG_CONST PNG_hIST;
#endif
#ifdef PNG_READ_iCCP_SUPPORTED
PNG_CONST PNG_iCCP;
#endif
#ifdef PNG_READ_iTXt_SUPPORTED
PNG_CONST PNG_iTXt;
#endif
#ifdef PNG_READ_oFFs_SUPPORTED
PNG_CONST PNG_oFFs;
#endif
#ifdef PNG_READ_pCAL_SUPPORTED
PNG_CONST PNG_pCAL;
#endif
#ifdef PNG_READ_pHYs_SUPPORTED
PNG_CONST PNG_pHYs;
#endif
#ifdef PNG_READ_sBIT_SUPPORTED
PNG_CONST PNG_sBIT;
#endif
#ifdef PNG_READ_sCAL_SUPPORTED
PNG_CONST PNG_sCAL;
#endif
#ifdef PNG_READ_sPLT_SUPPORTED
PNG_CONST PNG_sPLT;
#endif
#ifdef PNG_READ_sRGB_SUPPORTED
PNG_CONST PNG_sRGB;
#endif
#ifdef PNG_READ_tEXt_SUPPORTED
PNG_CONST PNG_tEXt;
#endif
#ifdef PNG_READ_tIME_SUPPORTED
PNG_CONST PNG_tIME;
#endif
#ifdef PNG_READ_tRNS_SUPPORTED
PNG_CONST PNG_tRNS;
#endif
#ifdef PNG_READ_zTXt_SUPPORTED
PNG_CONST PNG_zTXt;
#endif
#endif /* PNG_USE_LOCAL_ARRAYS */
png_uint_32 length = png_read_chunk_header(png_ptr);
PNG_CONST png_bytep chunk_name = png_ptr->chunk_name;
if (!png_memcmp(chunk_name, png_IHDR, 4))
png_handle_IHDR(png_ptr, info_ptr, length);
else if (!png_memcmp(chunk_name, png_IEND, 4))
png_handle_IEND(png_ptr, info_ptr, length);
#ifdef PNG_HANDLE_AS_UNKNOWN_SUPPORTED
else if (png_handle_as_unknown(png_ptr, chunk_name))
{
if (!png_memcmp(chunk_name, png_IDAT, 4))
{
if ((length > 0) || (png_ptr->mode & PNG_HAVE_CHUNK_AFTER_IDAT))
png_error(png_ptr, "Too many IDAT's found");
}
png_handle_unknown(png_ptr, info_ptr, length);
if (!png_memcmp(chunk_name, png_PLTE, 4))
png_ptr->mode |= PNG_HAVE_PLTE;
}
#endif
else if (!png_memcmp(chunk_name, png_IDAT, 4))
{
/* Zero length IDATs are legal after the last IDAT has been
* read, but not after other chunks have been read.
*/
if ((length > 0) || (png_ptr->mode & PNG_HAVE_CHUNK_AFTER_IDAT))
png_error(png_ptr, "Too many IDAT's found");
png_crc_finish(png_ptr, length);
}
else if (!png_memcmp(chunk_name, png_PLTE, 4))
png_handle_PLTE(png_ptr, info_ptr, length);
#ifdef PNG_READ_bKGD_SUPPORTED
else if (!png_memcmp(chunk_name, png_bKGD, 4))
png_handle_bKGD(png_ptr, info_ptr, length);
#endif
#ifdef PNG_READ_cHRM_SUPPORTED
else if (!png_memcmp(chunk_name, png_cHRM, 4))
png_handle_cHRM(png_ptr, info_ptr, length);
#endif
#ifdef PNG_READ_gAMA_SUPPORTED
else if (!png_memcmp(chunk_name, png_gAMA, 4))
png_handle_gAMA(png_ptr, info_ptr, length);
#endif
#ifdef PNG_READ_hIST_SUPPORTED
else if (!png_memcmp(chunk_name, png_hIST, 4))
png_handle_hIST(png_ptr, info_ptr, length);
#endif
#ifdef PNG_READ_oFFs_SUPPORTED
else if (!png_memcmp(chunk_name, png_oFFs, 4))
png_handle_oFFs(png_ptr, info_ptr, length);
#endif
#ifdef PNG_READ_pCAL_SUPPORTED
else if (!png_memcmp(chunk_name, png_pCAL, 4))
png_handle_pCAL(png_ptr, info_ptr, length);
#endif
#ifdef PNG_READ_sCAL_SUPPORTED
else if (!png_memcmp(chunk_name, png_sCAL, 4))
png_handle_sCAL(png_ptr, info_ptr, length);
#endif
#ifdef PNG_READ_pHYs_SUPPORTED
else if (!png_memcmp(chunk_name, png_pHYs, 4))
png_handle_pHYs(png_ptr, info_ptr, length);
#endif
#ifdef PNG_READ_sBIT_SUPPORTED
else if (!png_memcmp(chunk_name, png_sBIT, 4))
png_handle_sBIT(png_ptr, info_ptr, length);
#endif
#ifdef PNG_READ_sRGB_SUPPORTED
else if (!png_memcmp(chunk_name, png_sRGB, 4))
png_handle_sRGB(png_ptr, info_ptr, length);
#endif
#ifdef PNG_READ_iCCP_SUPPORTED
else if (!png_memcmp(chunk_name, png_iCCP, 4))
png_handle_iCCP(png_ptr, info_ptr, length);
#endif
#ifdef PNG_READ_sPLT_SUPPORTED
else if (!png_memcmp(chunk_name, png_sPLT, 4))
png_handle_sPLT(png_ptr, info_ptr, length);
#endif
#ifdef PNG_READ_tEXt_SUPPORTED
else if (!png_memcmp(chunk_name, png_tEXt, 4))
png_handle_tEXt(png_ptr, info_ptr, length);
#endif
#ifdef PNG_READ_tIME_SUPPORTED
else if (!png_memcmp(chunk_name, png_tIME, 4))
png_handle_tIME(png_ptr, info_ptr, length);
#endif
#ifdef PNG_READ_tRNS_SUPPORTED
else if (!png_memcmp(chunk_name, png_tRNS, 4))
png_handle_tRNS(png_ptr, info_ptr, length);
#endif
#ifdef PNG_READ_zTXt_SUPPORTED
else if (!png_memcmp(chunk_name, png_zTXt, 4))
png_handle_zTXt(png_ptr, info_ptr, length);
#endif
#ifdef PNG_READ_iTXt_SUPPORTED
else if (!png_memcmp(chunk_name, png_iTXt, 4))
png_handle_iTXt(png_ptr, info_ptr, length);
#endif
else
png_handle_unknown(png_ptr, info_ptr, length);
} while (!(png_ptr->mode & PNG_HAVE_IEND));
}
| 13,982 |
10,179 | 0 | Ins_SROUND( INS_ARG )
{
DO_SROUND
}
| 13,983 |
128,294 | 0 | void FrameView::paintScrollbar(GraphicsContext* context, Scrollbar* bar, const IntRect& rect)
{
bool needsBackgorund = bar->isCustomScrollbar() && m_frame->isMainFrame();
if (needsBackgorund) {
IntRect toFill = bar->frameRect();
toFill.intersect(rect);
context->fillRect(toFill, baseBackgroundColor());
}
ScrollView::paintScrollbar(context, bar, rect);
}
| 13,984 |
22,408 | 0 | int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
void *key)
{
return try_to_wake_up(curr->private, mode, wake_flags);
}
| 13,985 |
38,770 | 0 | checkcondition_bit(void *checkval, ITEM *item)
{
return GETBIT(checkval, HASHVAL(item->val));
}
| 13,986 |
166,604 | 0 | void WebGL2RenderingContextBase::clearBufferiv(GLenum buffer,
GLint drawbuffer,
const Vector<GLint>& value,
GLuint src_offset) {
if (isContextLost() ||
!ValidateClearBuffer("clearBufferiv", buffer, value.size(), src_offset))
return;
ScopedRGBEmulationColorMask emulation_color_mask(this, color_mask_,
drawing_buffer_.get());
ContextGL()->ClearBufferiv(buffer, drawbuffer, value.data() + src_offset);
UpdateBuffersToAutoClear(kClearBufferiv, buffer, drawbuffer);
}
| 13,987 |
94,679 | 0 | int i2d_PKCS8_fp(FILE *fp, X509_SIG *p8)
{
return ASN1_i2d_fp_of(X509_SIG,i2d_X509_SIG,fp,p8);
}
| 13,988 |
89,672 | 0 | u8 *nfc_llcp_build_tlv(u8 type, u8 *value, u8 value_length, u8 *tlv_length)
{
u8 *tlv, length;
pr_debug("type %d\n", type);
if (type >= LLCP_TLV_MAX)
return NULL;
length = llcp_tlv_length[type];
if (length == 0 && value_length == 0)
return NULL;
else if (length == 0)
length = value_length;
*tlv_length = 2 + length;
tlv = kzalloc(2 + length, GFP_KERNEL);
if (tlv == NULL)
return tlv;
tlv[0] = type;
tlv[1] = length;
memcpy(tlv + 2, value, length);
return tlv;
}
| 13,989 |
172,319 | 0 | int set_iface_flags(const char *ifname, int dev_up) {
struct ifreq ifr;
int ret;
int sock = socket(PF_INET, SOCK_DGRAM, 0);
if (sock < 0) {
ALOGD("Bad socket: %d\n", sock);
return -errno;
}
memset(&ifr, 0, sizeof(ifr));
strlcpy(ifr.ifr_name, ifname, IFNAMSIZ);
if (ioctl(sock, SIOCGIFFLAGS, &ifr) != 0) {
ret = errno ? -errno : -999;
ALOGE("Could not read interface %s flags: %d\n", ifname, errno);
close(sock);
return ret;
} else {
}
if (dev_up) {
if (ifr.ifr_flags & IFF_UP) {
close(sock);
return 0;
}
ifr.ifr_flags |= IFF_UP;
} else {
if (!(ifr.ifr_flags & IFF_UP)) {
close(sock);
return 0;
}
ifr.ifr_flags &= ~IFF_UP;
}
if (ioctl(sock, SIOCSIFFLAGS, &ifr) != 0) {
ALOGE("Could not set interface %s flags: %d\n", ifname, errno);
ret = errno ? -errno : -999;
close(sock);
return ret;
} else {
ALOGD("set interface %s flags (%s)\n", ifname, dev_up ? "UP" : "DOWN");
}
close(sock);
return 0;
}
| 13,990 |
184,145 | 1 | bool AudioHandler::VerifyMixerConnection() {
PulseAudioMixer::State mixer_state = mixer_->CheckState();
if (mixer_state == PulseAudioMixer::READY)
return true;
if (connected_) {
// Something happened and the mixer is no longer valid after having been
// initialized earlier.
connected_ = false;
LOG(ERROR) << "Lost connection to PulseAudio";
} else {
LOG(ERROR) << "Mixer not valid";
}
if ((mixer_state == PulseAudioMixer::INITIALIZING) ||
(mixer_state == PulseAudioMixer::SHUTTING_DOWN))
return false;
if (reconnect_tries_ < kMaxReconnectTries) {
reconnect_tries_++;
VLOG(1) << "Re-connecting to PulseAudio attempt " << reconnect_tries_ << "/"
<< kMaxReconnectTries;
mixer_.reset(new PulseAudioMixer());
connected_ = mixer_->InitSync();
if (connected_) {
reconnect_tries_ = 0;
return true;
}
LOG(ERROR) << "Unable to re-connect to PulseAudio";
}
return false;
}
| 13,991 |
81,977 | 0 | static int parseOptions(int argc, char **argv) {
int i;
for (i = 1; i < argc; i++) {
int lastarg = i==argc-1;
if (!strcmp(argv[i],"-h") && !lastarg) {
sdsfree(config.hostip);
config.hostip = sdsnew(argv[++i]);
} else if (!strcmp(argv[i],"-h") && lastarg) {
usage();
} else if (!strcmp(argv[i],"--help")) {
usage();
} else if (!strcmp(argv[i],"-x")) {
config.stdinarg = 1;
} else if (!strcmp(argv[i],"-p") && !lastarg) {
config.hostport = atoi(argv[++i]);
} else if (!strcmp(argv[i],"-s") && !lastarg) {
config.hostsocket = argv[++i];
} else if (!strcmp(argv[i],"-r") && !lastarg) {
config.repeat = strtoll(argv[++i],NULL,10);
} else if (!strcmp(argv[i],"-i") && !lastarg) {
double seconds = atof(argv[++i]);
config.interval = seconds*1000000;
} else if (!strcmp(argv[i],"-n") && !lastarg) {
config.dbnum = atoi(argv[++i]);
} else if (!strcmp(argv[i],"-a") && !lastarg) {
fputs("Warning: Using a password with '-a' option on the command line interface may not be safe.\n", stderr);
config.auth = argv[++i];
} else if (!strcmp(argv[i],"-u") && !lastarg) {
parseRedisUri(argv[++i]);
} else if (!strcmp(argv[i],"--raw")) {
config.output = OUTPUT_RAW;
} else if (!strcmp(argv[i],"--no-raw")) {
config.output = OUTPUT_STANDARD;
} else if (!strcmp(argv[i],"--csv")) {
config.output = OUTPUT_CSV;
} else if (!strcmp(argv[i],"--latency")) {
config.latency_mode = 1;
} else if (!strcmp(argv[i],"--latency-dist")) {
config.latency_dist_mode = 1;
} else if (!strcmp(argv[i],"--mono")) {
spectrum_palette = spectrum_palette_mono;
spectrum_palette_size = spectrum_palette_mono_size;
} else if (!strcmp(argv[i],"--latency-history")) {
config.latency_mode = 1;
config.latency_history = 1;
} else if (!strcmp(argv[i],"--lru-test") && !lastarg) {
config.lru_test_mode = 1;
config.lru_test_sample_size = strtoll(argv[++i],NULL,10);
} else if (!strcmp(argv[i],"--slave")) {
config.slave_mode = 1;
} else if (!strcmp(argv[i],"--stat")) {
config.stat_mode = 1;
} else if (!strcmp(argv[i],"--scan")) {
config.scan_mode = 1;
} else if (!strcmp(argv[i],"--pattern") && !lastarg) {
config.pattern = argv[++i];
} else if (!strcmp(argv[i],"--intrinsic-latency") && !lastarg) {
config.intrinsic_latency_mode = 1;
config.intrinsic_latency_duration = atoi(argv[++i]);
} else if (!strcmp(argv[i],"--rdb") && !lastarg) {
config.getrdb_mode = 1;
config.rdb_filename = argv[++i];
} else if (!strcmp(argv[i],"--pipe")) {
config.pipe_mode = 1;
} else if (!strcmp(argv[i],"--pipe-timeout") && !lastarg) {
config.pipe_timeout = atoi(argv[++i]);
} else if (!strcmp(argv[i],"--bigkeys")) {
config.bigkeys = 1;
} else if (!strcmp(argv[i],"--hotkeys")) {
config.hotkeys = 1;
} else if (!strcmp(argv[i],"--eval") && !lastarg) {
config.eval = argv[++i];
} else if (!strcmp(argv[i],"--ldb")) {
config.eval_ldb = 1;
config.output = OUTPUT_RAW;
} else if (!strcmp(argv[i],"--ldb-sync-mode")) {
config.eval_ldb = 1;
config.eval_ldb_sync = 1;
config.output = OUTPUT_RAW;
} else if (!strcmp(argv[i],"-c")) {
config.cluster_mode = 1;
} else if (!strcmp(argv[i],"-d") && !lastarg) {
sdsfree(config.mb_delim);
config.mb_delim = sdsnew(argv[++i]);
} else if (!strcmp(argv[i],"-v") || !strcmp(argv[i], "--version")) {
sds version = cliVersion();
printf("redis-cli %s\n", version);
sdsfree(version);
exit(0);
} else {
if (argv[i][0] == '-') {
fprintf(stderr,
"Unrecognized option or bad number of args for: '%s'\n",
argv[i]);
exit(1);
} else {
/* Likely the command name, stop here. */
break;
}
}
}
/* --ldb requires --eval. */
if (config.eval_ldb && config.eval == NULL) {
fprintf(stderr,"Options --ldb and --ldb-sync-mode require --eval.\n");
fprintf(stderr,"Try %s --help for more information.\n", argv[0]);
exit(1);
}
return i;
}
| 13,992 |
185,003 | 1 | void DriveFileStreamReader::InitializeAfterGetFileContentByPathInitialized(
const base::FilePath& drive_file_path,
const InitializeCompletionCallback& callback,
FileError error,
scoped_ptr<DriveEntryProto> entry,
const base::FilePath& local_cache_file_path) {
DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
if (error != FILE_ERROR_OK) {
callback.Run(error, scoped_ptr<DriveEntryProto>());
return;
}
DCHECK(entry);
if (local_cache_file_path.empty()) {
// The file is not cached, and being downloaded.
reader_proxy_.reset(
new internal::NetworkReaderProxy(
entry->file_info().size(),
base::Bind(&internal::CancelGetFile,
drive_file_system_getter_, drive_file_path)));
callback.Run(FILE_ERROR_OK, entry.Pass());
return;
}
// Otherwise, open the stream for file.
scoped_ptr<net::FileStream> file_stream(new net::FileStream(NULL));
net::FileStream* file_stream_ptr = file_stream.get();
net::CompletionCallback open_completion_callback = base::Bind(
&DriveFileStreamReader::InitializeAfterLocalFileOpen,
weak_ptr_factory_.GetWeakPtr(),
callback,
base::Passed(&entry),
base::Passed(&file_stream));
int result = file_stream_ptr->Open(
local_cache_file_path,
base::PLATFORM_FILE_OPEN | base::PLATFORM_FILE_READ |
base::PLATFORM_FILE_ASYNC,
open_completion_callback);
if (result == net::ERR_IO_PENDING) {
// If the result ERR_IO_PENDING, the callback will be invoked later.
// Do nothing here.
return;
}
open_completion_callback.Run(result);
}
| 13,993 |
180,567 | 1 | static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
int i;
scm->fp = UNIXCB(skb).fp;
UNIXCB(skb).fp = NULL;
for (i = scm->fp->count-1; i >= 0; i--)
unix_notinflight(scm->fp->fp[i]);
}
| 13,994 |
18,751 | 0 | static int dccp_v4_init_sock(struct sock *sk)
{
static __u8 dccp_v4_ctl_sock_initialized;
int err = dccp_init_sock(sk, dccp_v4_ctl_sock_initialized);
if (err == 0) {
if (unlikely(!dccp_v4_ctl_sock_initialized))
dccp_v4_ctl_sock_initialized = 1;
inet_csk(sk)->icsk_af_ops = &dccp_ipv4_af_ops;
}
return err;
}
| 13,995 |
52,181 | 0 | PHP_NAMED_FUNCTION(php_if_ftruncate)
{
zval *fp;
long size;
php_stream *stream;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rl", &fp, &size) == FAILURE) {
RETURN_FALSE;
}
PHP_STREAM_TO_ZVAL(stream, &fp);
if (!php_stream_truncate_supported(stream)) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Can't truncate this stream!");
RETURN_FALSE;
}
RETURN_BOOL(0 == php_stream_truncate_set_size(stream, size));
}
| 13,996 |
75,264 | 0 | static uint32 get_bits(vorb *f, int n)
{
uint32 z;
if (f->valid_bits < 0) return 0;
if (f->valid_bits < n) {
if (n > 24) {
z = get_bits(f, 24);
z += get_bits(f, n-24) << 24;
return z;
}
if (f->valid_bits == 0) f->acc = 0;
while (f->valid_bits < n) {
int z = get8_packet_raw(f);
if (z == EOP) {
f->valid_bits = INVALID_BITS;
return 0;
}
f->acc += z << f->valid_bits;
f->valid_bits += 8;
}
}
if (f->valid_bits < 0) return 0;
z = f->acc & ((1 << n)-1);
f->acc >>= n;
f->valid_bits -= n;
return z;
}
| 13,997 |
69,903 | 0 | connection_ap_process_transparent(entry_connection_t *conn)
{
socks_request_t *socks;
tor_assert(conn);
tor_assert(conn->socks_request);
socks = conn->socks_request;
/* pretend that a socks handshake completed so we don't try to
* send a socks reply down a transparent conn */
socks->command = SOCKS_COMMAND_CONNECT;
socks->has_finished = 1;
log_debug(LD_APP,"entered.");
if (connection_ap_get_original_destination(conn, socks) < 0) {
log_warn(LD_APP,"Fetching original destination failed. Closing.");
connection_mark_unattached_ap(conn,
END_STREAM_REASON_CANT_FETCH_ORIG_DEST);
return -1;
}
/* we have the original destination */
control_event_stream_status(conn, STREAM_EVENT_NEW, 0);
return connection_ap_rewrite_and_attach_if_allowed(conn, NULL, NULL);
}
| 13,998 |
149,606 | 0 | void PreconnectManager::StartPreconnectUrl(
const GURL& url,
bool allow_credentials,
net::NetworkIsolationKey network_isolation_key) {
DCHECK_CURRENTLY_ON(content::BrowserThread::UI);
if (!url.SchemeIsHTTPOrHTTPS())
return;
PreresolveJobId job_id = preresolve_jobs_.Add(std::make_unique<PreresolveJob>(
url.GetOrigin(), 1, allow_credentials, std::move(network_isolation_key),
nullptr));
queued_jobs_.push_front(job_id);
TryToLaunchPreresolveJobs();
}
| 13,999 |