unique_id          int64   13 to 189k
target             int64   0 to 1
code               string  lengths 20 to 241k
__index_level_0__  int64   0 to 18.9k
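The four entries above are per-column statistics (name, dtype, minimum, maximum) for the data rows that follow; each row carries an id, a 0/1 target label, one code snippet, and an auxiliary index. Below is a minimal sketch of how such an export could be loaded and sanity-checked. The use of pandas, the parquet format, and the file name "train.parquet" are assumptions for illustration only, not details from the original dump.

import pandas as pd

# Hypothetical path to the exported split; the real storage format is not stated here.
df = pd.read_parquet("train.parquet")

# Columns expected from the schema above:
#   unique_id (int64), target (int64, 0 or 1), code (string), __index_level_0__ (int64)
print(df.dtypes)
print(df["target"].value_counts())      # class balance of the 0/1 labels
print(df["code"].str.len().describe())  # code lengths, roughly 20 to 241k characters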
149,377
0
bool CheckClientDownloadRequest::IsSupportedDownload( const download::DownloadItem& item, const base::FilePath& target_path, DownloadCheckResultReason* reason, ClientDownloadRequest::DownloadType* type) { if (item.GetUrlChain().empty()) { *reason = REASON_EMPTY_URL_CHAIN; return false; } const GURL& final_url = item.GetUrlChain().back(); if (!final_url.is_valid() || final_url.is_empty()) { *reason = REASON_INVALID_URL; return false; } if (!final_url.IsStandard() && !final_url.SchemeIsBlob() && !final_url.SchemeIs(url::kDataScheme)) { *reason = REASON_UNSUPPORTED_URL_SCHEME; return false; } if (final_url.SchemeIsFile()) { *reason = final_url.has_host() ? REASON_REMOTE_FILE : REASON_LOCAL_FILE; return false; } if (!FileTypePolicies::GetInstance()->IsCheckedBinaryFile(target_path)) { *reason = REASON_NOT_BINARY_FILE; return false; } *type = download_type_util::GetDownloadType(target_path); return true; }
2,000
29,913
0
ssize_t proc_gid_map_write(struct file *file, const char __user *buf, size_t size, loff_t *ppos) { struct seq_file *seq = file->private_data; struct user_namespace *ns = seq->private; struct user_namespace *seq_ns = seq_user_ns(seq); if (!ns->parent) return -EPERM; if ((seq_ns != ns) && (seq_ns != ns->parent)) return -EPERM; return map_write(file, buf, size, ppos, CAP_SETGID, &ns->gid_map, &ns->parent->gid_map); }
2,001
82,577
0
void jswrap_graphics_init() { #ifdef USE_LCD_FSMC JsVar *parent = jspNewObject("LCD", "Graphics"); if (parent) { JsVar *parentObj = jsvSkipName(parent); JsGraphics gfx; graphicsStructInit(&gfx); gfx.data.type = JSGRAPHICSTYPE_FSMC; gfx.graphicsVar = parentObj; gfx.data.width = 320; gfx.data.height = 240; gfx.data.bpp = 16; lcdInit_FSMC(&gfx); lcdSetCallbacks_FSMC(&gfx); graphicsSplash(&gfx); graphicsSetVar(&gfx); jsvUnLock2(parentObj, parent); } #endif }
2,002
159,825
0
static bool IsSlash(UChar c) { return (c == '/' || c == '\\'); }
2,003
101,796
0
void Browser::Search() { const GURL& url = GetSelectedTabContents()->GetURL(); if (url.SchemeIs(chrome::kChromeUIScheme) && url.host() == chrome::kChromeUINewTabHost) { CloseTab(); return; } if (window_->IsFullscreen()) { ToggleFullscreenMode(); MessageLoop::current()->PostTask( FROM_HERE, method_factory_.NewRunnableMethod(&Browser::Search)); return; } NewTab(); }
2,004
41,435
0
static int ovl_readlink(struct dentry *dentry, char __user *buf, int bufsiz) { struct path realpath; struct inode *realinode; ovl_path_real(dentry, &realpath); realinode = realpath.dentry->d_inode; if (!realinode->i_op->readlink) return -EINVAL; touch_atime(&realpath); return realinode->i_op->readlink(realpath.dentry, buf, bufsiz); }
2,005
48,683
0
static void *session_realloc(void *p, size_t size, void *ctx) { h2_session *session = ctx; ap_log_cerror(APLOG_MARK, APLOG_TRACE6, 0, session->c, "h2_session(%ld): realloc(%ld)", session->id, (long)size); return realloc(p, size); }
2,006
137,098
0
void InputType::SanitizeValueInResponseToMinOrMaxAttributeChange() {}
2,007
174,524
0
dhcp_message_add_addr(struct dhcp_message *dhcp, uint8_t type, struct in_addr addr) { uint8_t *p; size_t len; p = dhcp->options; while (*p != DHO_END) { p++; p += *p + 1; } len = p - (uint8_t *)dhcp; if (len + 6 > sizeof(*dhcp)) { errno = ENOMEM; return -1; } PUTADDR(type, addr); *p = DHO_END; return 0; }
2,008
18,115
0
pango_glyph_string_copy (PangoGlyphString *string) { PangoGlyphString *new_string; if (string == NULL) return NULL; new_string = g_slice_new (PangoGlyphString); *new_string = *string; new_string->glyphs = g_memdup (string->glyphs, string->space * sizeof (PangoGlyphInfo)); new_string->log_clusters = g_memdup (string->log_clusters, string->space * sizeof (gint)); return new_string; }
2,009
64,743
0
int dns_packet_read(DnsPacket *p, size_t sz, const void **ret, size_t *start) { assert(p); if (p->rindex + sz > p->size) return -EMSGSIZE; if (ret) *ret = (uint8_t*) DNS_PACKET_DATA(p) + p->rindex; if (start) *start = p->rindex; p->rindex += sz; return 0; }
2,010
50,168
0
static void alloc_globals_dtor(zend_alloc_globals *alloc_globals) { zend_mm_shutdown(alloc_globals->mm_heap, 1, 1); }
2,011
84,238
0
selaReadStream(FILE *fp) { l_int32 i, n, version; SEL *sel; SELA *sela; PROCNAME("selaReadStream"); if (!fp) return (SELA *)ERROR_PTR("stream not defined", procName, NULL); if (fscanf(fp, "\nSela Version %d\n", &version) != 1) return (SELA *)ERROR_PTR("not a sela file", procName, NULL); if (version != SEL_VERSION_NUMBER) return (SELA *)ERROR_PTR("invalid sel version", procName, NULL); if (fscanf(fp, "Number of Sels = %d\n\n", &n) != 1) return (SELA *)ERROR_PTR("not a sela file", procName, NULL); if ((sela = selaCreate(n)) == NULL) return (SELA *)ERROR_PTR("sela not made", procName, NULL); sela->nalloc = n; for (i = 0; i < n; i++) { if ((sel = selReadStream(fp)) == NULL) { selaDestroy(&sela); return (SELA *)ERROR_PTR("sel not read", procName, NULL); } selaAddSel(sela, sel, NULL, 0); } return sela; }
2,012
33,554
0
static inline void shmem_unacct_blocks(unsigned long flags, long pages) { if (flags & VM_NORESERVE) vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE)); }
2,013
118,236
0
views::View* AutofillDialogViews::GetNotificationAreaForTesting() { return notification_area_; }
2,014
154,908
0
~ScopedUnpackStateButAlignmentReset() { if (skip_pixels_ != 0) { api_->glPixelStoreiFn(GL_UNPACK_SKIP_PIXELS, skip_pixels_); } if (skip_rows_ != 0) { api_->glPixelStoreiFn(GL_UNPACK_SKIP_ROWS, skip_rows_); } if (skip_images_ != 0) { api_->glPixelStoreiFn(GL_UNPACK_SKIP_IMAGES, skip_images_); } if (row_length_ != 0) { api_->glPixelStoreiFn(GL_UNPACK_ROW_LENGTH, row_length_); } if (image_height_ != 0) { api_->glPixelStoreiFn(GL_UNPACK_IMAGE_HEIGHT, image_height_); } }
2,015
176,284
0
static Handle<Object> GetInternalImpl(Handle<JSObject> holder, uint32_t entry) { return Subclass::GetImpl(holder->GetIsolate(), holder->elements(), entry); }
2,016
46,520
0
int test_probable_prime_coprime(BIO *bp, BN_CTX *ctx) { int i, j, ret = 0; BIGNUM r; BN_ULONG primes[5] = { 2, 3, 5, 7, 11 }; BN_init(&r); for (i = 0; i < 1000; i++) { if (!bn_probable_prime_dh_coprime(&r, 1024, ctx)) goto err; for (j = 0; j < 5; j++) { if (BN_mod_word(&r, primes[j]) == 0) { BIO_printf(bp, "Number generated is not coprime to %ld:\n", primes[j]); BN_print_fp(stdout, &r); BIO_printf(bp, "\n"); goto err; } } } ret = 1; err: BN_clear(&r); return ret; }
2,017
73,343
0
void ass_frame_unref(ASS_Image *img) { if (!img || --((ASS_ImagePriv *) img)->ref_count) return; do { ASS_ImagePriv *priv = (ASS_ImagePriv *) img; img = img->next; if (priv->source) ass_cache_dec_ref(priv->source); else ass_aligned_free(priv->result.bitmap); free(priv); } while (img); }
2,018
282
0
cluster_unintern (struct cluster_list *cluster) { if (cluster->refcnt) cluster->refcnt--; if (cluster->refcnt == 0) { hash_release (cluster_hash, cluster); cluster_free (cluster); } }
2,019
168,352
0
void BrowserView::SetIntentPickerViewVisibility(bool visible) { LocationBarView* location_bar = GetLocationBarView(); if (!location_bar->intent_picker_view()) return; if (location_bar->intent_picker_view()->visible() != visible) { location_bar->intent_picker_view()->SetVisible(visible); location_bar->Layout(); } }
2,020
179,803
1
static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, struct sctp_association *assoc, sctp_socket_type_t type) { struct sctp_sock *oldsp = sctp_sk(oldsk); struct sctp_sock *newsp = sctp_sk(newsk); struct sctp_bind_bucket *pp; /* hash list port iterator */ struct sctp_endpoint *newep = newsp->ep; struct sk_buff *skb, *tmp; struct sctp_ulpevent *event; struct sctp_bind_hashbucket *head; struct list_head tmplist; /* Migrate socket buffer sizes and all the socket level options to the * new socket. */ newsk->sk_sndbuf = oldsk->sk_sndbuf; newsk->sk_rcvbuf = oldsk->sk_rcvbuf; /* Brute force copy old sctp opt. */ if (oldsp->do_auto_asconf) { memcpy(&tmplist, &newsp->auto_asconf_list, sizeof(tmplist)); inet_sk_copy_descendant(newsk, oldsk); memcpy(&newsp->auto_asconf_list, &tmplist, sizeof(tmplist)); } else inet_sk_copy_descendant(newsk, oldsk); /* Restore the ep value that was overwritten with the above structure * copy. */ newsp->ep = newep; newsp->hmac = NULL; /* Hook this new socket in to the bind_hash list. */ head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk), inet_sk(oldsk)->inet_num)]; local_bh_disable(); spin_lock(&head->lock); pp = sctp_sk(oldsk)->bind_hash; sk_add_bind_node(newsk, &pp->owner); sctp_sk(newsk)->bind_hash = pp; inet_sk(newsk)->inet_num = inet_sk(oldsk)->inet_num; spin_unlock(&head->lock); local_bh_enable(); /* Copy the bind_addr list from the original endpoint to the new * endpoint so that we can handle restarts properly */ sctp_bind_addr_dup(&newsp->ep->base.bind_addr, &oldsp->ep->base.bind_addr, GFP_KERNEL); /* Move any messages in the old socket's receive queue that are for the * peeled off association to the new socket's receive queue. */ sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) { event = sctp_skb2event(skb); if (event->asoc == assoc) { __skb_unlink(skb, &oldsk->sk_receive_queue); __skb_queue_tail(&newsk->sk_receive_queue, skb); sctp_skb_set_owner_r_frag(skb, newsk); } } /* Clean up any messages pending delivery due to partial * delivery. Three cases: * 1) No partial deliver; no work. * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby. * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue. */ skb_queue_head_init(&newsp->pd_lobby); atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode); if (atomic_read(&sctp_sk(oldsk)->pd_mode)) { struct sk_buff_head *queue; /* Decide which queue to move pd_lobby skbs to. */ if (assoc->ulpq.pd_mode) { queue = &newsp->pd_lobby; } else queue = &newsk->sk_receive_queue; /* Walk through the pd_lobby, looking for skbs that * need moved to the new socket. */ sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) { event = sctp_skb2event(skb); if (event->asoc == assoc) { __skb_unlink(skb, &oldsp->pd_lobby); __skb_queue_tail(queue, skb); sctp_skb_set_owner_r_frag(skb, newsk); } } /* Clear up any skbs waiting for the partial * delivery to finish. */ if (assoc->ulpq.pd_mode) sctp_clear_pd(oldsk, NULL); } sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp) sctp_skb_set_owner_r_frag(skb, newsk); sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp) sctp_skb_set_owner_r_frag(skb, newsk); /* Set the type of socket to indicate that it is peeled off from the * original UDP-style socket or created with the accept() call on a * TCP-style socket.. */ newsp->type = type; /* Mark the new socket "in-use" by the user so that any packets * that may arrive on the association after we've moved it are * queued to the backlog. 
This prevents a potential race between * backlog processing on the old socket and new-packet processing * on the new socket. * * The caller has just allocated newsk so we can guarantee that other * paths won't try to lock it and then oldsk. */ lock_sock_nested(newsk, SINGLE_DEPTH_NESTING); sctp_assoc_migrate(assoc, newsk); /* If the association on the newsk is already closed before accept() * is called, set RCV_SHUTDOWN flag. */ if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP)) newsk->sk_shutdown |= RCV_SHUTDOWN; newsk->sk_state = SCTP_SS_ESTABLISHED; release_sock(newsk); }
2,021
123,071
0
bool RenderWidgetHostImpl::TryGetBackingStore(const gfx::Size& desired_size, BackingStore** backing_store) { if (view_->HasAcceleratedSurface(desired_size)) { *backing_store = NULL; return true; } *backing_store = BackingStoreManager::GetBackingStore(this, desired_size); return !!*backing_store; }
2,022
39,632
0
static int futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2, union futex_key *key1, union futex_key *key2, struct futex_pi_state **ps, int set_waiters) { struct futex_q *top_waiter = NULL; u32 curval; int ret; if (get_futex_value_locked(&curval, pifutex)) return -EFAULT; /* * Find the top_waiter and determine if there are additional waiters. * If the caller intends to requeue more than 1 waiter to pifutex, * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now, * as we have means to handle the possible fault. If not, don't set * the bit unecessarily as it will force the subsequent unlock to enter * the kernel. */ top_waiter = futex_top_waiter(hb1, key1); /* There are no waiters, nothing for us to do. */ if (!top_waiter) return 0; /* Ensure we requeue to the expected futex. */ if (!match_futex(top_waiter->requeue_pi_key, key2)) return -EINVAL; /* * Try to take the lock for top_waiter. Set the FUTEX_WAITERS bit in * the contended case or if set_waiters is 1. The pi_state is returned * in ps in contended cases. */ ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task, set_waiters); if (ret == 1) requeue_pi_wake_futex(top_waiter, key2, hb2); return ret; }
2,023
178,780
1
static struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve) { struct hstate *h = hstate_vma(vma); struct page *page; struct address_space *mapping = vma->vm_file->f_mapping; struct inode *inode = mapping->host; long chg; /* * Processes that did not create the mapping will have no reserves and * will not have accounted against quota. Check that the quota can be * made before satisfying the allocation * MAP_NORESERVE mappings may also need pages and quota allocated * if no reserve mapping overlaps. */ chg = vma_needs_reservation(h, vma, addr); if (chg < 0) return ERR_PTR(-VM_FAULT_OOM); if (chg) if (hugetlb_get_quota(inode->i_mapping, chg)) return ERR_PTR(-VM_FAULT_SIGBUS); spin_lock(&hugetlb_lock); page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve); spin_unlock(&hugetlb_lock); if (!page) { page = alloc_buddy_huge_page(h, NUMA_NO_NODE); if (!page) { hugetlb_put_quota(inode->i_mapping, chg); return ERR_PTR(-VM_FAULT_SIGBUS); } } set_page_private(page, (unsigned long) mapping); vma_commit_reservation(h, vma, addr); return page; }
2,024
91,777
0
void comps_objmrtree_set_n(COMPS_ObjMRTree *rt, char *key, size_t len, void *ndata) { __comps_objmrtree_set(rt, key, len, ndata); }
2,025
179,262
1
int ext4_orphan_del(handle_t *handle, struct inode *inode) { struct list_head *prev; struct ext4_inode_info *ei = EXT4_I(inode); struct ext4_sb_info *sbi; __u32 ino_next; struct ext4_iloc iloc; int err = 0; if (!EXT4_SB(inode->i_sb)->s_journal) return 0; mutex_lock(&EXT4_SB(inode->i_sb)->s_orphan_lock); if (list_empty(&ei->i_orphan)) goto out; ino_next = NEXT_ORPHAN(inode); prev = ei->i_orphan.prev; sbi = EXT4_SB(inode->i_sb); jbd_debug(4, "remove inode %lu from orphan list\n", inode->i_ino); list_del_init(&ei->i_orphan); /* If we're on an error path, we may not have a valid * transaction handle with which to update the orphan list on * disk, but we still need to remove the inode from the linked * list in memory. */ if (!handle) goto out; err = ext4_reserve_inode_write(handle, inode, &iloc); if (err) goto out_err; if (prev == &sbi->s_orphan) { jbd_debug(4, "superblock will point to %u\n", ino_next); BUFFER_TRACE(sbi->s_sbh, "get_write_access"); err = ext4_journal_get_write_access(handle, sbi->s_sbh); if (err) goto out_brelse; sbi->s_es->s_last_orphan = cpu_to_le32(ino_next); err = ext4_handle_dirty_super(handle, inode->i_sb); } else { struct ext4_iloc iloc2; struct inode *i_prev = &list_entry(prev, struct ext4_inode_info, i_orphan)->vfs_inode; jbd_debug(4, "orphan inode %lu will point to %u\n", i_prev->i_ino, ino_next); err = ext4_reserve_inode_write(handle, i_prev, &iloc2); if (err) goto out_brelse; NEXT_ORPHAN(i_prev) = ino_next; err = ext4_mark_iloc_dirty(handle, i_prev, &iloc2); } if (err) goto out_brelse; NEXT_ORPHAN(inode) = 0; err = ext4_mark_iloc_dirty(handle, inode, &iloc); out_err: ext4_std_error(inode->i_sb, err); out: mutex_unlock(&EXT4_SB(inode->i_sb)->s_orphan_lock); return err; out_brelse: brelse(iloc.bh); goto out_err; }
2,026
175,409
0
status_t SampleTable::setTimeToSampleParams( off64_t data_offset, size_t data_size) { if (mHasTimeToSample || data_size < 8) { return ERROR_MALFORMED; } uint8_t header[8]; if (mDataSource->readAt( data_offset, header, sizeof(header)) < (ssize_t)sizeof(header)) { return ERROR_IO; } if (U32_AT(header) != 0) { return ERROR_MALFORMED; } mTimeToSampleCount = U32_AT(&header[4]); if (mTimeToSampleCount > UINT32_MAX / (2 * sizeof(uint32_t))) { ALOGE("Time-to-sample table size too large."); return ERROR_OUT_OF_RANGE; } uint64_t allocSize = (uint64_t)mTimeToSampleCount * 2 * sizeof(uint32_t); mTotalSize += allocSize; if (mTotalSize > kMaxTotalSize) { ALOGE("Time-to-sample table size would make sample table too large.\n" " Requested time-to-sample table size = %llu\n" " Eventual sample table size >= %llu\n" " Allowed sample table size = %llu\n", (unsigned long long)allocSize, (unsigned long long)mTotalSize, (unsigned long long)kMaxTotalSize); return ERROR_OUT_OF_RANGE; } mTimeToSample = new (std::nothrow) uint32_t[mTimeToSampleCount * 2]; if (!mTimeToSample) { ALOGE("Cannot allocate time-to-sample table with %llu entries.", (unsigned long long)mTimeToSampleCount); return ERROR_OUT_OF_RANGE; } if (mDataSource->readAt(data_offset + 8, mTimeToSample, (size_t)allocSize) < (ssize_t)allocSize) { ALOGE("Incomplete data read for time-to-sample table."); return ERROR_IO; } for (size_t i = 0; i < mTimeToSampleCount * 2; ++i) { mTimeToSample[i] = ntohl(mTimeToSample[i]); } mHasTimeToSample = true; return OK; }
2,027
182,290
1
int ff_mov_write_packet(AVFormatContext *s, AVPacket *pkt) { MOVMuxContext *mov = s->priv_data; AVIOContext *pb = s->pb; MOVTrack *trk = &mov->tracks[pkt->stream_index]; AVCodecParameters *par = trk->par; unsigned int samples_in_chunk = 0; int size = pkt->size, ret = 0; uint8_t *reformatted_data = NULL; ret = check_pkt(s, pkt); if (ret < 0) return ret; if (mov->flags & FF_MOV_FLAG_FRAGMENT) { int ret; if (mov->moov_written || mov->flags & FF_MOV_FLAG_EMPTY_MOOV) { if (mov->frag_interleave && mov->fragments > 0) { if (trk->entry - trk->entries_flushed >= mov->frag_interleave) { if ((ret = mov_flush_fragment_interleaving(s, trk)) < 0) return ret; } } if (!trk->mdat_buf) { if ((ret = avio_open_dyn_buf(&trk->mdat_buf)) < 0) return ret; } pb = trk->mdat_buf; } else { if (!mov->mdat_buf) { if ((ret = avio_open_dyn_buf(&mov->mdat_buf)) < 0) return ret; } pb = mov->mdat_buf; } } if (par->codec_id == AV_CODEC_ID_AMR_NB) { /* We must find out how many AMR blocks there are in one packet */ static const uint16_t packed_size[16] = {13, 14, 16, 18, 20, 21, 27, 32, 6, 0, 0, 0, 0, 0, 0, 1}; int len = 0; while (len < size && samples_in_chunk < 100) { len += packed_size[(pkt->data[len] >> 3) & 0x0F]; samples_in_chunk++; } if (samples_in_chunk > 1) { av_log(s, AV_LOG_ERROR, "fatal error, input is not a single packet, implement a AVParser for it\n"); return -1; } } else if (par->codec_id == AV_CODEC_ID_ADPCM_MS || par->codec_id == AV_CODEC_ID_ADPCM_IMA_WAV) { samples_in_chunk = trk->par->frame_size; } else if (trk->sample_size) samples_in_chunk = size / trk->sample_size; else samples_in_chunk = 1; /* copy extradata if it exists */ if (trk->vos_len == 0 && par->extradata_size > 0 && !TAG_IS_AVCI(trk->tag) && (par->codec_id != AV_CODEC_ID_DNXHD)) { trk->vos_len = par->extradata_size; trk->vos_data = av_malloc(trk->vos_len); if (!trk->vos_data) { ret = AVERROR(ENOMEM); goto err; } memcpy(trk->vos_data, par->extradata, trk->vos_len); } if (par->codec_id == AV_CODEC_ID_AAC && pkt->size > 2 && (AV_RB16(pkt->data) & 0xfff0) == 0xfff0) { if (!s->streams[pkt->stream_index]->nb_frames) { av_log(s, AV_LOG_ERROR, "Malformed AAC bitstream detected: " "use the audio bitstream filter 'aac_adtstoasc' to fix it " "('-bsf:a aac_adtstoasc' option with ffmpeg)\n"); return -1; } av_log(s, AV_LOG_WARNING, "aac bitstream error\n"); } if (par->codec_id == AV_CODEC_ID_H264 && trk->vos_len > 0 && *(uint8_t *)trk->vos_data != 1 && !TAG_IS_AVCI(trk->tag)) { /* from x264 or from bytestream H.264 */ /* NAL reformatting needed */ if (trk->hint_track >= 0 && trk->hint_track < mov->nb_streams) { ff_avc_parse_nal_units_buf(pkt->data, &reformatted_data, &size); avio_write(pb, reformatted_data, size); } else { if (trk->cenc.aes_ctr) { size = ff_mov_cenc_avc_parse_nal_units(&trk->cenc, pb, pkt->data, size); if (size < 0) { ret = size; goto err; } } else { size = ff_avc_parse_nal_units(pb, pkt->data, pkt->size); } } } else if (par->codec_id == AV_CODEC_ID_HEVC && trk->vos_len > 6 && (AV_RB24(trk->vos_data) == 1 || AV_RB32(trk->vos_data) == 1)) { /* extradata is Annex B, assume the bitstream is too and convert it */ if (trk->hint_track >= 0 && trk->hint_track < mov->nb_streams) { ff_hevc_annexb2mp4_buf(pkt->data, &reformatted_data, &size, 0, NULL); avio_write(pb, reformatted_data, size); } else { size = ff_hevc_annexb2mp4(pb, pkt->data, pkt->size, 0, NULL); } #if CONFIG_AC3_PARSER } else if (par->codec_id == AV_CODEC_ID_EAC3) { size = handle_eac3(mov, pkt, trk); if (size < 0) return size; else if (!size) goto end; avio_write(pb, pkt->data, size); 
#endif } else { if (trk->cenc.aes_ctr) { if (par->codec_id == AV_CODEC_ID_H264 && par->extradata_size > 4) { int nal_size_length = (par->extradata[4] & 0x3) + 1; ret = ff_mov_cenc_avc_write_nal_units(s, &trk->cenc, nal_size_length, pb, pkt->data, size); } else { ret = ff_mov_cenc_write_packet(&trk->cenc, pb, pkt->data, size); } if (ret) { goto err; } } else { avio_write(pb, pkt->data, size); } } if ((par->codec_id == AV_CODEC_ID_DNXHD || par->codec_id == AV_CODEC_ID_AC3) && !trk->vos_len) { /* copy frame to create needed atoms */ trk->vos_len = size; trk->vos_data = av_malloc(size); if (!trk->vos_data) { ret = AVERROR(ENOMEM); goto err; } memcpy(trk->vos_data, pkt->data, size); } if (trk->entry >= trk->cluster_capacity) { unsigned new_capacity = 2 * (trk->entry + MOV_INDEX_CLUSTER_SIZE); if (av_reallocp_array(&trk->cluster, new_capacity, sizeof(*trk->cluster))) { ret = AVERROR(ENOMEM); goto err; } trk->cluster_capacity = new_capacity; } trk->cluster[trk->entry].pos = avio_tell(pb) - size; trk->cluster[trk->entry].samples_in_chunk = samples_in_chunk; trk->cluster[trk->entry].chunkNum = 0; trk->cluster[trk->entry].size = size; trk->cluster[trk->entry].entries = samples_in_chunk; trk->cluster[trk->entry].dts = pkt->dts; trk->cluster[trk->entry].pts = pkt->pts; if (!trk->entry && trk->start_dts != AV_NOPTS_VALUE) { if (!trk->frag_discont) { /* First packet of a new fragment. We already wrote the duration * of the last packet of the previous fragment based on track_duration, * which might not exactly match our dts. Therefore adjust the dts * of this packet to be what the previous packets duration implies. */ trk->cluster[trk->entry].dts = trk->start_dts + trk->track_duration; /* We also may have written the pts and the corresponding duration * in sidx/tfrf/tfxd tags; make sure the sidx pts and duration match up with * the next fragment. This means the cts of the first sample must * be the same in all fragments, unless end_pts was updated by * the packet causing the fragment to be written. */ if ((mov->flags & FF_MOV_FLAG_DASH && !(mov->flags & FF_MOV_FLAG_GLOBAL_SIDX)) || mov->mode == MODE_ISM) pkt->pts = pkt->dts + trk->end_pts - trk->cluster[trk->entry].dts; } else { /* New fragment, but discontinuous from previous fragments. * Pretend the duration sum of the earlier fragments is * pkt->dts - trk->start_dts. */ trk->frag_start = pkt->dts - trk->start_dts; trk->end_pts = AV_NOPTS_VALUE; trk->frag_discont = 0; } } if (!trk->entry && trk->start_dts == AV_NOPTS_VALUE && !mov->use_editlist && s->avoid_negative_ts == AVFMT_AVOID_NEG_TS_MAKE_ZERO) { /* Not using edit lists and shifting the first track to start from zero. * If the other streams start from a later timestamp, we won't be able * to signal the difference in starting time without an edit list. * Thus move the timestamp for this first sample to 0, increasing * its duration instead. */ trk->cluster[trk->entry].dts = trk->start_dts = 0; } if (trk->start_dts == AV_NOPTS_VALUE) { trk->start_dts = pkt->dts; if (trk->frag_discont) { if (mov->use_editlist) { /* Pretend the whole stream started at pts=0, with earlier fragments * already written. If the stream started at pts=0, the duration sum * of earlier fragments would have been pkt->pts. */ trk->frag_start = pkt->pts; trk->start_dts = pkt->dts - pkt->pts; } else { /* Pretend the whole stream started at dts=0, with earlier fragments * already written, with a duration summing up to pkt->dts. 
*/ trk->frag_start = pkt->dts; trk->start_dts = 0; } trk->frag_discont = 0; } else if (pkt->dts && mov->moov_written) av_log(s, AV_LOG_WARNING, "Track %d starts with a nonzero dts %"PRId64", while the moov " "already has been written. Set the delay_moov flag to handle " "this case.\n", pkt->stream_index, pkt->dts); } trk->track_duration = pkt->dts - trk->start_dts + pkt->duration; trk->last_sample_is_subtitle_end = 0; if (pkt->pts == AV_NOPTS_VALUE) { av_log(s, AV_LOG_WARNING, "pts has no value\n"); pkt->pts = pkt->dts; } if (pkt->dts != pkt->pts) trk->flags |= MOV_TRACK_CTTS; trk->cluster[trk->entry].cts = pkt->pts - pkt->dts; trk->cluster[trk->entry].flags = 0; if (trk->start_cts == AV_NOPTS_VALUE) trk->start_cts = pkt->pts - pkt->dts; if (trk->end_pts == AV_NOPTS_VALUE) trk->end_pts = trk->cluster[trk->entry].dts + trk->cluster[trk->entry].cts + pkt->duration; else trk->end_pts = FFMAX(trk->end_pts, trk->cluster[trk->entry].dts + trk->cluster[trk->entry].cts + pkt->duration); if (par->codec_id == AV_CODEC_ID_VC1) { mov_parse_vc1_frame(pkt, trk); } else if (pkt->flags & AV_PKT_FLAG_KEY) { if (mov->mode == MODE_MOV && par->codec_id == AV_CODEC_ID_MPEG2VIDEO && trk->entry > 0) { // force sync sample for the first key frame mov_parse_mpeg2_frame(pkt, &trk->cluster[trk->entry].flags); if (trk->cluster[trk->entry].flags & MOV_PARTIAL_SYNC_SAMPLE) trk->flags |= MOV_TRACK_STPS; } else { trk->cluster[trk->entry].flags = MOV_SYNC_SAMPLE; } if (trk->cluster[trk->entry].flags & MOV_SYNC_SAMPLE) trk->has_keyframes++; } if (pkt->flags & AV_PKT_FLAG_DISPOSABLE) { trk->cluster[trk->entry].flags |= MOV_DISPOSABLE_SAMPLE; trk->has_disposable++; } trk->entry++; trk->sample_count += samples_in_chunk; mov->mdat_size += size; if (trk->hint_track >= 0 && trk->hint_track < mov->nb_streams) ff_mov_add_hinted_packet(s, pkt, trk->hint_track, trk->entry, reformatted_data, size); end: err: av_free(reformatted_data); return ret; }
2,028
153,996
0
GLsync GLES2DecoderImpl::DoFenceSync(GLenum condition, GLbitfield flags) { const char* function_name = "glFenceSync"; if (condition != GL_SYNC_GPU_COMMANDS_COMPLETE) { LOCAL_SET_GL_ERROR(GL_INVALID_ENUM, function_name, "invalid condition"); return 0; } if (flags != 0) { LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, function_name, "invalid flags"); return 0; } return api()->glFenceSyncFn(condition, flags); }
2,029
80,929
0
static inline u16 evmcs_read16(unsigned long field) { return 0; }
2,030
93,610
0
nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport) { struct nvmet_fc_ls_iod *iod = tgtport->iod; int i; for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) { fc_dma_unmap_single(tgtport->dev, iod->rspdma, NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE); kfree(iod->rqstbuf); list_del(&iod->ls_list); } kfree(tgtport->iod); }
2,031
101,031
0
UsageTracker* QuotaManager::GetUsageTracker(StorageType type) const { switch (type) { case kStorageTypeTemporary: return temporary_usage_tracker_.get(); case kStorageTypePersistent: return persistent_usage_tracker_.get(); default: NOTREACHED(); } return NULL; }
2,032
119,587
0
static inline InlineBox* createInlineBoxForRenderer(RenderObject* obj, bool isRootLineBox, bool isOnlyRun = false) { if (isRootLineBox) return toRenderBlock(obj)->createAndAppendRootInlineBox(); if (obj->isText()) { InlineTextBox* textBox = toRenderText(obj)->createInlineTextBox(); if (obj->isBR()) textBox->setIsText(isOnlyRun || obj->document().inNoQuirksMode()); return textBox; } if (obj->isBox()) return toRenderBox(obj)->createInlineBox(); return toRenderInline(obj)->createAndAppendInlineFlowBox(); }
2,033
126,034
0
PasswordStoreLoginsChangedObserver::PasswordStoreLoginsChangedObserver( AutomationProvider* automation, IPC::Message* reply_message, PasswordStoreChange::Type expected_type, const std::string& result_key) : automation_(automation->AsWeakPtr()), reply_message_(reply_message), expected_type_(expected_type), result_key_(result_key), done_event_(false, false) { AddRef(); }
2,034
142,902
0
void HTMLMediaElement::SetNetworkState(NetworkState state) { if (network_state_ == state) return; network_state_ = state; if (GetMediaControls()) GetMediaControls()->NetworkStateChanged(); }
2,035
717
0
int dns_dn_label_to_str(const char *dn, int dn_len, char *str, int str_len) { char *ptr; int i, sz; if (str_len < dn_len - 1) return -1; ptr = str; for (i = 0; i < dn_len-1; ++i) { sz = dn[i]; if (i) *ptr++ = '.'; memcpy(ptr, dn+i+1, sz); ptr += sz; i += sz; } *ptr++ = '\0'; return (ptr - str); }
2,036
73,751
0
smb_ofile_close_and_next(smb_ofile_t *of) { smb_ofile_t *next_of; smb_tree_t *tree; ASSERT(of); ASSERT(of->f_magic == SMB_OFILE_MAGIC); mutex_enter(&of->f_mutex); switch (of->f_state) { case SMB_OFILE_STATE_OPEN: /* The file is still open. */ of->f_refcnt++; ASSERT(of->f_refcnt); tree = of->f_tree; mutex_exit(&of->f_mutex); smb_llist_exit(&of->f_tree->t_ofile_list); smb_ofile_close(of, 0); smb_ofile_release(of); smb_llist_enter(&tree->t_ofile_list, RW_READER); next_of = smb_llist_head(&tree->t_ofile_list); break; case SMB_OFILE_STATE_CLOSING: case SMB_OFILE_STATE_CLOSED: /* * The ofile exists but is closed or * in the process being closed. */ mutex_exit(&of->f_mutex); next_of = smb_llist_next(&of->f_tree->t_ofile_list, of); break; default: ASSERT(0); mutex_exit(&of->f_mutex); next_of = smb_llist_next(&of->f_tree->t_ofile_list, of); break; } return (next_of); }
2,037
87,960
0
static inline void next_request(blk_status_t err) { unsigned long saved_flags; spin_lock_irqsave(&pcd_lock, saved_flags); if (!blk_update_request(pcd_req, err, blk_rq_cur_bytes(pcd_req))) { __blk_mq_end_request(pcd_req, err); pcd_req = NULL; } pcd_busy = 0; pcd_request(); spin_unlock_irqrestore(&pcd_lock, saved_flags); }
2,038
111,562
0
static JSValueRef touchCancelCallback(JSContextRef context, JSObjectRef function, JSObjectRef thisObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception) { notImplemented(); return JSValueMakeUndefined(context); }
2,039
178,100
1
static int mptsas_process_scsi_io_request(MPTSASState *s, MPIMsgSCSIIORequest *scsi_io, hwaddr addr) { MPTSASRequest *req; MPIMsgSCSIIOReply reply; SCSIDevice *sdev; int status; mptsas_fix_scsi_io_endianness(scsi_io); trace_mptsas_process_scsi_io_request(s, scsi_io->Bus, scsi_io->TargetID, scsi_io->LUN[1], scsi_io->DataLength); status = mptsas_scsi_device_find(s, scsi_io->Bus, scsi_io->TargetID, scsi_io->LUN, &sdev); if (status) { goto bad; } req = g_new(MPTSASRequest, 1); QTAILQ_INSERT_TAIL(&s->pending, req, next); req->scsi_io = *scsi_io; req->dev = s; status = mptsas_build_sgl(s, req, addr); if (status) { goto free_bad; } if (req->qsg.size < scsi_io->DataLength) { trace_mptsas_sgl_overflow(s, scsi_io->MsgContext, scsi_io->DataLength, req->qsg.size); status = MPI_IOCSTATUS_INVALID_SGL; goto free_bad; } req->sreq = scsi_req_new(sdev, scsi_io->MsgContext, scsi_io->LUN[1], scsi_io->CDB, req); if (req->sreq->cmd.xfer > scsi_io->DataLength) { goto overrun; } switch (scsi_io->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK) { case MPI_SCSIIO_CONTROL_NODATATRANSFER: if (req->sreq->cmd.mode != SCSI_XFER_NONE) { goto overrun; } break; case MPI_SCSIIO_CONTROL_WRITE: if (req->sreq->cmd.mode != SCSI_XFER_TO_DEV) { goto overrun; } break; case MPI_SCSIIO_CONTROL_READ: if (req->sreq->cmd.mode != SCSI_XFER_FROM_DEV) { goto overrun; } break; } if (scsi_req_enqueue(req->sreq)) { scsi_req_continue(req->sreq); } return 0; overrun: trace_mptsas_scsi_overflow(s, scsi_io->MsgContext, req->sreq->cmd.xfer, scsi_io->DataLength); status = MPI_IOCSTATUS_SCSI_DATA_OVERRUN; free_bad: mptsas_free_request(req); bad: memset(&reply, 0, sizeof(reply)); reply.TargetID = scsi_io->TargetID; reply.Bus = scsi_io->Bus; reply.MsgLength = sizeof(reply) / 4; reply.Function = scsi_io->Function; reply.CDBLength = scsi_io->CDBLength; reply.SenseBufferLength = scsi_io->SenseBufferLength; reply.MsgContext = scsi_io->MsgContext; reply.SCSIState = MPI_SCSI_STATE_NO_SCSI_STATUS; reply.IOCStatus = status; mptsas_fix_scsi_io_reply_endianness(&reply); mptsas_reply(s, (MPIDefaultReply *)&reply); return 0; }
2,040
87,939
0
static void fm10k_receive_skb(struct fm10k_q_vector *q_vector, struct sk_buff *skb) { napi_gro_receive(&q_vector->napi, skb); }
2,041
143,397
0
void Job::StartCreateV8Resolver( const scoped_refptr<ProxyResolverScriptData>& script_data, scoped_ptr<ProxyResolverV8>* resolver, const CompletionCallback& callback) { CheckIsOnOriginThread(); resolver_out_ = resolver; script_data_ = script_data; Start(CREATE_V8_RESOLVER, true /*blocking*/, callback); }
2,042
144,364
0
int ExtensionInstallPrompt::Prompt::GetDialogButtons() const { if (type_ == POST_INSTALL_PERMISSIONS_PROMPT && ShouldDisplayRevokeButton()) { return kButtons[type_] | ui::DIALOG_BUTTON_OK; } return kButtons[type_]; }
2,043
105,181
0
AttributeChange(PassRefPtr<Element> element, const QualifiedName& name, const String& value) : m_element(element), m_name(name), m_value(value) { }
2,044
139,507
0
static bool ExecuteSelectToMark(LocalFrame& frame, Event*, EditorCommandSource, const String&) { const EphemeralRange mark = frame.GetEditor().Mark().ToNormalizedEphemeralRange(); EphemeralRange selection = frame.GetEditor().SelectedRange(); if (mark.IsNull() || selection.IsNull()) return false; frame.Selection().SetSelection( SelectionInDOMTree::Builder() .SetBaseAndExtent(UnionEphemeralRanges(mark, selection)) .Build(), FrameSelection::kCloseTyping); return true; }
2,045
108,584
0
GtkWidget* AddMenuEntry(GtkWidget* menu_widget, const char* text, GCallback callback, Shell* shell) { GtkWidget* entry = gtk_menu_item_new_with_label(text); g_signal_connect(entry, "activate", callback, shell); gtk_menu_shell_append(GTK_MENU_SHELL(menu_widget), entry); return entry; }
2,046
148,726
0
V4L2JpegEncodeAccelerator::JpegBufferRecord::JpegBufferRecord() : at_device(false) { memset(address, 0, sizeof(address)); memset(length, 0, sizeof(length)); }
2,047
82,478
0
bool jsvIsNative(const JsVar *v) { return v && (v->flags&JSV_NATIVE)!=0; }
2,048
144,829
0
TabStripModel* tsm() { return browser()->tab_strip_model(); }
2,049
137,084
0
double InputType::Minimum() const { return CreateStepRange(kRejectAny).Minimum().ToDouble(); }
2,050
6,978
0
tt_cmap4_validate( FT_Byte* table, FT_Validator valid ) { FT_Byte* p; FT_UInt length; FT_Byte *ends, *starts, *offsets, *deltas, *glyph_ids; FT_UInt num_segs; FT_Error error = FT_Err_Ok; if ( table + 2 + 2 > valid->limit ) FT_INVALID_TOO_SHORT; p = table + 2; /* skip format */ length = TT_NEXT_USHORT( p ); if ( length < 16 ) FT_INVALID_TOO_SHORT; /* in certain fonts, the `length' field is invalid and goes */ /* out of bound. We try to correct this here... */ if ( table + length > valid->limit ) { if ( valid->level >= FT_VALIDATE_TIGHT ) FT_INVALID_TOO_SHORT; length = (FT_UInt)( valid->limit - table ); } p = table + 6; num_segs = TT_NEXT_USHORT( p ); /* read segCountX2 */ if ( valid->level >= FT_VALIDATE_PARANOID ) { /* check that we have an even value here */ if ( num_segs & 1 ) FT_INVALID_DATA; } num_segs /= 2; if ( length < 16 + num_segs * 2 * 4 ) FT_INVALID_TOO_SHORT; /* check the search parameters - even though we never use them */ /* */ if ( valid->level >= FT_VALIDATE_PARANOID ) { /* check the values of `searchRange', `entrySelector', `rangeShift' */ FT_UInt search_range = TT_NEXT_USHORT( p ); FT_UInt entry_selector = TT_NEXT_USHORT( p ); FT_UInt range_shift = TT_NEXT_USHORT( p ); if ( ( search_range | range_shift ) & 1 ) /* must be even values */ FT_INVALID_DATA; search_range /= 2; range_shift /= 2; /* `search range' is the greatest power of 2 that is <= num_segs */ if ( search_range > num_segs || search_range * 2 < num_segs || search_range + range_shift != num_segs || search_range != ( 1U << entry_selector ) ) FT_INVALID_DATA; } ends = table + 14; starts = table + 16 + num_segs * 2; deltas = starts + num_segs * 2; offsets = deltas + num_segs * 2; glyph_ids = offsets + num_segs * 2; /* check last segment; its end count value must be 0xFFFF */ if ( valid->level >= FT_VALIDATE_PARANOID ) { p = ends + ( num_segs - 1 ) * 2; if ( TT_PEEK_USHORT( p ) != 0xFFFFU ) FT_INVALID_DATA; } { FT_UInt start, end, offset, n; FT_UInt last_start = 0, last_end = 0; FT_Int delta; FT_Byte* p_start = starts; FT_Byte* p_end = ends; FT_Byte* p_delta = deltas; FT_Byte* p_offset = offsets; for ( n = 0; n < num_segs; n++ ) { p = p_offset; start = TT_NEXT_USHORT( p_start ); end = TT_NEXT_USHORT( p_end ); delta = TT_NEXT_SHORT( p_delta ); offset = TT_NEXT_USHORT( p_offset ); if ( start > end ) FT_INVALID_DATA; /* this test should be performed at default validation level; */ /* unfortunately, some popular Asian fonts have overlapping */ /* ranges in their charmaps */ /* */ if ( start <= last_end && n > 0 ) { if ( valid->level >= FT_VALIDATE_TIGHT ) FT_INVALID_DATA; else { /* allow overlapping segments, provided their start points */ /* and end points, respectively, are in ascending order */ /* */ if ( last_start > start || last_end > end ) error |= TT_CMAP_FLAG_UNSORTED; else error |= TT_CMAP_FLAG_OVERLAPPING; } } if ( offset && offset != 0xFFFFU ) { p += offset; /* start of glyph ID array */ /* check that we point within the glyph IDs table only */ if ( valid->level >= FT_VALIDATE_TIGHT ) { if ( p < glyph_ids || p + ( end - start + 1 ) * 2 > table + length ) FT_INVALID_DATA; } /* Some fonts handle the last segment incorrectly. In */ /* theory, 0xFFFF might point to an ordinary glyph -- */ /* a cmap 4 is versatile and could be used for any */ /* encoding, not only Unicode. However, reality shows */ /* that far too many fonts are sloppy and incorrectly */ /* set all fields but `start' and `end' for the last */ /* segment if it contains only a single character. 
*/ /* */ /* We thus omit the test here, delaying it to the */ /* routines which actually access the cmap. */ else if ( n != num_segs - 1 || !( start == 0xFFFFU && end == 0xFFFFU ) ) { if ( p < glyph_ids || p + ( end - start + 1 ) * 2 > valid->limit ) FT_INVALID_DATA; } /* check glyph indices within the segment range */ if ( valid->level >= FT_VALIDATE_TIGHT ) { FT_UInt i, idx; for ( i = start; i < end; i++ ) { idx = FT_NEXT_USHORT( p ); if ( idx != 0 ) { idx = (FT_UInt)( idx + delta ) & 0xFFFFU; if ( idx >= TT_VALID_GLYPH_COUNT( valid ) ) FT_INVALID_GLYPH_ID; } } } } else if ( offset == 0xFFFFU ) { /* some fonts (erroneously?) use a range offset of 0xFFFF */ /* to mean missing glyph in cmap table */ /* */ if ( valid->level >= FT_VALIDATE_PARANOID || n != num_segs - 1 || !( start == 0xFFFFU && end == 0xFFFFU ) ) FT_INVALID_DATA; } last_start = start; last_end = end; } } return error; }
2,051
93,147
0
print_trans2(netdissect_options *ndo, const u_char *words, const u_char *dat, const u_char *buf, const u_char *maxbuf) { u_int bcc; static const struct smbfnsint *fn = &trans2_fns[0]; const u_char *data, *param; const u_char *w = words + 1; const char *f1 = NULL, *f2 = NULL; int pcnt, dcnt; ND_TCHECK(words[0]); if (request) { ND_TCHECK2(w[14 * 2], 2); pcnt = EXTRACT_LE_16BITS(w + 9 * 2); param = buf + EXTRACT_LE_16BITS(w + 10 * 2); dcnt = EXTRACT_LE_16BITS(w + 11 * 2); data = buf + EXTRACT_LE_16BITS(w + 12 * 2); fn = smbfindint(EXTRACT_LE_16BITS(w + 14 * 2), trans2_fns); } else { if (words[0] == 0) { ND_PRINT((ndo, "%s\n", fn->name)); ND_PRINT((ndo, "Trans2Interim\n")); return; } ND_TCHECK2(w[7 * 2], 2); pcnt = EXTRACT_LE_16BITS(w + 3 * 2); param = buf + EXTRACT_LE_16BITS(w + 4 * 2); dcnt = EXTRACT_LE_16BITS(w + 6 * 2); data = buf + EXTRACT_LE_16BITS(w + 7 * 2); } ND_PRINT((ndo, "%s param_length=%d data_length=%d\n", fn->name, pcnt, dcnt)); if (request) { if (words[0] == 8) { smb_fdata(ndo, words + 1, "Trans2Secondary\nTotParam=[d]\nTotData=[d]\nParamCnt=[d]\nParamOff=[d]\nParamDisp=[d]\nDataCnt=[d]\nDataOff=[d]\nDataDisp=[d]\nHandle=[d]\n", maxbuf, unicodestr); return; } else { smb_fdata(ndo, words + 1, "TotParam=[d]\nTotData=[d]\nMaxParam=[d]\nMaxData=[d]\nMaxSetup=[b][P1]\nFlags=[w]\nTimeOut=[D]\nRes1=[w]\nParamCnt=[d]\nParamOff=[d]\nDataCnt=[d]\nDataOff=[d]\nSetupCnt=[b][P1]\n", words + 1 + 14 * 2, unicodestr); } f1 = fn->descript.req_f1; f2 = fn->descript.req_f2; } else { smb_fdata(ndo, words + 1, "TotParam=[d]\nTotData=[d]\nRes1=[w]\nParamCnt=[d]\nParamOff=[d]\nParamDisp[d]\nDataCnt=[d]\nDataOff=[d]\nDataDisp=[d]\nSetupCnt=[b][P1]\n", words + 1 + 10 * 2, unicodestr); f1 = fn->descript.rep_f1; f2 = fn->descript.rep_f2; } ND_TCHECK2(*dat, 2); bcc = EXTRACT_LE_16BITS(dat); ND_PRINT((ndo, "smb_bcc=%u\n", bcc)); if (fn->descript.fn) (*fn->descript.fn)(ndo, param, data, pcnt, dcnt); else { smb_fdata(ndo, param, f1 ? f1 : "Parameters=\n", param + pcnt, unicodestr); smb_fdata(ndo, data, f2 ? f2 : "Data=\n", data + dcnt, unicodestr); } return; trunc: ND_PRINT((ndo, "%s", tstr)); }
2,052
57,466
0
static int ext4_valid_extent_idx(struct inode *inode, struct ext4_extent_idx *ext_idx) { ext4_fsblk_t block = idx_pblock(ext_idx); return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1); }
2,053
168,955
0
void TerminateServiceWorkerOnIO( base::WeakPtr<ServiceWorkerContextCore> context_weak, int64_t version_id) { if (ServiceWorkerContextCore* context = context_weak.get()) { if (ServiceWorkerVersion* version = context->GetLiveVersion(version_id)) version->StopWorker(base::DoNothing()); } }
2,054
177,239
0
void ACodec::initiateConfigureComponent(const sp<AMessage> &msg) { msg->setWhat(kWhatConfigureComponent); msg->setTarget(this); msg->post(); }
2,055
21,416
0
sector_t swapdev_block(int type, pgoff_t offset) { struct block_device *bdev; if ((unsigned int)type >= nr_swapfiles) return 0; if (!(swap_info[type]->flags & SWP_WRITEOK)) return 0; return map_swap_entry(swp_entry(type, offset), &bdev); }
2,056
164,107
0
bool AppCacheDatabase::FindLastStorageIds( int64_t* last_group_id, int64_t* last_cache_id, int64_t* last_response_id, int64_t* last_deletable_response_rowid) { DCHECK(last_group_id && last_cache_id && last_response_id && last_deletable_response_rowid); *last_group_id = 0; *last_cache_id = 0; *last_response_id = 0; *last_deletable_response_rowid = 0; if (!LazyOpen(kDontCreate)) return false; static const char kMaxGroupIdSql[] = "SELECT MAX(group_id) FROM Groups"; static const char kMaxCacheIdSql[] = "SELECT MAX(cache_id) FROM Caches"; static const char kMaxResponseIdFromEntriesSql[] = "SELECT MAX(response_id) FROM Entries"; static const char kMaxResponseIdFromDeletablesSql[] = "SELECT MAX(response_id) FROM DeletableResponseIds"; static const char kMaxDeletableResponseRowIdSql[] = "SELECT MAX(rowid) FROM DeletableResponseIds"; int64_t max_group_id; int64_t max_cache_id; int64_t max_response_id_from_entries; int64_t max_response_id_from_deletables; int64_t max_deletable_response_rowid; if (!RunUniqueStatementWithInt64Result(kMaxGroupIdSql, &max_group_id) || !RunUniqueStatementWithInt64Result(kMaxCacheIdSql, &max_cache_id) || !RunUniqueStatementWithInt64Result(kMaxResponseIdFromEntriesSql, &max_response_id_from_entries) || !RunUniqueStatementWithInt64Result(kMaxResponseIdFromDeletablesSql, &max_response_id_from_deletables) || !RunUniqueStatementWithInt64Result(kMaxDeletableResponseRowIdSql, &max_deletable_response_rowid)) { return false; } *last_group_id = max_group_id; *last_cache_id = max_cache_id; *last_response_id = std::max(max_response_id_from_entries, max_response_id_from_deletables); *last_deletable_response_rowid = max_deletable_response_rowid; return true; }
2,057
169,555
0
v8::Local<v8::Object> ToEnumObject(v8::Isolate* isolate, EnumType start_after, EnumType end_at) { v8::Local<v8::Object> object = v8::Object::New(isolate); for (int i = start_after + 1; i <= end_at; ++i) { v8::Local<v8::String> value = v8::String::NewFromUtf8( isolate, ui::ToString(static_cast<EnumType>(i)).c_str()); object->Set(value, value); } return object; }
2,058
105,020
0
SyncBackendHostTest() : ui_thread_(BrowserThread::UI, &ui_loop_), io_thread_(BrowserThread::IO) {}
2,059
27,308
0
static bool ldm_create_data_partitions (struct parsed_partitions *pp, const struct ldmdb *ldb) { struct list_head *item; struct vblk *vb; struct vblk *disk; struct vblk_part *part; int part_num = 1; BUG_ON (!pp || !ldb); disk = ldm_get_disk_objid (ldb); if (!disk) { ldm_crit ("Can't find the ID of this disk in the database."); return false; } strlcat(pp->pp_buf, " [LDM]", PAGE_SIZE); /* Create the data partitions */ list_for_each (item, &ldb->v_part) { vb = list_entry (item, struct vblk, list); part = &vb->vblk.part; if (part->disk_id != disk->obj_id) continue; put_partition (pp, part_num, ldb->ph.logical_disk_start + part->start, part->size); part_num++; } strlcat(pp->pp_buf, "\n", PAGE_SIZE); return true; }
2,060
126,962
0
bool AudioInputRendererHost::OnMessageReceived(const IPC::Message& message, bool* message_was_ok) { bool handled = true; IPC_BEGIN_MESSAGE_MAP_EX(AudioInputRendererHost, message, *message_was_ok) IPC_MESSAGE_HANDLER(AudioInputHostMsg_StartDevice, OnStartDevice) IPC_MESSAGE_HANDLER(AudioInputHostMsg_CreateStream, OnCreateStream) IPC_MESSAGE_HANDLER(AudioInputHostMsg_AssociateStreamWithConsumer, OnAssociateStreamWithConsumer) IPC_MESSAGE_HANDLER(AudioInputHostMsg_RecordStream, OnRecordStream) IPC_MESSAGE_HANDLER(AudioInputHostMsg_CloseStream, OnCloseStream) IPC_MESSAGE_HANDLER(AudioInputHostMsg_SetVolume, OnSetVolume) IPC_MESSAGE_UNHANDLED(handled = false) IPC_END_MESSAGE_MAP_EX() return handled; }
2,061
46,627
0
static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[]) { struct crypto_aead *aead = crypto_aead_reqtfm(req); struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead); struct __packed { __be16 l; __be32 h; u16 len; } ltag; struct scatter_walk walk; u32 len = req->assoclen; u32 macp = 0; /* prepend the AAD with a length tag */ if (len < 0xff00) { ltag.l = cpu_to_be16(len); ltag.len = 2; } else { ltag.l = cpu_to_be16(0xfffe); put_unaligned_be32(len, &ltag.h); ltag.len = 6; } ce_aes_ccm_auth_data(mac, (u8 *)&ltag, ltag.len, &macp, ctx->key_enc, num_rounds(ctx)); scatterwalk_start(&walk, req->assoc); do { u32 n = scatterwalk_clamp(&walk, len); u8 *p; if (!n) { scatterwalk_start(&walk, sg_next(walk.sg)); n = scatterwalk_clamp(&walk, len); } p = scatterwalk_map(&walk); ce_aes_ccm_auth_data(mac, p, n, &macp, ctx->key_enc, num_rounds(ctx)); len -= n; scatterwalk_unmap(p); scatterwalk_advance(&walk, n); scatterwalk_done(&walk, 0, len); } while (len); }
2,062
170,797
0
status_t SampleTable::setSyncSampleParams(off64_t data_offset, size_t data_size) { if (mSyncSampleOffset >= 0 || data_size < 8) { return ERROR_MALFORMED; } mSyncSampleOffset = data_offset; uint8_t header[8]; if (mDataSource->readAt( data_offset, header, sizeof(header)) < (ssize_t)sizeof(header)) { return ERROR_IO; } if (U32_AT(header) != 0) { return ERROR_MALFORMED; } mNumSyncSamples = U32_AT(&header[4]); if (mNumSyncSamples < 2) { ALOGV("Table of sync samples is empty or has only a single entry!"); } uint64_t allocSize = mNumSyncSamples * (uint64_t)sizeof(uint32_t); if (allocSize > SIZE_MAX) { return ERROR_OUT_OF_RANGE; } mSyncSamples = new uint32_t[mNumSyncSamples]; size_t size = mNumSyncSamples * sizeof(uint32_t); if (mDataSource->readAt(mSyncSampleOffset + 8, mSyncSamples, size) != (ssize_t)size) { return ERROR_IO; } for (size_t i = 0; i < mNumSyncSamples; ++i) { mSyncSamples[i] = ntohl(mSyncSamples[i]) - 1; } return OK; }
2,063
121,763
0
void UDPSocketLibevent::WriteWatcher::OnFileCanWriteWithoutBlocking(int) { if (!socket_->write_callback_.is_null()) socket_->DidCompleteWrite(); }
2,064
112,097
0
void SyncManager::SyncInternal::OnSyncEngineEvent( const SyncEngineEvent& event) { DCHECK(thread_checker_.CalledOnValidThread()); if (event.what_happened == SyncEngineEvent::SYNC_CYCLE_ENDED) { ModelSafeRoutingInfo enabled_types; registrar_->GetModelSafeRoutingInfo(&enabled_types); { ReadTransaction trans(FROM_HERE, GetUserShare()); Cryptographer* cryptographer = trans.GetCryptographer(); if (cryptographer->has_pending_keys()) { DVLOG(1) << "OnPassPhraseRequired Sent"; sync_pb::EncryptedData pending_keys = cryptographer->GetPendingKeys(); FOR_EACH_OBSERVER(SyncManager::Observer, observers_, OnPassphraseRequired(sync_api::REASON_DECRYPTION, pending_keys)); } else if (!cryptographer->is_ready() && event.snapshot.initial_sync_ended().Has(syncable::NIGORI)) { DVLOG(1) << "OnPassphraseRequired sent because cryptographer is not " << "ready"; FOR_EACH_OBSERVER(SyncManager::Observer, observers_, OnPassphraseRequired(sync_api::REASON_ENCRYPTION, sync_pb::EncryptedData())); } NotifyCryptographerState(cryptographer); allstatus_.SetEncryptedTypes(cryptographer->GetEncryptedTypes()); } if (!initialized_) { LOG(INFO) << "OnSyncCycleCompleted not sent because sync api is not " << "initialized"; return; } if (!event.snapshot.has_more_to_sync()) { WriteTransaction trans(FROM_HERE, GetUserShare()); WriteNode nigori_node(&trans); if (nigori_node.InitByTagLookup(kNigoriTag) == sync_api::BaseNode::INIT_OK) { Cryptographer* cryptographer = trans.GetCryptographer(); UpdateNigoriEncryptionState(cryptographer, &nigori_node); } DVLOG(1) << "Sending OnSyncCycleCompleted"; FOR_EACH_OBSERVER(SyncManager::Observer, observers_, OnSyncCycleCompleted(event.snapshot)); } bool is_notifiable_commit = (event.snapshot.syncer_status().num_successful_commits > 0); if (is_notifiable_commit) { if (sync_notifier_.get()) { const ModelTypeSet changed_types = syncable::ModelTypePayloadMapToEnumSet( event.snapshot.source().types); sync_notifier_->SendNotification(changed_types); } else { DVLOG(1) << "Not sending notification: sync_notifier_ is NULL"; } } } if (event.what_happened == SyncEngineEvent::STOP_SYNCING_PERMANENTLY) { FOR_EACH_OBSERVER(SyncManager::Observer, observers_, OnStopSyncingPermanently()); return; } if (event.what_happened == SyncEngineEvent::CLEAR_SERVER_DATA_SUCCEEDED) { FOR_EACH_OBSERVER(SyncManager::Observer, observers_, OnClearServerDataSucceeded()); return; } if (event.what_happened == SyncEngineEvent::CLEAR_SERVER_DATA_FAILED) { FOR_EACH_OBSERVER(SyncManager::Observer, observers_, OnClearServerDataFailed()); return; } if (event.what_happened == SyncEngineEvent::UPDATED_TOKEN) { FOR_EACH_OBSERVER(SyncManager::Observer, observers_, OnUpdatedToken(event.updated_token)); return; } if (event.what_happened == SyncEngineEvent::ACTIONABLE_ERROR) { FOR_EACH_OBSERVER(SyncManager::Observer, observers_, OnActionableError( event.snapshot.errors().sync_protocol_error)); return; } }
2,065
60,889
0
enumerate_children_callback (GObject *source_object, GAsyncResult *res, gpointer user_data) { DirectoryLoadState *state; GFileEnumerator *enumerator; GError *error; state = user_data; if (state->directory == NULL) { /* Operation was cancelled. Bail out */ directory_load_state_free (state); return; } error = NULL; enumerator = g_file_enumerate_children_finish (G_FILE (source_object), res, &error); if (enumerator == NULL) { directory_load_done (state->directory, error); g_error_free (error); directory_load_state_free (state); return; } else { state->enumerator = enumerator; g_file_enumerator_next_files_async (state->enumerator, DIRECTORY_LOAD_ITEMS_PER_CALLBACK, G_PRIORITY_DEFAULT, state->cancellable, more_files_callback, state); } }
2,066
152,778
0
SparseHistogram::SparseHistogram(const std::string& name) : HistogramBase(name), samples_(new SampleMap(HashMetricName(name))), logged_samples_(new SampleMap(samples_->id())) {}
2,067
60,868
0
count_more_files_callback (GObject *source_object, GAsyncResult *res, gpointer user_data) { DirectoryCountState *state; NautilusDirectory *directory; GError *error; GList *files; state = user_data; directory = state->directory; if (g_cancellable_is_cancelled (state->cancellable)) { /* Operation was cancelled. Bail out */ async_job_end (directory, "directory count"); nautilus_directory_async_state_changed (directory); directory_count_state_free (state); return; } g_assert (directory->details->count_in_progress != NULL); g_assert (directory->details->count_in_progress == state); error = NULL; files = g_file_enumerator_next_files_finish (state->enumerator, res, &error); state->file_count += count_non_skipped_files (files); if (files == NULL) { count_children_done (directory, state->count_file, TRUE, state->file_count); directory_count_state_free (state); } else { g_file_enumerator_next_files_async (state->enumerator, DIRECTORY_LOAD_ITEMS_PER_CALLBACK, G_PRIORITY_DEFAULT, state->cancellable, count_more_files_callback, state); } g_list_free_full (files, g_object_unref); if (error) { g_error_free (error); } }
2,068
66,702
0
static int dvb_usbv2_resume_common(struct dvb_usb_device *d) { int ret = 0, i, active_fe; struct dvb_frontend *fe; dev_dbg(&d->udev->dev, "%s:\n", __func__); for (i = 0; i < MAX_NO_OF_ADAPTER_PER_DEVICE; i++) { active_fe = d->adapter[i].active_fe; if (d->adapter[i].dvb_adap.priv && active_fe != -1) { fe = d->adapter[i].fe[active_fe]; ret = dvb_frontend_resume(fe); /* resume usb streaming */ usb_urb_submitv2(&d->adapter[i].stream, NULL); if (d->props->streaming_ctrl) d->props->streaming_ctrl(fe, 1); d->adapter[i].suspend_resume_active = false; } } /* start remote controller poll */ if (d->rc_polling_active) schedule_delayed_work(&d->rc_query_work, msecs_to_jiffies(d->rc.interval)); return ret; }
2,069
112,303
0
explicit FindViewByID(int route_id) : route_id_(route_id), view_(NULL) { }
2,070
118,582
0
void AppLauncherHandler::CreateAppInfo( const Extension* extension, ExtensionService* service, base::DictionaryValue* value) { value->Clear(); base::string16 short_name = base::UTF8ToUTF16(extension->short_name()); base::i18n::UnadjustStringForLocaleDirection(&short_name); NewTabUI::SetUrlTitleAndDirection( value, short_name, extensions::AppLaunchInfo::GetFullLaunchURL(extension)); base::string16 name = base::UTF8ToUTF16(extension->name()); base::i18n::UnadjustStringForLocaleDirection(&name); NewTabUI::SetFullNameAndDirection(name, value); bool enabled = service->IsExtensionEnabled(extension->id()) && !extensions::ExtensionRegistry::Get(service->GetBrowserContext()) ->GetExtensionById(extension->id(), extensions::ExtensionRegistry::TERMINATED); extensions::GetExtensionBasicInfo(extension, enabled, value); value->SetBoolean("mayDisable", extensions::ExtensionSystem::Get( service->profile())->management_policy()->UserMayModifySettings( extension, NULL)); bool icon_big_exists = true; GURL icon_big = extensions::ExtensionIconSource::GetIconURL( extension, extension_misc::EXTENSION_ICON_LARGE, ExtensionIconSet::MATCH_BIGGER, false, &icon_big_exists); value->SetString("icon_big", icon_big.spec()); value->SetBoolean("icon_big_exists", icon_big_exists); bool icon_small_exists = true; GURL icon_small = extensions::ExtensionIconSource::GetIconURL( extension, extension_misc::EXTENSION_ICON_BITTY, ExtensionIconSet::MATCH_BIGGER, false, &icon_small_exists); value->SetString("icon_small", icon_small.spec()); value->SetBoolean("icon_small_exists", icon_small_exists); value->SetInteger("launch_container", extensions::AppLaunchInfo::GetLaunchContainer(extension)); ExtensionPrefs* prefs = ExtensionPrefs::Get(service->profile()); value->SetInteger("launch_type", extensions::GetLaunchType(prefs, extension)); value->SetBoolean("is_component", extension->location() == extensions::Manifest::COMPONENT); value->SetBoolean("is_webstore", extension->id() == extension_misc::kWebStoreAppId); AppSorting* sorting = prefs->app_sorting(); syncer::StringOrdinal page_ordinal = sorting->GetPageOrdinal(extension->id()); if (!page_ordinal.IsValid()) { page_ordinal = extension->id() == extension_misc::kWebStoreAppId ? sorting->CreateFirstAppPageOrdinal() : sorting->GetNaturalAppPageOrdinal(); sorting->SetPageOrdinal(extension->id(), page_ordinal); } value->SetInteger("page_index", sorting->PageStringOrdinalAsInteger(page_ordinal)); syncer::StringOrdinal app_launch_ordinal = sorting->GetAppLaunchOrdinal(extension->id()); if (!app_launch_ordinal.IsValid()) { app_launch_ordinal = extension->id() == extension_misc::kWebStoreAppId ? sorting->CreateFirstAppLaunchOrdinal(page_ordinal) : sorting->CreateNextAppLaunchOrdinal(page_ordinal); sorting->SetAppLaunchOrdinal(extension->id(), app_launch_ordinal); } value->SetString("app_launch_ordinal", app_launch_ordinal.ToInternalValue()); }
2,071
129,021
0
StateBase* writeDenseArray(uint32_t numProperties, uint32_t length, StateBase* state) { m_writer.writeDenseArray(numProperties, length); return pop(state); }
2,072
149,262
0
bool HTMLFormControlElement::willValidate() const { return ListedElement::WillValidate(); }
2,073
153,956
0
bool GLES2DecoderImpl::DoBindOrCopyTexImageIfNeeded(Texture* texture, GLenum textarget, GLuint texture_unit) { if (texture && !texture->IsAttachedToFramebuffer()) { Texture::ImageState image_state; gl::GLImage* image = texture->GetLevelImage(textarget, 0, &image_state); if (image && image_state == Texture::UNBOUND) { ScopedGLErrorSuppressor suppressor( "GLES2DecoderImpl::DoBindOrCopyTexImageIfNeeded", error_state_.get()); if (texture_unit) api()->glActiveTextureFn(texture_unit); api()->glBindTextureFn(textarget, texture->service_id()); if (image->ShouldBindOrCopy() == gl::GLImage::BIND) { bool rv = image->BindTexImage(textarget); DCHECK(rv) << "BindTexImage() failed"; image_state = Texture::BOUND; } else { DoCopyTexImage(texture, textarget, image); } if (!texture_unit) { RestoreCurrentTextureBindings(&state_, textarget, state_.active_texture_unit); return false; } return true; } } return false; }
2,074
40,544
0
static int __net_init netlink_net_init(struct net *net) { #ifdef CONFIG_PROC_FS if (!proc_create("netlink", 0, net->proc_net, &netlink_seq_fops)) return -ENOMEM; #endif return 0; }
2,075
6,867
0
void inet_set_tos(int fd, const struct sockaddr_storage *from, int tos) { #ifdef IP_TOS if (from->ss_family == AF_INET) setsockopt(fd, IPPROTO_IP, IP_TOS, &tos, sizeof(tos)); #endif #ifdef IPV6_TCLASS if (from->ss_family == AF_INET6) { if (IN6_IS_ADDR_V4MAPPED(&((struct sockaddr_in6 *)from)->sin6_addr)) /* v4-mapped addresses need IP_TOS */ setsockopt(fd, IPPROTO_IP, IP_TOS, &tos, sizeof(tos)); else setsockopt(fd, IPPROTO_IPV6, IPV6_TCLASS, &tos, sizeof(tos)); } #endif }
2,076
12,084
0
int EC_POINT_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *g_scalar, const EC_POINT *point, const BIGNUM *p_scalar, BN_CTX *ctx) { /* just a convenient interface to EC_POINTs_mul() */ const EC_POINT *points[1]; const BIGNUM *scalars[1]; points[0] = point; scalars[0] = p_scalar; return EC_POINTs_mul(group, r, g_scalar, (point != NULL && p_scalar != NULL), points, scalars, ctx); }
2,077
48,705
0
int h2_stream_input_is_open(const h2_stream *stream) { return input_open(stream); }
2,078
83,412
0
RemoveListItem(list_item_t **pfirst, match_fn_t match, LPVOID ctx) { LPVOID data = NULL; list_item_t **pnext; for (pnext = pfirst; *pnext; pnext = &(*pnext)->next) { list_item_t *item = *pnext; if (!match(item->data, ctx)) { continue; } /* Found item, remove from the list and free memory */ *pnext = item->next; data = item->data; free(item); break; } return data; }
2,079
33,781
0
long vhost_dev_check_owner(struct vhost_dev *dev) { /* Are you the owner? If not, I don't think you mean to do that */ return dev->mm == current->mm ? 0 : -EPERM; }
2,080
92,713
0
void task_numa_free(struct task_struct *p) { struct numa_group *grp = p->numa_group; void *numa_faults = p->numa_faults; unsigned long flags; int i; if (grp) { spin_lock_irqsave(&grp->lock, flags); for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) grp->faults[i] -= p->numa_faults[i]; grp->total_faults -= p->total_numa_faults; grp->nr_tasks--; spin_unlock_irqrestore(&grp->lock, flags); RCU_INIT_POINTER(p->numa_group, NULL); put_numa_group(grp); } p->numa_faults = NULL; kfree(numa_faults); }
2,081
24,684
0
int posix_cpu_timer_del(struct k_itimer *timer) { struct task_struct *p = timer->it.cpu.task; int ret = 0; if (likely(p != NULL)) { read_lock(&tasklist_lock); if (unlikely(p->signal == NULL)) { /* * We raced with the reaping of the task. * The deletion should have cleared us off the list. */ BUG_ON(!list_empty(&timer->it.cpu.entry)); } else { spin_lock(&p->sighand->siglock); if (timer->it.cpu.firing) ret = TIMER_RETRY; else list_del(&timer->it.cpu.entry); spin_unlock(&p->sighand->siglock); } read_unlock(&tasklist_lock); if (!ret) put_task_struct(p); } return ret; }
2,082
93,663
0
static void cqspi_configure_cs_and_sizes(struct spi_nor *nor) { struct cqspi_flash_pdata *f_pdata = nor->priv; struct cqspi_st *cqspi = f_pdata->cqspi; void __iomem *iobase = cqspi->iobase; unsigned int reg; /* configure page size and block size. */ reg = readl(iobase + CQSPI_REG_SIZE); reg &= ~(CQSPI_REG_SIZE_PAGE_MASK << CQSPI_REG_SIZE_PAGE_LSB); reg &= ~(CQSPI_REG_SIZE_BLOCK_MASK << CQSPI_REG_SIZE_BLOCK_LSB); reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK; reg |= (nor->page_size << CQSPI_REG_SIZE_PAGE_LSB); reg |= (ilog2(nor->mtd.erasesize) << CQSPI_REG_SIZE_BLOCK_LSB); reg |= (nor->addr_width - 1); writel(reg, iobase + CQSPI_REG_SIZE); /* configure the chip select */ cqspi_chipselect(nor); /* Store the new configuration of the controller */ cqspi->current_page_size = nor->page_size; cqspi->current_erase_size = nor->mtd.erasesize; cqspi->current_addr_width = nor->addr_width; }
2,083
16,673
0
ReadUserLogFileState::~ReadUserLogFileState( void ) { }
2,084
22,970
0
static void nfs_free_unique_id(struct rb_root *root, struct nfs_unique_id *id) { rb_erase(&id->rb_node, root); }
2,085
130,495
0
GetErrorQuery::GetErrorQuery( QueryManager* manager, GLenum target, int32 shm_id, uint32 shm_offset) : Query(manager, target, shm_id, shm_offset) { }
2,086
60,665
0
static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval, int __user *optlen) { if (len == 0) return -EINVAL; if (len > sizeof(struct sctp_event_subscribe)) len = sizeof(struct sctp_event_subscribe); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len)) return -EFAULT; return 0; }
2,087
133,720
0
static bool isNonCanonicalCharacter(UChar c) { return (c == '\\' || c == '0' || c == '\0' || c >= 127); }
2,088
179,542
1
static __u8 *ch_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (*rsize >= 17 && rdesc[11] == 0x3c && rdesc[12] == 0x02) { hid_info(hdev, "fixing up Cherry Cymotion report descriptor\n"); rdesc[11] = rdesc[16] = 0xff; rdesc[12] = rdesc[17] = 0x03; } return rdesc; }
2,089
44,149
0
int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf, size_t *lenp, loff_t *ppos) { int ret; ret = proc_dointvec_minmax(table, write, buf, lenp, ppos); if (ret < 0 || !write) return ret; pipe_max_size = round_pipe_size(pipe_max_size); return ret; }
2,090
144,824
0
void SimulateFreezeSignal(content::WebContents* contents) { static_cast<TabLifecycleUnitSource::TabLifecycleUnit*>( TabLifecycleUnitExternal::FromWebContents(contents)) ->UpdateLifecycleState(mojom::LifecycleState::kFrozen); }
2,091
115,142
0
bool GraphicsContext3D::ImageExtractor::extractImage(bool premultiplyAlpha, bool ignoreGammaAndColorProfile) { UNUSED_PARAM(ignoreGammaAndColorProfile); if (!m_image) return false; if (m_image->data()) m_qtImage = QImage::fromData(reinterpret_cast<const uchar*>(m_image->data()->data()), m_image->data()->size()); else { QPixmap* nativePixmap = m_image->nativeImageForCurrentFrame(); if (!nativePixmap) return false; m_qtImage = *nativePixmap->handle()->buffer(); } m_alphaOp = AlphaDoNothing; switch (m_qtImage.format()) { case QImage::Format_RGB32: break; case QImage::Format_ARGB32: if (premultiplyAlpha) m_alphaOp = AlphaDoPremultiply; break; case QImage::Format_ARGB32_Premultiplied: if (!premultiplyAlpha) m_alphaOp = AlphaDoUnmultiply; break; default: m_qtImage = m_qtImage.convertToFormat(premultiplyAlpha ? QImage::Format_ARGB32_Premultiplied : QImage::Format_ARGB32); break; } m_imageWidth = m_image->width(); m_imageHeight = m_image->height(); if (!m_imageWidth || !m_imageHeight) return false; m_imagePixelData = m_qtImage.constBits(); m_imageSourceFormat = SourceFormatBGRA8; m_imageSourceUnpackAlignment = 0; return true; }
2,092
59,069
0
static int tcmu_glfs_flush(struct tcmu_device *dev, struct tcmulib_cmd *cmd) { struct glfs_state *state = tcmu_get_dev_private(dev); glfs_cbk_cookie *cookie; cookie = calloc(1, sizeof(*cookie)); if (!cookie) { tcmu_err("Could not allocate cookie: %m\n"); goto out; } cookie->dev = dev; cookie->cmd = cmd; cookie->length = 0; cookie->op = TCMU_GLFS_FLUSH; if (glfs_fdatasync_async(state->gfd, glfs_async_cbk, cookie) < 0) { tcmu_err("glfs_fdatasync_async failed: %m\n"); goto out; } return 0; out: free(cookie); return SAM_STAT_TASK_SET_FULL; }
2,093
82,205
0
mrb_obj_protected_methods(mrb_state *mrb, mrb_value self) { mrb_bool recur = TRUE; mrb_get_args(mrb, "|b", &recur); return mrb_obj_methods(mrb, recur, self, NOEX_PROTECTED); /* protected attribute not define */ }
2,094
85,153
0
static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode, struct page *page, nid_t ino) { struct bio_vec *bvec; struct page *target; int i; if (!io->bio) return false; if (!inode && !page && !ino) return true; bio_for_each_segment_all(bvec, io->bio, i) { if (bvec->bv_page->mapping) target = bvec->bv_page; else target = fscrypt_control_page(bvec->bv_page); if (inode && inode == target->mapping->host) return true; if (page && page == target) return true; if (ino && ino == ino_of_node(target)) return true; } return false; }
2,095
109,731
0
void Document::maybeHandleHttpRefresh(const String& content, HttpRefreshType httpRefreshType) { if (m_isViewSource || !m_frame) return; double delay; String refreshURL; if (!parseHTTPRefresh(content, httpRefreshType == HttpRefreshFromMetaTag, delay, refreshURL)) return; if (refreshURL.isEmpty()) refreshURL = url().string(); else refreshURL = completeURL(refreshURL).string(); if (protocolIsJavaScript(refreshURL)) { String message = "Refused to refresh " + m_url.elidedString() + " to a javascript: URL"; addConsoleMessage(SecurityMessageSource, ErrorMessageLevel, message); return; } if (httpRefreshType == HttpRefreshFromMetaTag && isSandboxed(SandboxAutomaticFeatures)) { String message = "Refused to execute the redirect specified via '<meta http-equiv='refresh' content='...'>'. The document is sandboxed, and the 'allow-scripts' keyword is not set."; addConsoleMessage(SecurityMessageSource, ErrorMessageLevel, message); return; } m_frame->navigationScheduler()->scheduleRedirect(delay, refreshURL); }
2,096
182,416
1
int git_delta_apply( void **out, size_t *out_len, const unsigned char *base, size_t base_len, const unsigned char *delta, size_t delta_len) { const unsigned char *delta_end = delta + delta_len; size_t base_sz, res_sz, alloc_sz; unsigned char *res_dp; *out = NULL; *out_len = 0; /* * Check that the base size matches the data we were given; * if not we would underflow while accessing data from the * base object, resulting in data corruption or segfault. */ if ((hdr_sz(&base_sz, &delta, delta_end) < 0) || (base_sz != base_len)) { giterr_set(GITERR_INVALID, "failed to apply delta: base size does not match given data"); return -1; } if (hdr_sz(&res_sz, &delta, delta_end) < 0) { giterr_set(GITERR_INVALID, "failed to apply delta: base size does not match given data"); return -1; } GITERR_CHECK_ALLOC_ADD(&alloc_sz, res_sz, 1); res_dp = git__malloc(alloc_sz); GITERR_CHECK_ALLOC(res_dp); res_dp[res_sz] = '\0'; *out = res_dp; *out_len = res_sz; while (delta < delta_end) { unsigned char cmd = *delta++; if (cmd & 0x80) { /* cmd is a copy instruction; copy from the base. */ size_t off = 0, len = 0; if (cmd & 0x01) off = *delta++; if (cmd & 0x02) off |= *delta++ << 8UL; if (cmd & 0x04) off |= *delta++ << 16UL; if (cmd & 0x08) off |= ((unsigned) *delta++ << 24UL); if (cmd & 0x10) len = *delta++; if (cmd & 0x20) len |= *delta++ << 8UL; if (cmd & 0x40) len |= *delta++ << 16UL; if (!len) len = 0x10000; if (base_len < off + len || res_sz < len) goto fail; memcpy(res_dp, base + off, len); res_dp += len; res_sz -= len; } else if (cmd) { /* * cmd is a literal insert instruction; copy from * the delta stream itself. */ if (delta_end - delta < cmd || res_sz < cmd) goto fail; memcpy(res_dp, delta, cmd); delta += cmd; res_dp += cmd; res_sz -= cmd; } else { /* cmd == 0 is reserved for future encodings. */ goto fail; } } if (delta != delta_end || res_sz) goto fail; return 0; fail: git__free(*out); *out = NULL; *out_len = 0; giterr_set(GITERR_INVALID, "failed to apply delta"); return -1; }
2,097
163,844
0
const Extension* ExtensionBrowserTest::LoadExtensionIncognito( const base::FilePath& path) { return LoadExtensionWithFlags(path, kFlagEnableFileAccess | kFlagEnableIncognito); }
2,098
170,493
0
void Parcel::restoreAllowFds(bool lastValue) { mAllowFds = lastValue; }
2,099