unique_id (int64, 13–189k) | target (int64, 0–1) | code (string, lengths 20–241k) | __index_level_0__ (int64, 0–18.9k) |
---|---|---|---|
125,415 | 0 | void GDataFileSystem::OnGetFileFromCache(const GetFileFromCacheParams& params,
GDataFileError error,
const std::string& resource_id,
const std::string& md5,
const FilePath& cache_file_path) {
DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI));
if (error == GDATA_FILE_OK) {
if (!params.get_file_callback.is_null()) {
params.get_file_callback.Run(error,
cache_file_path,
params.mime_type,
REGULAR_FILE);
}
return;
}
documents_service_->GetDocumentEntry(
resource_id,
base::Bind(&GDataFileSystem::OnGetDocumentEntry,
ui_weak_ptr_,
cache_file_path,
GetFileFromCacheParams(params.virtual_file_path,
params.local_tmp_path,
params.content_url,
params.resource_id,
params.md5,
params.mime_type,
params.get_file_callback,
params.get_download_data_callback)));
}
| 3,400 |
97,858 | 0 | void AutoFillHelper::SuggestionsReceived(int query_id,
const std::vector<string16>& values,
const std::vector<string16>& labels,
const std::vector<string16>& icons,
const std::vector<int>& unique_ids) {
WebKit::WebView* web_view = render_view_->webview();
if (!web_view || query_id != autofill_query_id_)
return;
web_view->hidePopups();
if (values.empty())
return;
std::vector<string16> v(values);
std::vector<string16> l(labels);
std::vector<string16> i(icons);
std::vector<int> ids(unique_ids);
int separator_index = -1;
if (form_manager_.FormWithNodeIsAutoFilled(autofill_query_node_)) {
v.push_back(l10n_util::GetStringUTF16(IDS_AUTOFILL_CLEAR_FORM_MENU_ITEM));
l.push_back(string16());
i.push_back(string16());
ids.push_back(0);
suggestions_clear_index_ = v.size() - 1;
separator_index = values.size();
}
bool show_options = false;
for (size_t i = 0; i < ids.size(); ++i) {
if (ids[i] != 0) {
show_options = true;
break;
}
}
if (show_options) {
v.push_back(l10n_util::GetStringUTF16(IDS_AUTOFILL_OPTIONS));
l.push_back(string16());
i.push_back(string16());
ids.push_back(0);
suggestions_options_index_ = v.size() - 1;
separator_index = values.size();
}
if (!v.empty()) {
web_view->applyAutoFillSuggestions(
autofill_query_node_, v, l, i, ids, separator_index);
}
}
| 3,401 |
136,826 | 0 | void LocalDOMWindow::print(ScriptState* script_state) {
if (!GetFrame())
return;
Page* page = GetFrame()->GetPage();
if (!page)
return;
if (script_state &&
v8::MicrotasksScope::IsRunningMicrotasks(script_state->GetIsolate())) {
UseCounter::Count(document(), WebFeature::kDuring_Microtask_Print);
}
if (GetFrame()->IsLoading()) {
should_print_when_finished_loading_ = true;
return;
}
UseCounter::CountCrossOriginIframe(*document(),
WebFeature::kCrossOriginWindowPrint);
should_print_when_finished_loading_ = false;
page->GetChromeClient().Print(GetFrame());
}
| 3,402 |
173,446 | 0 | OMX_ERRORTYPE omx_vdec::allocate_color_convert_buf::free_output_buffer(
OMX_BUFFERHEADERTYPE *bufhdr)
{
unsigned int index = 0;
if (!enabled)
return omx->free_output_buffer(bufhdr);
if (enabled && omx->is_component_secure())
return OMX_ErrorNone;
if (!allocated_count || !bufhdr) {
DEBUG_PRINT_ERROR("Color convert no buffer to be freed %p",bufhdr);
return OMX_ErrorBadParameter;
}
index = bufhdr - m_out_mem_ptr_client;
if (index >= omx->drv_ctx.op_buf.actualcount) {
DEBUG_PRINT_ERROR("Incorrect index color convert free_output_buffer");
return OMX_ErrorBadParameter;
}
if (pmem_fd[index] > 0) {
munmap(pmem_baseaddress[index], buffer_size_req);
close(pmem_fd[index]);
}
pmem_fd[index] = -1;
#ifdef USE_ION
omx->free_ion_memory(&op_buf_ion_info[index]);
#endif
m_heap_ptr[index].video_heap_ptr = NULL;
if (allocated_count > 0)
allocated_count--;
else
allocated_count = 0;
if (!allocated_count) {
pthread_mutex_lock(&omx->c_lock);
c2d.close();
init_members();
pthread_mutex_unlock(&omx->c_lock);
}
return omx->free_output_buffer(&omx->m_out_mem_ptr[index]);
}
| 3,403 |
7,848 | 0 | static void mig_sleep_cpu(void *opq)
{
qemu_mutex_unlock_iothread();
g_usleep(30*1000);
qemu_mutex_lock_iothread();
}
| 3,404 |
46,106 | 0 | void dtls1_double_timeout(SSL *s)
{
s->d1->timeout_duration *= 2;
if (s->d1->timeout_duration > 60)
s->d1->timeout_duration = 60;
dtls1_start_timer(s);
}
| 3,405 |
57,505 | 0 | static int ext4_do_update_inode(handle_t *handle,
struct inode *inode,
struct ext4_iloc *iloc)
{
struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
struct ext4_inode_info *ei = EXT4_I(inode);
struct buffer_head *bh = iloc->bh;
int err = 0, rc, block;
/* For fields not tracked in the in-memory inode,
* initialise them to zero for new inodes. */
if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
ext4_get_inode_flags(ei);
raw_inode->i_mode = cpu_to_le16(inode->i_mode);
if (!(test_opt(inode->i_sb, NO_UID32))) {
raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
/*
* Fix up interoperability with old kernels. Otherwise, old inodes get
* re-used with the upper 16 bits of the uid/gid intact
*/
if (!ei->i_dtime) {
raw_inode->i_uid_high =
cpu_to_le16(high_16_bits(inode->i_uid));
raw_inode->i_gid_high =
cpu_to_le16(high_16_bits(inode->i_gid));
} else {
raw_inode->i_uid_high = 0;
raw_inode->i_gid_high = 0;
}
} else {
raw_inode->i_uid_low =
cpu_to_le16(fs_high2lowuid(inode->i_uid));
raw_inode->i_gid_low =
cpu_to_le16(fs_high2lowgid(inode->i_gid));
raw_inode->i_uid_high = 0;
raw_inode->i_gid_high = 0;
}
raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
if (ext4_inode_blocks_set(handle, raw_inode, ei))
goto out_brelse;
raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
raw_inode->i_flags = cpu_to_le32(ei->i_flags);
if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
cpu_to_le32(EXT4_OS_HURD))
raw_inode->i_file_acl_high =
cpu_to_le16(ei->i_file_acl >> 32);
raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
ext4_isize_set(raw_inode, ei->i_disksize);
if (ei->i_disksize > 0x7fffffffULL) {
struct super_block *sb = inode->i_sb;
if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
EXT4_SB(sb)->s_es->s_rev_level ==
cpu_to_le32(EXT4_GOOD_OLD_REV)) {
/* If this is the first large file
* created, add a flag to the superblock.
*/
err = ext4_journal_get_write_access(handle,
EXT4_SB(sb)->s_sbh);
if (err)
goto out_brelse;
ext4_update_dynamic_rev(sb);
EXT4_SET_RO_COMPAT_FEATURE(sb,
EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
sb->s_dirt = 1;
ext4_handle_sync(handle);
err = ext4_handle_dirty_metadata(handle, NULL,
EXT4_SB(sb)->s_sbh);
}
}
raw_inode->i_generation = cpu_to_le32(inode->i_generation);
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
if (old_valid_dev(inode->i_rdev)) {
raw_inode->i_block[0] =
cpu_to_le32(old_encode_dev(inode->i_rdev));
raw_inode->i_block[1] = 0;
} else {
raw_inode->i_block[0] = 0;
raw_inode->i_block[1] =
cpu_to_le32(new_encode_dev(inode->i_rdev));
raw_inode->i_block[2] = 0;
}
} else
for (block = 0; block < EXT4_N_BLOCKS; block++)
raw_inode->i_block[block] = ei->i_data[block];
raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
if (ei->i_extra_isize) {
if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
raw_inode->i_version_hi =
cpu_to_le32(inode->i_version >> 32);
raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
}
BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
rc = ext4_handle_dirty_metadata(handle, NULL, bh);
if (!err)
err = rc;
ext4_clear_inode_state(inode, EXT4_STATE_NEW);
ext4_update_inode_fsync_trans(handle, inode, 0);
out_brelse:
brelse(bh);
ext4_std_error(inode->i_sb, err);
return err;
}
| 3,406 |
55,691 | 0 | void wake_up_new_task(struct task_struct *p)
{
struct rq_flags rf;
struct rq *rq;
/* Initialize new task's runnable average */
init_entity_runnable_average(&p->se);
raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
#ifdef CONFIG_SMP
/*
* Fork balancing, do it here and not earlier because:
* - cpus_allowed can change in the fork path
* - any previously selected cpu might disappear through hotplug
*/
set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
#endif
/* Post initialize new task's util average when its cfs_rq is set */
post_init_entity_util_avg(&p->se);
rq = __task_rq_lock(p, &rf);
activate_task(rq, p, 0);
p->on_rq = TASK_ON_RQ_QUEUED;
trace_sched_wakeup_new(p);
check_preempt_curr(rq, p, WF_FORK);
#ifdef CONFIG_SMP
if (p->sched_class->task_woken) {
/*
* Nothing relies on rq->lock after this, so its fine to
* drop it.
*/
lockdep_unpin_lock(&rq->lock, rf.cookie);
p->sched_class->task_woken(rq, p);
lockdep_repin_lock(&rq->lock, rf.cookie);
}
#endif
task_rq_unlock(rq, p, &rf);
}
| 3,407 |
114,325 | 0 | bool WebGraphicsContext3DCommandBufferImpl::isGLES2Compliant() {
return true;
}
| 3,408 |
166,709 | 0 | void ThreadHeap::ResetHeapCounters() {
DCHECK(thread_state_->InAtomicMarkingPause());
ThreadHeap::ReportMemoryUsageForTracing();
ProcessHeap::DecreaseTotalAllocatedObjectSize(stats_.AllocatedObjectSize());
ProcessHeap::DecreaseTotalMarkedObjectSize(stats_.MarkedObjectSize());
stats_.Reset();
}
| 3,409 |
145,598 | 0 | bool WriteResponsePayloadsV2(
NtlmBufferWriter* authenticate_writer,
base::span<const uint8_t, kResponseLenV1> lm_response,
base::span<const uint8_t, kNtlmProofLenV2> v2_proof,
base::span<const uint8_t> v2_proof_input,
base::span<const uint8_t> updated_target_info) {
return authenticate_writer->WriteBytes(lm_response) &&
authenticate_writer->WriteBytes(v2_proof) &&
authenticate_writer->WriteBytes(v2_proof_input) &&
authenticate_writer->WriteBytes(updated_target_info) &&
authenticate_writer->WriteUInt32(0);
}
| 3,410 |
26,886 | 0 | static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct pid_namespace *ns = dentry->d_sb->s_fs_info;
pid_t tgid = task_tgid_nr_ns(current, ns);
char *name = ERR_PTR(-ENOENT);
if (tgid) {
name = __getname();
if (!name)
name = ERR_PTR(-ENOMEM);
else
sprintf(name, "%d", tgid);
}
nd_set_link(nd, name);
return NULL;
}
| 3,411 |
81,189 | 0 | static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
{
spin_unlock_irqrestore(&timr->it_lock, flags);
}
| 3,412 |
111,822 | 0 | void SyncBackendHost::Core::OnEncryptedTypesChanged(
syncable::ModelTypeSet encrypted_types,
bool encrypt_everything) {
if (!sync_loop_)
return;
DCHECK_EQ(MessageLoop::current(), sync_loop_);
host_.Call(
FROM_HERE,
&SyncBackendHost::NotifyEncryptedTypesChanged,
encrypted_types, encrypt_everything);
}
| 3,413 |
135,159 | 0 | void Document::clearAXObjectCache()
{
ASSERT(&axObjectCacheOwner() == this);
if (m_axObjectCache)
m_axObjectCache->dispose();
m_axObjectCache.clear();
}
| 3,414 |
140,943 | 0 | void Document::AddAXContext(AXContext* context) {
DCHECK_EQ(&AXObjectCacheOwner(), this);
if (!GetLayoutView())
return;
ax_contexts_.push_back(context);
if (ax_contexts_.size() != 1)
return;
if (!ax_object_cache_)
ax_object_cache_ = AXObjectCache::Create(*this);
}
| 3,415 |
11,872 | 0 | static void print_rsync_version(enum logcode f)
{
char *subprotocol = "";
char const *got_socketpair = "no ";
char const *have_inplace = "no ";
char const *hardlinks = "no ";
char const *prealloc = "no ";
char const *symtimes = "no ";
char const *acls = "no ";
char const *xattrs = "no ";
char const *links = "no ";
char const *iconv = "no ";
char const *ipv6 = "no ";
STRUCT_STAT *dumstat;
#if SUBPROTOCOL_VERSION != 0
if (asprintf(&subprotocol, ".PR%d", SUBPROTOCOL_VERSION) < 0)
out_of_memory("print_rsync_version");
#endif
#ifdef HAVE_SOCKETPAIR
got_socketpair = "";
#endif
#ifdef HAVE_FTRUNCATE
have_inplace = "";
#endif
#ifdef SUPPORT_HARD_LINKS
hardlinks = "";
#endif
#ifdef SUPPORT_PREALLOCATION
prealloc = "";
#endif
#ifdef SUPPORT_ACLS
acls = "";
#endif
#ifdef SUPPORT_XATTRS
xattrs = "";
#endif
#ifdef SUPPORT_LINKS
links = "";
#endif
#ifdef INET6
ipv6 = "";
#endif
#ifdef ICONV_OPTION
iconv = "";
#endif
#ifdef CAN_SET_SYMLINK_TIMES
symtimes = "";
#endif
rprintf(f, "%s version %s protocol version %d%s\n",
RSYNC_NAME, RSYNC_VERSION, PROTOCOL_VERSION, subprotocol);
rprintf(f, "Copyright (C) 1996-2015 by Andrew Tridgell, Wayne Davison, and others.\n");
rprintf(f, "Web site: http://rsync.samba.org/\n");
rprintf(f, "Capabilities:\n");
rprintf(f, " %d-bit files, %d-bit inums, %d-bit timestamps, %d-bit long ints,\n",
(int)(sizeof (OFF_T) * 8),
(int)(sizeof dumstat->st_ino * 8), /* Don't check ino_t! */
(int)(sizeof (time_t) * 8),
(int)(sizeof (int64) * 8));
rprintf(f, " %ssocketpairs, %shardlinks, %ssymlinks, %sIPv6, batchfiles, %sinplace,\n",
got_socketpair, hardlinks, links, ipv6, have_inplace);
rprintf(f, " %sappend, %sACLs, %sxattrs, %siconv, %ssymtimes, %sprealloc\n",
have_inplace, acls, xattrs, iconv, symtimes, prealloc);
#ifdef MAINTAINER_MODE
rprintf(f, "Panic Action: \"%s\"\n", get_panic_action());
#endif
#if SIZEOF_INT64 < 8
rprintf(f, "WARNING: no 64-bit integers on this platform!\n");
#endif
if (sizeof (int64) != SIZEOF_INT64) {
rprintf(f,
"WARNING: size mismatch in SIZEOF_INT64 define (%d != %d)\n",
(int) SIZEOF_INT64, (int) sizeof (int64));
}
rprintf(f,"\n");
rprintf(f,"rsync comes with ABSOLUTELY NO WARRANTY. This is free software, and you\n");
rprintf(f,"are welcome to redistribute it under certain conditions. See the GNU\n");
rprintf(f,"General Public Licence for details.\n");
}
| 3,416 |
133,201 | 0 | void SchedulerHelper::RemoveTaskObserver(
base::MessageLoop::TaskObserver* task_observer) {
CheckOnValidThread();
if (task_queue_manager_)
task_queue_manager_->RemoveTaskObserver(task_observer);
}
| 3,417 |
33,086 | 0 | static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
struct xfrm_algo_desc *(*get_byname)(const char *, int),
struct nlattr *rta)
{
struct xfrm_algo *p, *ualg;
struct xfrm_algo_desc *algo;
if (!rta)
return 0;
ualg = nla_data(rta);
algo = get_byname(ualg->alg_name, 1);
if (!algo)
return -ENOSYS;
*props = algo->desc.sadb_alg_id;
p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL);
if (!p)
return -ENOMEM;
strcpy(p->alg_name, algo->name);
*algpp = p;
return 0;
}
| 3,418 |
89,458 | 0 | int nfc_genl_se_removed(struct nfc_dev *dev, u32 se_idx)
{
struct sk_buff *msg;
void *hdr;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
NFC_EVENT_SE_REMOVED);
if (!hdr)
goto free_msg;
if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) ||
nla_put_u32(msg, NFC_ATTR_SE_INDEX, se_idx))
goto nla_put_failure;
genlmsg_end(msg, hdr);
genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL);
return 0;
nla_put_failure:
free_msg:
nlmsg_free(msg);
return -EMSGSIZE;
}
| 3,419 |
68,371 | 0 | perf_event_output_backward(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs)
{
__perf_event_output(event, data, regs, perf_output_begin_backward);
}
| 3,420 |
152,951 | 0 | void PDFiumEngine::AppendBlankPages(int num_pages) {
DCHECK_NE(num_pages, 0);
if (!doc_)
return;
selection_.clear();
pending_pages_.clear();
while (pages_.size() > 1) {
delete pages_.back();
pages_.pop_back();
FPDFPage_Delete(doc_, pages_.size());
}
std::vector<pp::Rect> page_rects;
pp::Size page_size = GetPageSize(0);
page_size.Enlarge(kPageShadowLeft + kPageShadowRight,
kPageShadowTop + kPageShadowBottom);
pp::Size old_document_size = document_size_;
document_size_ = pp::Size(page_size.width(), 0);
for (int i = 0; i < num_pages; ++i) {
if (i != 0) {
document_size_.Enlarge(0, kPageSeparatorThickness);
}
pp::Rect rect(pp::Point(0, document_size_.height()), page_size);
page_rects.push_back(rect);
document_size_.Enlarge(0, page_size.height());
}
for (int i = 1; i < num_pages; ++i) {
pp::Rect page_rect(page_rects[i]);
page_rect.Inset(kPageShadowLeft, kPageShadowTop,
kPageShadowRight, kPageShadowBottom);
double width_in_points = ConvertUnitDouble(page_rect.width(),
kPixelsPerInch,
kPointsPerInch);
double height_in_points = ConvertUnitDouble(page_rect.height(),
kPixelsPerInch,
kPointsPerInch);
FPDFPage_New(doc_, i, width_in_points, height_in_points);
pages_.push_back(new PDFiumPage(this, i, page_rect, true));
}
CalculateVisiblePages();
if (document_size_ != old_document_size)
client_->DocumentSizeUpdated(document_size_);
}
| 3,421 |
179,892 | 1 | static int setup_dev_console(const struct lxc_rootfs *rootfs,
const struct lxc_console *console)
{
char path[MAXPATHLEN];
struct stat s;
int ret;
ret = snprintf(path, sizeof(path), "%s/dev/console", rootfs->mount);
if (ret >= sizeof(path)) {
ERROR("console path too long");
return -1;
}
if (access(path, F_OK)) {
WARN("rootfs specified but no console found at '%s'", path);
return 0;
}
if (console->master < 0) {
INFO("no console");
return 0;
}
if (stat(path, &s)) {
SYSERROR("failed to stat '%s'", path);
return -1;
}
if (chmod(console->name, s.st_mode)) {
SYSERROR("failed to set mode '0%o' to '%s'",
s.st_mode, console->name);
return -1;
}
if (mount(console->name, path, "none", MS_BIND, 0)) {
ERROR("failed to mount '%s' on '%s'", console->name, path);
return -1;
}
INFO("console has been setup");
return 0;
}
| 3,422 |
169,294 | 0 | bool DOMMessageQueue::PopMessage(std::string* message) {
DCHECK(message);
if (renderer_crashed_ || message_queue_.empty())
return false;
*message = message_queue_.front();
message_queue_.pop();
return true;
}
| 3,423 |
25,787 | 0 | static void x86_pmu_enable(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct perf_event *event;
struct hw_perf_event *hwc;
int i, added = cpuc->n_added;
if (!x86_pmu_initialized())
return;
if (cpuc->enabled)
return;
if (cpuc->n_added) {
int n_running = cpuc->n_events - cpuc->n_added;
/*
* apply assignment obtained either from
* hw_perf_group_sched_in() or x86_pmu_enable()
*
* step1: save events moving to new counters
* step2: reprogram moved events into new counters
*/
for (i = 0; i < n_running; i++) {
event = cpuc->event_list[i];
hwc = &event->hw;
/*
* we can avoid reprogramming counter if:
* - assigned same counter as last time
* - running on same CPU as last time
* - no other event has used the counter since
*/
if (hwc->idx == -1 ||
match_prev_assignment(hwc, cpuc, i))
continue;
/*
* Ensure we don't accidentally enable a stopped
* counter simply because we rescheduled.
*/
if (hwc->state & PERF_HES_STOPPED)
hwc->state |= PERF_HES_ARCH;
x86_pmu_stop(event, PERF_EF_UPDATE);
}
for (i = 0; i < cpuc->n_events; i++) {
event = cpuc->event_list[i];
hwc = &event->hw;
if (!match_prev_assignment(hwc, cpuc, i))
x86_assign_hw_event(event, cpuc, i);
else if (i < n_running)
continue;
if (hwc->state & PERF_HES_ARCH)
continue;
x86_pmu_start(event, PERF_EF_RELOAD);
}
cpuc->n_added = 0;
perf_events_lapic_init();
}
cpuc->enabled = 1;
barrier();
x86_pmu.enable_all(added);
}
| 3,424 |
40,639 | 0 | static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
int closing, int tx_ring)
{
struct pgv *pg_vec = NULL;
struct packet_sock *po = pkt_sk(sk);
int was_running, order = 0;
struct packet_ring_buffer *rb;
struct sk_buff_head *rb_queue;
__be16 num;
int err = -EINVAL;
/* Added to avoid minimal code churn */
struct tpacket_req *req = &req_u->req;
/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
WARN(1, "Tx-ring is not supported.\n");
goto out;
}
rb = tx_ring ? &po->tx_ring : &po->rx_ring;
rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
err = -EBUSY;
if (!closing) {
if (atomic_read(&po->mapped))
goto out;
if (atomic_read(&rb->pending))
goto out;
}
if (req->tp_block_nr) {
/* Sanity tests and some calculations */
err = -EBUSY;
if (unlikely(rb->pg_vec))
goto out;
switch (po->tp_version) {
case TPACKET_V1:
po->tp_hdrlen = TPACKET_HDRLEN;
break;
case TPACKET_V2:
po->tp_hdrlen = TPACKET2_HDRLEN;
break;
case TPACKET_V3:
po->tp_hdrlen = TPACKET3_HDRLEN;
break;
}
err = -EINVAL;
if (unlikely((int)req->tp_block_size <= 0))
goto out;
if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
goto out;
if (unlikely(req->tp_frame_size < po->tp_hdrlen +
po->tp_reserve))
goto out;
if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
goto out;
rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
if (unlikely(rb->frames_per_block <= 0))
goto out;
if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
req->tp_frame_nr))
goto out;
err = -ENOMEM;
order = get_order(req->tp_block_size);
pg_vec = alloc_pg_vec(req, order);
if (unlikely(!pg_vec))
goto out;
switch (po->tp_version) {
case TPACKET_V3:
/* Transmit path is not supported. We checked
* it above but just being paranoid
*/
if (!tx_ring)
init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring);
break;
default:
break;
}
}
/* Done */
else {
err = -EINVAL;
if (unlikely(req->tp_frame_nr))
goto out;
}
lock_sock(sk);
/* Detach socket from network */
spin_lock(&po->bind_lock);
was_running = po->running;
num = po->num;
if (was_running) {
po->num = 0;
__unregister_prot_hook(sk, false);
}
spin_unlock(&po->bind_lock);
synchronize_net();
err = -EBUSY;
mutex_lock(&po->pg_vec_lock);
if (closing || atomic_read(&po->mapped) == 0) {
err = 0;
spin_lock_bh(&rb_queue->lock);
swap(rb->pg_vec, pg_vec);
rb->frame_max = (req->tp_frame_nr - 1);
rb->head = 0;
rb->frame_size = req->tp_frame_size;
spin_unlock_bh(&rb_queue->lock);
swap(rb->pg_vec_order, order);
swap(rb->pg_vec_len, req->tp_block_nr);
rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
po->prot_hook.func = (po->rx_ring.pg_vec) ?
tpacket_rcv : packet_rcv;
skb_queue_purge(rb_queue);
if (atomic_read(&po->mapped))
pr_err("packet_mmap: vma is busy: %d\n",
atomic_read(&po->mapped));
}
mutex_unlock(&po->pg_vec_lock);
spin_lock(&po->bind_lock);
if (was_running) {
po->num = num;
register_prot_hook(sk);
}
spin_unlock(&po->bind_lock);
if (closing && (po->tp_version > TPACKET_V2)) {
/* Because we don't support block-based V3 on tx-ring */
if (!tx_ring)
prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
}
release_sock(sk);
if (pg_vec)
free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
return err;
}
| 3,425 |
25,061 | 0 | static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
struct sk_buff *skb,
struct request_sock *req,
struct dst_entry *dst)
{
struct inet6_request_sock *ireq6 = inet6_rsk(req);
struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
struct inet_sock *newinet;
struct dccp6_sock *newdp6;
struct sock *newsk;
struct ipv6_txoptions *opt;
if (skb->protocol == htons(ETH_P_IP)) {
/*
* v6 mapped
*/
newsk = dccp_v4_request_recv_sock(sk, skb, req, dst);
if (newsk == NULL)
return NULL;
newdp6 = (struct dccp6_sock *)newsk;
newinet = inet_sk(newsk);
newinet->pinet6 = &newdp6->inet6;
newnp = inet6_sk(newsk);
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);
ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
newsk->sk_backlog_rcv = dccp_v4_do_rcv;
newnp->pktoptions = NULL;
newnp->opt = NULL;
newnp->mcast_oif = inet6_iif(skb);
newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
/*
* No need to charge this sock to the relevant IPv6 refcnt debug socks count
* here, dccp_create_openreq_child now does this for us, see the comment in
* that function for the gory details. -acme
*/
/* It is tricky place. Until this moment IPv4 tcp
worked with IPv6 icsk.icsk_af_ops.
Sync it now.
*/
dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
return newsk;
}
opt = np->opt;
if (sk_acceptq_is_full(sk))
goto out_overflow;
if (dst == NULL) {
struct in6_addr *final_p, final;
struct flowi6 fl6;
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_DCCP;
ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
final_p = fl6_update_dst(&fl6, opt, &final);
ipv6_addr_copy(&fl6.saddr, &ireq6->loc_addr);
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.fl6_dport = inet_rsk(req)->rmt_port;
fl6.fl6_sport = inet_rsk(req)->loc_port;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
if (IS_ERR(dst))
goto out;
}
newsk = dccp_create_openreq_child(sk, req, skb);
if (newsk == NULL)
goto out_nonewsk;
/*
* No need to charge this sock to the relevant IPv6 refcnt debug socks
* count here, dccp_create_openreq_child now does this for us, see the
* comment in that function for the gory details. -acme
*/
__ip6_dst_store(newsk, dst, NULL, NULL);
newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
NETIF_F_TSO);
newdp6 = (struct dccp6_sock *)newsk;
newinet = inet_sk(newsk);
newinet->pinet6 = &newdp6->inet6;
newnp = inet6_sk(newsk);
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
ipv6_addr_copy(&newnp->daddr, &ireq6->rmt_addr);
ipv6_addr_copy(&newnp->saddr, &ireq6->loc_addr);
ipv6_addr_copy(&newnp->rcv_saddr, &ireq6->loc_addr);
newsk->sk_bound_dev_if = ireq6->iif;
/* Now IPv6 options...
First: no IPv4 options.
*/
newinet->inet_opt = NULL;
/* Clone RX bits */
newnp->rxopt.all = np->rxopt.all;
/* Clone pktoptions received with SYN */
newnp->pktoptions = NULL;
if (ireq6->pktopts != NULL) {
newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC);
kfree_skb(ireq6->pktopts);
ireq6->pktopts = NULL;
if (newnp->pktoptions)
skb_set_owner_r(newnp->pktoptions, newsk);
}
newnp->opt = NULL;
newnp->mcast_oif = inet6_iif(skb);
newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
/*
* Clone native IPv6 options from listening socket (if any)
*
* Yes, keeping reference count would be much more clever, but we make
* one more one thing there: reattach optmem to newsk.
*/
if (opt != NULL) {
newnp->opt = ipv6_dup_options(newsk, opt);
if (opt != np->opt)
sock_kfree_s(sk, opt, opt->tot_len);
}
inet_csk(newsk)->icsk_ext_hdr_len = 0;
if (newnp->opt != NULL)
inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
newnp->opt->opt_flen);
dccp_sync_mss(newsk, dst_mtu(dst));
newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
if (__inet_inherit_port(sk, newsk) < 0) {
sock_put(newsk);
goto out;
}
__inet6_hash(newsk, NULL);
return newsk;
out_overflow:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
dst_release(dst);
out:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
if (opt != NULL && opt != np->opt)
sock_kfree_s(sk, opt, opt->tot_len);
return NULL;
}
| 3,426 |
115,797 | 0 | void SendCommand(const std::string& command) {
TabContents* contents = browser()->GetSelectedTabContents();
SafeBrowsingBlockingPage* interstitial_page =
static_cast<SafeBrowsingBlockingPage*>(
InterstitialPage::GetInterstitialPage(contents));
ASSERT_TRUE(interstitial_page);
interstitial_page->CommandReceived(command);
}
| 3,427 |
5,447 | 0 | static void Ins_SFVFS( INS_ARG )
{
Short S;
Long X, Y;
/* Only use low 16bits, then sign extend */
S = (Short)args[1];
Y = (Long)S;
S = (Short)args[0];
X = S;
if ( NORMalize( X, Y, &CUR.GS.freeVector ) == FAILURE )
return;
COMPUTE_Funcs();
}
| 3,428 |
49,489 | 0 | void hid_disconnect(struct hid_device *hdev)
{
device_remove_file(&hdev->dev, &dev_attr_country);
if (hdev->claimed & HID_CLAIMED_INPUT)
hidinput_disconnect(hdev);
if (hdev->claimed & HID_CLAIMED_HIDDEV)
hdev->hiddev_disconnect(hdev);
if (hdev->claimed & HID_CLAIMED_HIDRAW)
hidraw_disconnect(hdev);
hdev->claimed = 0;
}
| 3,429 |
75,788 | 0 | size_t extract_content_length(char *buffer, size_t size)
{
char *clen = strstr(buffer, CONTENT_LENGTH);
size_t len;
char *end;
/* Pattern not found */
if (!clen || clen > buffer + size)
return SIZE_MAX;
/* Content-Length extraction */
len = strtoul(clen + strlen(CONTENT_LENGTH), &end, 10);
if (*end)
return SIZE_MAX;
return len;
}
| 3,430 |
107,822 | 0 | NetworkSelectionView* NetworkScreen::AllocateView() {
return new NetworkSelectionView(this);
}
| 3,431 |
122,520 | 0 | void InspectorClientImpl::dumpUncountedAllocatedObjects(const HashMap<const void*, size_t>& map)
{
if (WebDevToolsAgentImpl* agent = devToolsAgent())
agent->dumpUncountedAllocatedObjects(map);
}
| 3,432 |
98,795 | 0 | void WebPluginDelegateProxy::OnGetPluginElement(int route_id, bool* success) {
*success = false;
NPObject* npobject = NULL;
if (plugin_)
npobject = plugin_->GetPluginElement();
if (!npobject)
return;
new NPObjectStub(
npobject, channel_host_.get(), route_id, 0, page_url_);
*success = true;
}
| 3,433 |
140,044 | 0 | bool HTMLMediaElement::paused() const {
return m_paused;
}
| 3,434 |
135,043 | 0 | void AppCacheBackendImpl::GetResourceList(
int host_id, std::vector<AppCacheResourceInfo>* resource_infos) {
AppCacheHost* host = GetHost(host_id);
if (!host)
return;
host->GetResourceList(resource_infos);
}
| 3,435 |
16,931 | 0 | static int l2_load(BlockDriverState *bs, uint64_t l2_offset,
uint64_t **l2_table)
{
BDRVQcowState *s = bs->opaque;
int ret;
ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset, (void**) l2_table);
return ret;
}
| 3,436 |
76,510 | 0 | int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events)
{
bool serror_pending = events->exception.serror_pending;
bool has_esr = events->exception.serror_has_esr;
if (serror_pending && has_esr) {
if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
return -EINVAL;
if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK))
kvm_set_sei_esr(vcpu, events->exception.serror_esr);
else
return -EINVAL;
} else if (serror_pending) {
kvm_inject_vabt(vcpu);
}
return 0;
}
| 3,437 |
24,184 | 0 | static void ar6000_dump_skb(struct sk_buff *skb)
{
u_char *ch;
for (ch = A_NETBUF_DATA(skb);
(unsigned long)ch < ((unsigned long)A_NETBUF_DATA(skb) +
A_NETBUF_LEN(skb)); ch++)
{
AR_DEBUG_PRINTF(ATH_DEBUG_WARN,("%2.2x ", *ch));
}
AR_DEBUG_PRINTF(ATH_DEBUG_WARN,("\n"));
}
| 3,438 |
146,075 | 0 | void WebGL2RenderingContextBase::copyBufferSubData(GLenum read_target,
GLenum write_target,
long long read_offset,
long long write_offset,
long long size) {
if (isContextLost())
return;
if (!ValidateValueFitNonNegInt32("copyBufferSubData", "readOffset",
read_offset) ||
!ValidateValueFitNonNegInt32("copyBufferSubData", "writeOffset",
write_offset) ||
!ValidateValueFitNonNegInt32("copyBufferSubData", "size", size)) {
return;
}
WebGLBuffer* read_buffer =
ValidateBufferDataTarget("copyBufferSubData", read_target);
if (!read_buffer)
return;
WebGLBuffer* write_buffer =
ValidateBufferDataTarget("copyBufferSubData", write_target);
if (!write_buffer)
return;
if (read_offset + size > read_buffer->GetSize() ||
write_offset + size > write_buffer->GetSize()) {
SynthesizeGLError(GL_INVALID_VALUE, "copyBufferSubData", "buffer overflow");
return;
}
if ((write_buffer->GetInitialTarget() == GL_ELEMENT_ARRAY_BUFFER &&
read_buffer->GetInitialTarget() != GL_ELEMENT_ARRAY_BUFFER) ||
(write_buffer->GetInitialTarget() != GL_ELEMENT_ARRAY_BUFFER &&
read_buffer->GetInitialTarget() == GL_ELEMENT_ARRAY_BUFFER)) {
SynthesizeGLError(GL_INVALID_OPERATION, "copyBufferSubData",
"Cannot copy into an element buffer destination from a "
"non-element buffer source");
return;
}
if (write_buffer->GetInitialTarget() == 0)
write_buffer->SetInitialTarget(read_buffer->GetInitialTarget());
ContextGL()->CopyBufferSubData(
read_target, write_target, static_cast<GLintptr>(read_offset),
static_cast<GLintptr>(write_offset), static_cast<GLsizeiptr>(size));
}
| 3,439 |
160,910 | 0 | const DOMWindow* DOMWindow::ToDOMWindow() const {
return this;
}
| 3,440 |
67,500 | 0 | static int fsmSymlink(const char *opath, const char *path)
{
int rc = symlink(opath, path);
if (_fsm_debug) {
rpmlog(RPMLOG_DEBUG, " %8s (%s, %s) %s\n", __func__,
opath, path, (rc < 0 ? strerror(errno) : ""));
}
if (rc < 0)
rc = RPMERR_SYMLINK_FAILED;
return rc;
}
| 3,441 |
94,127 | 0 | static void __exit tcm_loop_fabric_exit(void)
{
tcm_loop_deregister_configfs();
tcm_loop_release_core_bus();
kmem_cache_destroy(tcm_loop_cmd_cache);
}
| 3,442 |
129,114 | 0 | scoped_refptr<const PermissionSet> PermissionsData::GetTabSpecificPermissions(
int tab_id) const {
base::AutoLock auto_lock(runtime_lock_);
CHECK_GE(tab_id, 0);
TabPermissionsMap::const_iterator iter =
tab_specific_permissions_.find(tab_id);
return (iter != tab_specific_permissions_.end()) ? iter->second : NULL;
}
| 3,443 |
123,236 | 0 | ResizeLock(aura::RootWindow* root_window,
const gfx::Size new_size,
bool defer_compositor_lock)
: root_window_(root_window),
new_size_(new_size),
compositor_lock_(defer_compositor_lock ?
NULL :
root_window_->compositor()->GetCompositorLock()),
weak_ptr_factory_(this),
defer_compositor_lock_(defer_compositor_lock) {
root_window_->HoldMouseMoves();
BrowserThread::PostDelayedTask(
BrowserThread::UI, FROM_HERE,
base::Bind(&RenderWidgetHostViewAura::ResizeLock::CancelLock,
weak_ptr_factory_.GetWeakPtr()),
base::TimeDelta::FromMilliseconds(kResizeLockTimeoutMs));
}
| 3,444 |
143,817 | 0 | PersistentHistogramAllocatorTest()
: statistics_recorder_(StatisticsRecorder::CreateTemporaryForTesting()) {
CreatePersistentHistogramAllocator();
}
| 3,445 |
161,229 | 0 | void DevToolsSession::MojoConnectionDestroyed() {
binding_.Close();
session_ptr_.reset();
io_session_ptr_.reset();
}
| 3,446 |
91,832 | 0 | void comps_rtree_unset(COMPS_RTree * rt, const char * key) {
COMPS_HSList * subnodes;
COMPS_HSListItem * it;
COMPS_RTreeData * rtdata;
unsigned int offset, len, x;
char found, ended;
COMPS_HSList * path;
struct Relation {
COMPS_HSList * parent_nodes;
COMPS_HSListItem * child_it;
} *relation;
path = comps_hslist_create();
comps_hslist_init(path, NULL, NULL, &free);
len = strlen(key);
offset = 0;
subnodes = rt->subnodes;
while (offset != len) {
found = 0;
for (it = subnodes->first; it != NULL; it=it->next) {
if (((COMPS_RTreeData*)it->data)->key[0] == key[offset]) {
found = 1;
break;
}
}
if (!found) {
comps_hslist_destroy(&path);
return;
}
rtdata = (COMPS_RTreeData*)it->data;
for (x=1; ;x++) {
ended=0;
if (rtdata->key[x] == 0) ended += 1;
if (x == len - offset) ended += 2;
if (ended != 0) break;
if (key[offset+x] != rtdata->key[x]) break;
}
if (ended == 3) {
/* remove node from tree only if there's no descendant*/
if (rtdata->subnodes->last == NULL) {
comps_hslist_remove(subnodes, it);
comps_rtree_data_destroy(rtdata);
free(it);
}
else if (rtdata->data_destructor != NULL) {
(*rtdata->data_destructor)(rtdata->data);
rtdata->is_leaf = 0;
rtdata->data = NULL;
}
if (path->last == NULL) {
comps_hslist_destroy(&path);
return;
}
rtdata = (COMPS_RTreeData*)
((struct Relation*)path->last->data)->child_it->data;
/*remove all predecessor of deleted node (recursive) with no childs*/
while (rtdata->subnodes->last == NULL) {
comps_rtree_data_destroy(rtdata);
comps_hslist_remove(
((struct Relation*)path->last->data)->parent_nodes,
((struct Relation*)path->last->data)->child_it);
free(((struct Relation*)path->last->data)->child_it);
it = path->last;
comps_hslist_remove(path, path->last);
free(it);
rtdata = (COMPS_RTreeData*)
((struct Relation*)path->last->data)->child_it->data;
}
comps_hslist_destroy(&path);
return;
}
else if (ended == 1) offset+=x;
else {
comps_hslist_destroy(&path);
return;
}
if ((relation = malloc(sizeof(struct Relation))) == NULL) {
comps_hslist_destroy(&path);
return;
}
subnodes = ((COMPS_RTreeData*)it->data)->subnodes;
relation->parent_nodes = subnodes;
relation->child_it = it;
comps_hslist_append(path, (void*)relation, 0);
}
comps_hslist_destroy(&path);
return;
}
| 3,447 |
185,049 | 1 | bool Extension::HasAPIPermission(const std::string& function_name) const {
base::AutoLock auto_lock(runtime_data_lock_);
return runtime_data_.GetActivePermissions()->
HasAccessToFunction(function_name);
}
| 3,448 |
120,970 | 0 | void SocketStream::SetClientSocketFactory(
ClientSocketFactory* factory) {
DCHECK(factory);
factory_ = factory;
}
| 3,449 |
11,607 | 0 | decode_udev_encoded_string (const gchar *str)
{
GString *s;
gchar *ret;
const gchar *end_valid;
guint n;
s = g_string_new (NULL);
for (n = 0; str[n] != '\0'; n++)
{
if (str[n] == '\\')
{
gint val;
if (str[n + 1] != 'x' || str[n + 2] == '\0' || str[n + 3] == '\0')
{
g_print ("**** NOTE: malformed encoded string '%s'\n", str);
break;
}
val = (g_ascii_xdigit_value (str[n + 2]) << 4) | g_ascii_xdigit_value (str[n + 3]);
g_string_append_c (s, val);
n += 3;
}
else
{
g_string_append_c (s, str[n]);
}
}
if (!g_utf8_validate (s->str, -1, &end_valid))
{
g_print ("**** NOTE: The string '%s' is not valid UTF-8. Invalid characters begins at '%s'\n", s->str, end_valid);
ret = g_strndup (s->str, end_valid - s->str);
g_string_free (s, TRUE);
}
else
{
ret = g_string_free (s, FALSE);
}
return ret;
}
| 3,450 |
171,510 | 0 | android::SoftOMXComponent *createSoftOMXComponent(
const char *name, const OMX_CALLBACKTYPE *callbacks,
OMX_PTR appData, OMX_COMPONENTTYPE **component) {
return new android::SoftMP3(name, callbacks, appData, component);
}
| 3,451 |
21,883 | 0 | int drm_mode_connector_attach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder)
{
int i;
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] == 0) {
connector->encoder_ids[i] = encoder->base.id;
return 0;
}
}
return -ENOMEM;
}
| 3,452 |
24,388 | 0 | int jbd2_journal_restart(handle_t *handle, int nblocks)
{
return jbd2__journal_restart(handle, nblocks, GFP_NOFS);
}
| 3,453 |
98,451 | 0 | SearchProviderTest()
: default_t_url_(NULL),
term1_(UTF8ToUTF16("term1")),
keyword_t_url_(NULL),
keyword_term_(UTF8ToUTF16("keyword")),
io_thread_(ChromeThread::IO),
quit_when_done_(false) {
io_thread_.Start();
}
| 3,454 |
51,820 | 0 | dissect_usb_vid_control(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void *data)
{
gboolean is_request = (pinfo->srcport == NO_ENDPOINT);
usb_conv_info_t *usb_conv_info;
usb_trans_info_t *usb_trans_info;
int offset = 0;
usb_setup_dissector dissector = NULL;
const usb_setup_dissector_table_t *tmp;
/* Reject the packet if data or usb_trans_info are NULL */
if (data == NULL || ((usb_conv_info_t *)data)->usb_trans_info == NULL)
return 0;
usb_conv_info = (usb_conv_info_t *)data;
usb_trans_info = usb_conv_info->usb_trans_info;
/* See if we can find a class specific dissector for this request */
for (tmp=setup_dissectors; tmp->dissector; tmp++)
{
if (tmp->request == usb_trans_info->setup.request)
{
dissector = tmp->dissector;
break;
}
}
/* No we could not find any class specific dissector for this request
* return FALSE and let USB try any of the standard requests.
*/
if (!dissector)
return 0;
col_set_str(pinfo->cinfo, COL_PROTOCOL, "USBVIDEO");
col_add_fstr(pinfo->cinfo, COL_INFO, "%s %s",
val_to_str(usb_trans_info->setup.request, setup_request_names_vals, "Unknown type %x"),
is_request?"Request ":"Response");
if (is_request)
{
proto_tree_add_item(tree, hf_usb_vid_request, tvb, offset, 1, ENC_LITTLE_ENDIAN);
offset += 1;
}
offset = dissector(pinfo, tree, tvb, offset, is_request, usb_trans_info, usb_conv_info);
return offset;
}
| 3,455 |
76,803 | 0 | decode_NXAST_RAW_OUTPUT_REG(const struct nx_action_output_reg *naor,
enum ofp_version ofp_version OVS_UNUSED,
const struct vl_mff_map *vl_mff_map,
uint64_t *tlv_bitmap, struct ofpbuf *out)
{
struct ofpact_output_reg *output_reg;
enum ofperr error;
if (!is_all_zeros(naor->zero, sizeof naor->zero)) {
return OFPERR_OFPBAC_BAD_ARGUMENT;
}
output_reg = ofpact_put_OUTPUT_REG(out);
output_reg->ofpact.raw = NXAST_RAW_OUTPUT_REG;
output_reg->src.ofs = nxm_decode_ofs(naor->ofs_nbits);
output_reg->src.n_bits = nxm_decode_n_bits(naor->ofs_nbits);
output_reg->max_len = ntohs(naor->max_len);
error = mf_vl_mff_mf_from_nxm_header(ntohl(naor->src), vl_mff_map,
&output_reg->src.field, tlv_bitmap);
if (error) {
return error;
}
return mf_check_src(&output_reg->src, NULL);
}
| 3,456 |
77,039 | 0 | pad_ofpat(struct ofpbuf *openflow, size_t start_ofs)
{
struct ofp_action_header *oah;
ofpbuf_put_zeros(openflow, PAD_SIZE(openflow->size - start_ofs,
OFP_ACTION_ALIGN));
oah = ofpbuf_at_assert(openflow, start_ofs, sizeof *oah);
oah->len = htons(openflow->size - start_ofs);
}
| 3,457 |
170,967 | 0 | uint32_t MediaHTTP::flags() {
return kWantsPrefetching | kIsHTTPBasedSource;
}
| 3,458 |
143,946 | 0 | png_get_IHDR(png_structp png_ptr, png_infop info_ptr,
png_uint_32 *width, png_uint_32 *height, int *bit_depth,
int *color_type, int *interlace_type, int *compression_type,
int *filter_type)
{
png_debug1(1, "in %s retrieval function", "IHDR");
if (png_ptr == NULL || info_ptr == NULL || width == NULL ||
height == NULL || bit_depth == NULL || color_type == NULL)
return (0);
*width = info_ptr->width;
*height = info_ptr->height;
*bit_depth = info_ptr->bit_depth;
*color_type = info_ptr->color_type;
if (compression_type != NULL)
*compression_type = info_ptr->compression_type;
if (filter_type != NULL)
*filter_type = info_ptr->filter_type;
if (interlace_type != NULL)
*interlace_type = info_ptr->interlace_type;
/* This is redundant if we can be sure that the info_ptr values were all
* assigned in png_set_IHDR(). We do the check anyhow in case an
* application has ignored our advice not to mess with the members
* of info_ptr directly.
*/
png_check_IHDR (png_ptr, info_ptr->width, info_ptr->height,
info_ptr->bit_depth, info_ptr->color_type, info_ptr->interlace_type,
info_ptr->compression_type, info_ptr->filter_type);
return (1);
}
| 3,459 |
79,707 | 0 | static int r_bin_mdmp_init(struct r_bin_mdmp_obj *obj) {
r_bin_mdmp_init_parsing (obj);
if (!r_bin_mdmp_init_hdr (obj)) {
eprintf ("[ERROR] Failed to initialise header\n");
return false;
}
if (!r_bin_mdmp_init_directory (obj)) {
eprintf ("[ERROR] Failed to initialise directory structures!\n");
return false;
}
if (!r_bin_mdmp_init_pe_bins (obj)) {
eprintf ("[ERROR] Failed to initialise pe binaries!\n");
return false;
}
return true;
}
| 3,460 |
4,319 | 0 | ZEND_API void php_get_highlight_struct(zend_syntax_highlighter_ini *syntax_highlighter_ini) /* {{{ */
{
syntax_highlighter_ini->highlight_comment = INI_STR("highlight.comment");
syntax_highlighter_ini->highlight_default = INI_STR("highlight.default");
syntax_highlighter_ini->highlight_html = INI_STR("highlight.html");
syntax_highlighter_ini->highlight_keyword = INI_STR("highlight.keyword");
syntax_highlighter_ini->highlight_string = INI_STR("highlight.string");
}
/* }}} */
| 3,461 |
164,018 | 0 | CreateDownloadURLLoaderFactoryGetter(StoragePartitionImpl* storage_partition,
RenderFrameHost* rfh,
bool is_download) {
network::mojom::URLLoaderFactoryPtrInfo proxy_factory_ptr_info;
network::mojom::URLLoaderFactoryRequest proxy_factory_request;
if (rfh) {
bool should_proxy = false;
network::mojom::URLLoaderFactoryPtrInfo maybe_proxy_factory_ptr_info;
network::mojom::URLLoaderFactoryRequest maybe_proxy_factory_request =
MakeRequest(&maybe_proxy_factory_ptr_info);
should_proxy = devtools_instrumentation::WillCreateURLLoaderFactory(
static_cast<RenderFrameHostImpl*>(rfh), true, is_download,
&maybe_proxy_factory_request);
should_proxy |= GetContentClient()->browser()->WillCreateURLLoaderFactory(
rfh->GetSiteInstance()->GetBrowserContext(), rfh,
rfh->GetProcess()->GetID(), false /* is_navigation */,
true /* is_download/ */, url::Origin(), &maybe_proxy_factory_request,
nullptr /* header_client */, nullptr /* bypass_redirect_checks */);
if (should_proxy) {
proxy_factory_ptr_info = std::move(maybe_proxy_factory_ptr_info);
proxy_factory_request = std::move(maybe_proxy_factory_request);
}
}
return base::MakeRefCounted<NetworkDownloadURLLoaderFactoryGetter>(
storage_partition->url_loader_factory_getter(),
std::move(proxy_factory_ptr_info), std::move(proxy_factory_request));
}
| 3,462 |
15,605 | 0 | vmxnet3_rx_filter_may_indicate(VMXNET3State *s, const void *data,
size_t size)
{
struct eth_header *ehdr = PKT_GET_ETH_HDR(data);
if (VMXNET_FLAG_IS_SET(s->rx_mode, VMXNET3_RXM_PROMISC)) {
return true;
}
if (!vmxnet3_is_registered_vlan(s, data)) {
return false;
}
switch (vmxnet_rx_pkt_get_packet_type(s->rx_pkt)) {
case ETH_PKT_UCAST:
if (!VMXNET_FLAG_IS_SET(s->rx_mode, VMXNET3_RXM_UCAST)) {
return false;
}
if (memcmp(s->conf.macaddr.a, ehdr->h_dest, ETH_ALEN)) {
return false;
}
break;
case ETH_PKT_BCAST:
if (!VMXNET_FLAG_IS_SET(s->rx_mode, VMXNET3_RXM_BCAST)) {
return false;
}
break;
case ETH_PKT_MCAST:
if (VMXNET_FLAG_IS_SET(s->rx_mode, VMXNET3_RXM_ALL_MULTI)) {
return true;
}
if (!VMXNET_FLAG_IS_SET(s->rx_mode, VMXNET3_RXM_MCAST)) {
return false;
}
if (!vmxnet3_is_allowed_mcast_group(s, ehdr->h_dest)) {
return false;
}
break;
default:
g_assert_not_reached();
}
return true;
}
| 3,463 |
161,970 | 0 | bool PrintRenderFrameHelper::UpdatePrintSettings(
blink::WebLocalFrame* frame,
const blink::WebNode& node,
const base::DictionaryValue& passed_job_settings) {
const base::DictionaryValue* job_settings = &passed_job_settings;
base::DictionaryValue modified_job_settings;
if (job_settings->empty()) {
print_preview_context_.set_error(PREVIEW_ERROR_BAD_SETTING);
return false;
}
bool source_is_html = !PrintingNodeOrPdfFrame(frame, node);
if (!source_is_html) {
modified_job_settings.MergeDictionary(job_settings);
modified_job_settings.SetBoolean(kSettingHeaderFooterEnabled, false);
modified_job_settings.SetInteger(kSettingMarginsType, NO_MARGINS);
job_settings = &modified_job_settings;
}
int cookie =
print_pages_params_ ? print_pages_params_->params.document_cookie : 0;
PrintMsg_PrintPages_Params settings;
bool canceled = false;
Send(new PrintHostMsg_UpdatePrintSettings(routing_id(), cookie, *job_settings,
&settings, &canceled));
if (canceled) {
notify_browser_of_print_failure_ = false;
return false;
}
if (!job_settings->GetInteger(kPreviewUIID, &settings.params.preview_ui_id)) {
NOTREACHED();
print_preview_context_.set_error(PREVIEW_ERROR_BAD_SETTING);
return false;
}
if (!job_settings->GetInteger(kPreviewRequestID,
&settings.params.preview_request_id) ||
!job_settings->GetBoolean(kIsFirstRequest,
&settings.params.is_first_request)) {
NOTREACHED();
print_preview_context_.set_error(PREVIEW_ERROR_BAD_SETTING);
return false;
}
settings.params.print_to_pdf = IsPrintToPdfRequested(*job_settings);
UpdateFrameMarginsCssInfo(*job_settings);
settings.params.print_scaling_option = GetPrintScalingOption(
frame, node, source_is_html, *job_settings, settings.params);
SetPrintPagesParams(settings);
if (PrintMsg_Print_Params_IsValid(settings.params))
return true;
print_preview_context_.set_error(PREVIEW_ERROR_INVALID_PRINTER_SETTINGS);
return false;
}
| 3,464 |
36,057 | 0 | int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
{
int8_t etype;
while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
(EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
int block;
epos->block = *eloc;
epos->offset = sizeof(struct allocExtDesc);
brelse(epos->bh);
block = udf_get_lb_pblock(inode->i_sb, &epos->block, 0);
epos->bh = udf_tread(inode->i_sb, block);
if (!epos->bh) {
udf_debug("reading block %d failed!\n", block);
return -1;
}
}
return etype;
}
| 3,465 |
168,212 | 0 | void WebBluetoothServiceImpl::SetClientConnectionErrorHandler(
base::OnceClosure closure) {
binding_.set_connection_error_handler(std::move(closure));
}
| 3,466 |
20,484 | 0 | static void ext4_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
}
| 3,467 |
29,905 | 0 | kgid_t make_kgid(struct user_namespace *ns, gid_t gid)
{
/* Map the gid to a global kernel gid */
return KGIDT_INIT(map_id_down(&ns->gid_map, gid));
}
| 3,468 |
35,532 | 0 | static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
int rc;
unsigned short sel, old_sel;
struct desc_struct old_desc, new_desc;
const struct x86_emulate_ops *ops = ctxt->ops;
u8 cpl = ctxt->ops->cpl(ctxt);
/* Assignment of RIP may only fail in 64-bit mode */
if (ctxt->mode == X86EMUL_MODE_PROT64)
ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
VCPU_SREG_CS);
memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
&new_desc);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
if (rc != X86EMUL_CONTINUE) {
WARN_ON(!ctxt->mode != X86EMUL_MODE_PROT64);
/* assigning eip failed; restore the old cs */
ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
return rc;
}
return rc;
}
| 3,469 |
21,108 | 0 | void mem_cgroup_print_bad_page(struct page *page)
{
struct page_cgroup *pc;
pc = lookup_page_cgroup_used(page);
if (pc) {
printk(KERN_ALERT "pc:%p pc->flags:%lx pc->mem_cgroup:%p\n",
pc, pc->flags, pc->mem_cgroup);
}
}
| 3,470 |
132,762 | 0 | protocol::VideoStub* PepperVideoRenderer2D::GetVideoStub() {
DCHECK(CalledOnValidThread());
return software_video_renderer_->GetVideoStub();
}
| 3,471 |
68,640 | 0 | lzh_decode(struct lzh_stream *strm, int last)
{
struct lzh_dec *ds = strm->ds;
int avail_in;
int r;
if (ds->error)
return (ds->error);
avail_in = strm->avail_in;
do {
if (ds->state < ST_GET_LITERAL)
r = lzh_read_blocks(strm, last);
else
r = lzh_decode_blocks(strm, last);
} while (r == 100);
strm->total_in += avail_in - strm->avail_in;
return (r);
}
| 3,472 |
70,509 | 0 | static void yy_flex_strncpy (char* s1, yyconst char * s2, int n , yyscan_t yyscanner)
{
struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
(void)yyg;
int i;
for ( i = 0; i < n; ++i )
s1[i] = s2[i];
}
| 3,473 |
134,335 | 0 | void TabStrip::GenerateIdealBounds() {
int new_tab_y = 0;
if (touch_layout_.get()) {
if (tabs_.view_size() == 0)
return;
int new_tab_x = tabs_.ideal_bounds(tabs_.view_size() - 1).right() +
newtab_button_h_offset();
newtab_button_bounds_.set_origin(gfx::Point(new_tab_x, new_tab_y));
return;
}
double unselected, selected;
GetDesiredTabWidths(tab_count(), GetMiniTabCount(), &unselected, &selected);
current_unselected_width_ = unselected;
current_selected_width_ = selected;
int tab_height = Tab::GetStandardSize().height();
int first_non_mini_index = 0;
double tab_x = GenerateIdealBoundsForMiniTabs(&first_non_mini_index);
for (int i = first_non_mini_index; i < tab_count(); ++i) {
Tab* tab = tab_at(i);
DCHECK(!tab->data().mini);
double tab_width = tab->IsActive() ? selected : unselected;
double end_of_tab = tab_x + tab_width;
int rounded_tab_x = Round(tab_x);
set_ideal_bounds(
i,
gfx::Rect(rounded_tab_x, 0, Round(end_of_tab) - rounded_tab_x,
tab_height));
tab_x = end_of_tab + tab_h_offset();
}
int new_tab_x;
if (abs(Round(unselected) - Tab::GetStandardSize().width()) > 1 &&
!in_tab_close_) {
new_tab_x = width() - newtab_button_bounds_.width();
} else {
new_tab_x = Round(tab_x - tab_h_offset()) + newtab_button_h_offset();
}
newtab_button_bounds_.set_origin(gfx::Point(new_tab_x, new_tab_y));
}
| 3,474 |
154,233 | 0 | error::Error GLES2DecoderImpl::HandleGetProgramResourceIndex(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
return error::kUnknownCommand;
}
| 3,475 |
37,540 | 0 | static u64 mmu_spte_get_lockless(u64 *sptep)
{
return __get_spte_lockless(sptep);
}
| 3,476 |
138,378 | 0 | void ServiceManagerConnection::SetFactoryForTest(Factory* factory) {
DCHECK(!g_connection_for_process.Get());
service_manager_connection_factory = factory;
}
| 3,477 |
49,750 | 0 | static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
{
dma_free_coherent(&acb->pdev->dev, acb->uncache_size, acb->dma_coherent, acb->dma_coherent_handle);
}
| 3,478 |
142,693 | 0 | void FrameLoader::loadInSameDocument(const KURL& url, PassRefPtr<SerializedScriptValue> stateObject, FrameLoadType frameLoadType, HistoryLoadType historyLoadType, ClientRedirectPolicy clientRedirect, Document* initiatingDocument)
{
ASSERT(!stateObject || frameLoadType == FrameLoadTypeBackForward);
detachDocumentLoader(m_provisionalDocumentLoader);
if (!m_frame->host())
return;
TemporaryChange<FrameLoadType> loadTypeChange(m_loadType, frameLoadType);
saveScrollState();
KURL oldURL = m_frame->document()->url();
bool hashChange = equalIgnoringFragmentIdentifier(url, oldURL) && url.fragmentIdentifier() != oldURL.fragmentIdentifier();
if (hashChange) {
m_frame->eventHandler().stopAutoscroll();
m_frame->localDOMWindow()->enqueueHashchangeEvent(oldURL, url);
}
m_documentLoader->setIsClientRedirect(clientRedirect == ClientRedirectPolicy::ClientRedirect);
updateForSameDocumentNavigation(url, SameDocumentNavigationDefault, nullptr, ScrollRestorationAuto, frameLoadType, initiatingDocument);
m_documentLoader->initialScrollState().wasScrolledByUser = false;
checkCompleted();
m_frame->localDOMWindow()->statePopped(stateObject ? stateObject : SerializedScriptValue::nullValue());
if (historyLoadType == HistorySameDocumentLoad)
restoreScrollPositionAndViewState();
processFragment(url, NavigationWithinSameDocument);
takeObjectSnapshot();
}
| 3,479 |
175,390 | 0 | int Reverb_init(ReverbContext *pContext){
ALOGV("\tReverb_init start");
CHECK_ARG(pContext != NULL);
if (pContext->hInstance != NULL){
Reverb_free(pContext);
}
pContext->config.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
if (pContext->auxiliary) {
pContext->config.inputCfg.channels = AUDIO_CHANNEL_OUT_MONO;
} else {
pContext->config.inputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
}
pContext->config.inputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
pContext->config.inputCfg.samplingRate = 44100;
pContext->config.inputCfg.bufferProvider.getBuffer = NULL;
pContext->config.inputCfg.bufferProvider.releaseBuffer = NULL;
pContext->config.inputCfg.bufferProvider.cookie = NULL;
pContext->config.inputCfg.mask = EFFECT_CONFIG_ALL;
pContext->config.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_ACCUMULATE;
pContext->config.outputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
pContext->config.outputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
pContext->config.outputCfg.samplingRate = 44100;
pContext->config.outputCfg.bufferProvider.getBuffer = NULL;
pContext->config.outputCfg.bufferProvider.releaseBuffer = NULL;
pContext->config.outputCfg.bufferProvider.cookie = NULL;
pContext->config.outputCfg.mask = EFFECT_CONFIG_ALL;
pContext->leftVolume = REVERB_UNIT_VOLUME;
pContext->rightVolume = REVERB_UNIT_VOLUME;
pContext->prevLeftVolume = REVERB_UNIT_VOLUME;
pContext->prevRightVolume = REVERB_UNIT_VOLUME;
pContext->volumeMode = REVERB_VOLUME_FLAT;
LVREV_ReturnStatus_en LvmStatus=LVREV_SUCCESS; /* Function call status */
LVREV_ControlParams_st params; /* Control Parameters */
LVREV_InstanceParams_st InstParams; /* Instance parameters */
LVREV_MemoryTable_st MemTab; /* Memory allocation table */
bool bMallocFailure = LVM_FALSE;
/* Set the capabilities */
InstParams.MaxBlockSize = MAX_CALL_SIZE;
InstParams.SourceFormat = LVM_STEREO; // Max format, could be mono during process
InstParams.NumDelays = LVREV_DELAYLINES_4;
/* Allocate memory, forcing alignment */
LvmStatus = LVREV_GetMemoryTable(LVM_NULL,
&MemTab,
&InstParams);
LVM_ERROR_CHECK(LvmStatus, "LVREV_GetMemoryTable", "Reverb_init")
if(LvmStatus != LVREV_SUCCESS) return -EINVAL;
ALOGV("\tCreateInstance Succesfully called LVM_GetMemoryTable\n");
/* Allocate memory */
for (int i=0; i<LVM_NR_MEMORY_REGIONS; i++){
if (MemTab.Region[i].Size != 0){
MemTab.Region[i].pBaseAddress = malloc(MemTab.Region[i].Size);
if (MemTab.Region[i].pBaseAddress == LVM_NULL){
ALOGV("\tLVREV_ERROR :Reverb_init CreateInstance Failed to allocate %" PRIu32
" bytes for region %u\n", MemTab.Region[i].Size, i );
bMallocFailure = LVM_TRUE;
}else{
ALOGV("\tReverb_init CreateInstance allocate %" PRIu32
" bytes for region %u at %p\n",
MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress);
}
}
}
/* If one or more of the memory regions failed to allocate, free the regions that were
* succesfully allocated and return with an error
*/
if(bMallocFailure == LVM_TRUE){
for (int i=0; i<LVM_NR_MEMORY_REGIONS; i++){
if (MemTab.Region[i].pBaseAddress == LVM_NULL){
ALOGV("\tLVM_ERROR :Reverb_init CreateInstance Failed to allocate %" PRIu32
" bytes for region %u - Not freeing\n", MemTab.Region[i].Size, i );
}else{
ALOGV("\tLVM_ERROR :Reverb_init CreateInstance Failed: but allocated %" PRIu32
" bytes for region %u at %p- free\n",
MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress);
free(MemTab.Region[i].pBaseAddress);
}
}
return -EINVAL;
}
ALOGV("\tReverb_init CreateInstance Succesfully malloc'd memory\n");
/* Initialise */
pContext->hInstance = LVM_NULL;
/* Init sets the instance handle */
LvmStatus = LVREV_GetInstanceHandle(&pContext->hInstance,
&MemTab,
&InstParams);
LVM_ERROR_CHECK(LvmStatus, "LVM_GetInstanceHandle", "Reverb_init")
if(LvmStatus != LVREV_SUCCESS) return -EINVAL;
ALOGV("\tReverb_init CreateInstance Succesfully called LVM_GetInstanceHandle\n");
/* Set the initial process parameters */
/* General parameters */
params.OperatingMode = LVM_MODE_ON;
params.SampleRate = LVM_FS_44100;
pContext->SampleRate = LVM_FS_44100;
if(pContext->config.inputCfg.channels == AUDIO_CHANNEL_OUT_MONO){
params.SourceFormat = LVM_MONO;
} else {
params.SourceFormat = LVM_STEREO;
}
/* Reverb parameters */
params.Level = 0;
params.LPF = 23999;
params.HPF = 50;
params.T60 = 1490;
params.Density = 100;
params.Damping = 21;
params.RoomSize = 100;
pContext->SamplesToExitCount = (params.T60 * pContext->config.inputCfg.samplingRate)/1000;
/* Saved strength is used to return the exact strength that was used in the set to the get
* because we map the original strength range of 0:1000 to 1:15, and this will avoid
* quantisation like effect when returning
*/
pContext->SavedRoomLevel = -6000;
pContext->SavedHfLevel = 0;
pContext->bEnabled = LVM_FALSE;
pContext->SavedDecayTime = params.T60;
pContext->SavedDecayHfRatio = params.Damping*20;
pContext->SavedDensity = params.RoomSize*10;
pContext->SavedDiffusion = params.Density*10;
pContext->SavedReverbLevel = -6000;
/* Activate the initial settings */
LvmStatus = LVREV_SetControlParameters(pContext->hInstance,
&params);
LVM_ERROR_CHECK(LvmStatus, "LVREV_SetControlParameters", "Reverb_init")
if(LvmStatus != LVREV_SUCCESS) return -EINVAL;
ALOGV("\tReverb_init CreateInstance Succesfully called LVREV_SetControlParameters\n");
ALOGV("\tReverb_init End");
return 0;
} /* end Reverb_init */
| 3,480 |
155,597 | 0 | const gfx::VectorIcon& AuthenticatorBlePinEntrySheetModel::GetStepIllustration(
ImageColorScheme color_scheme) const {
return color_scheme == ImageColorScheme::kDark ? kWebauthnBlePinDarkIcon
: kWebauthnBlePinIcon;
}
| 3,481 |
170,426 | 0 | void Parcel::closeFileDescriptors()
{
size_t i = mObjectsSize;
if (i > 0) {
}
while (i > 0) {
i--;
const flat_binder_object* flat
= reinterpret_cast<flat_binder_object*>(mData+mObjects[i]);
if (flat->type == BINDER_TYPE_FD) {
close(flat->handle);
}
}
}
| 3,482 |
16,864 | 0 | static int bdrv_file_open(BlockDriverState *bs, const char *filename,
QDict **options, int flags, Error **errp)
{
BlockDriver *drv;
const char *drvname;
bool allow_protocol_prefix = false;
Error *local_err = NULL;
int ret;
/* Fetch the file name from the options QDict if necessary */
if (!filename) {
filename = qdict_get_try_str(*options, "filename");
} else if (filename && !qdict_haskey(*options, "filename")) {
qdict_put(*options, "filename", qstring_from_str(filename));
allow_protocol_prefix = true;
} else {
error_setg(errp, "Can't specify 'file' and 'filename' options at the "
"same time");
ret = -EINVAL;
goto fail;
}
/* Find the right block driver */
drvname = qdict_get_try_str(*options, "driver");
if (drvname) {
drv = bdrv_find_format(drvname);
if (!drv) {
error_setg(errp, "Unknown driver '%s'", drvname);
}
qdict_del(*options, "driver");
} else if (filename) {
drv = bdrv_find_protocol(filename, allow_protocol_prefix);
if (!drv) {
error_setg(errp, "Unknown protocol");
}
} else {
error_setg(errp, "Must specify either driver or file");
drv = NULL;
}
if (!drv) {
/* errp has been set already */
ret = -ENOENT;
goto fail;
}
/* Parse the filename and open it */
if (drv->bdrv_parse_filename && filename) {
drv->bdrv_parse_filename(filename, *options, &local_err);
if (local_err) {
error_propagate(errp, local_err);
ret = -EINVAL;
goto fail;
}
if (!drv->bdrv_needs_filename) {
qdict_del(*options, "filename");
} else {
filename = qdict_get_str(*options, "filename");
}
}
if (!drv->bdrv_file_open) {
ret = bdrv_open(&bs, filename, NULL, *options, flags, drv, &local_err);
*options = NULL;
} else {
ret = bdrv_open_common(bs, NULL, *options, flags, drv, &local_err);
}
if (ret < 0) {
error_propagate(errp, local_err);
goto fail;
}
bs->growable = 1;
return 0;
fail:
return ret;
}
| 3,483 |
4,281 | 0 | PHP_FUNCTION(ini_set)
{
char *varname, *new_value;
int varname_len, new_value_len;
char *old_value;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "ss", &varname, &varname_len, &new_value, &new_value_len) == FAILURE) {
return;
}
old_value = zend_ini_string(varname, varname_len + 1, 0);
/* copy to return here, because alter might free it! */
if (old_value) {
RETVAL_STRING(old_value, 1);
} else {
RETVAL_FALSE;
}
#define _CHECK_PATH(var, var_len, ini) php_ini_check_path(var, var_len, ini, sizeof(ini))
/* open basedir check */
if (PG(open_basedir)) {
if (_CHECK_PATH(varname, varname_len, "error_log") ||
_CHECK_PATH(varname, varname_len, "java.class.path") ||
_CHECK_PATH(varname, varname_len, "java.home") ||
_CHECK_PATH(varname, varname_len, "mail.log") ||
_CHECK_PATH(varname, varname_len, "java.library.path") ||
_CHECK_PATH(varname, varname_len, "vpopmail.directory")) {
if (php_check_open_basedir(new_value TSRMLS_CC)) {
zval_dtor(return_value);
RETURN_FALSE;
}
}
}
if (zend_alter_ini_entry_ex(varname, varname_len + 1, new_value, new_value_len, PHP_INI_USER, PHP_INI_STAGE_RUNTIME, 0 TSRMLS_CC) == FAILURE) {
zval_dtor(return_value);
RETURN_FALSE;
}
}
| 3,484 |
121,861 | 0 | IOThread::IOThread(
PrefService* local_state,
policy::PolicyService* policy_service,
ChromeNetLog* net_log,
extensions::EventRouterForwarder* extension_event_router_forwarder)
: net_log_(net_log),
extension_event_router_forwarder_(extension_event_router_forwarder),
globals_(NULL),
sdch_manager_(NULL),
is_spdy_disabled_by_policy_(false),
weak_factory_(this) {
#if !defined(OS_IOS) && !defined(OS_ANDROID)
#if defined(OS_WIN)
if (!win8::IsSingleWindowMetroMode())
net::ProxyResolverV8::RememberDefaultIsolate();
else
net::ProxyResolverV8::CreateIsolate();
#else
net::ProxyResolverV8::RememberDefaultIsolate();
#endif
#endif
auth_schemes_ = local_state->GetString(prefs::kAuthSchemes);
negotiate_disable_cname_lookup_ = local_state->GetBoolean(
prefs::kDisableAuthNegotiateCnameLookup);
negotiate_enable_port_ = local_state->GetBoolean(
prefs::kEnableAuthNegotiatePort);
auth_server_whitelist_ = local_state->GetString(prefs::kAuthServerWhitelist);
auth_delegate_whitelist_ = local_state->GetString(
prefs::kAuthNegotiateDelegateWhitelist);
gssapi_library_name_ = local_state->GetString(prefs::kGSSAPILibraryName);
pref_proxy_config_tracker_.reset(
ProxyServiceFactory::CreatePrefProxyConfigTrackerOfLocalState(
local_state));
ChromeNetworkDelegate::InitializePrefsOnUIThread(
&system_enable_referrers_,
NULL,
NULL,
local_state);
ssl_config_service_manager_.reset(
SSLConfigServiceManager::CreateDefaultManager(local_state));
base::Value* dns_client_enabled_default = new base::FundamentalValue(
chrome_browser_net::ConfigureAsyncDnsFieldTrial());
local_state->SetDefaultPrefValue(prefs::kBuiltInDnsClientEnabled,
dns_client_enabled_default);
dns_client_enabled_.Init(prefs::kBuiltInDnsClientEnabled,
local_state,
base::Bind(&IOThread::UpdateDnsClientEnabled,
base::Unretained(this)));
dns_client_enabled_.MoveToThread(
BrowserThread::GetMessageLoopProxyForThread(BrowserThread::IO));
#if defined(ENABLE_CONFIGURATION_POLICY)
is_spdy_disabled_by_policy_ = policy_service->GetPolicies(
policy::PolicyNamespace(policy::POLICY_DOMAIN_CHROME, std::string())).Get(
policy::key::kDisableSpdy) != NULL;
#endif // ENABLE_CONFIGURATION_POLICY
BrowserThread::SetDelegate(BrowserThread::IO, this);
}
| 3,485 |
84,261 | 0 | static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
struct hrtimer_sleeper *timeout)
{
/*
* The task state is guaranteed to be set before another task can
* wake it. set_current_state() is implemented using smp_store_mb() and
* queue_me() calls spin_unlock() upon completion, both serializing
* access to the hash list and forcing another memory barrier.
*/
set_current_state(TASK_INTERRUPTIBLE);
queue_me(q, hb);
/* Arm the timer */
if (timeout)
hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
/*
* If we have been removed from the hash list, then another task
* has tried to wake us, and we can skip the call to schedule().
*/
if (likely(!plist_node_empty(&q->list))) {
/*
* If the timer has already expired, current will already be
* flagged for rescheduling. Only call schedule if there
* is no timeout, or if it has yet to expire.
*/
if (!timeout || timeout->task)
freezable_schedule();
}
__set_current_state(TASK_RUNNING);
}
| 3,486 |
124,696 | 0 | TextRun RenderBlockFlow::constructTextRun(RenderObject* context, const Font& font, const RenderText* text, RenderStyle* style, TextDirection direction, TextRun::ExpansionBehavior expansion)
{
if (text->is8Bit())
return constructTextRunInternal(context, font, text->characters8(), text->textLength(), style, direction, expansion);
return constructTextRunInternal(context, font, text->characters16(), text->textLength(), style, direction, expansion);
}
| 3,487 |
20,666 | 0 | static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
{
struct kvm_cpuid_entry2 *cpuid = NULL;
if (eax && ecx)
cpuid = kvm_find_cpuid_entry(emul_to_vcpu(ctxt),
*eax, *ecx);
if (cpuid) {
*eax = cpuid->eax;
*ecx = cpuid->ecx;
if (ebx)
*ebx = cpuid->ebx;
if (edx)
*edx = cpuid->edx;
return true;
}
return false;
}
| 3,488 |
67,605 | 0 | MODRET set_allowemptypasswords(cmd_rec *cmd) {
int allow_empty_passwords = -1;
config_rec *c = NULL;
CHECK_ARGS(cmd, 1);
CHECK_CONF(cmd, CONF_ROOT|CONF_VIRTUAL|CONF_GLOBAL|CONF_ANON);
allow_empty_passwords = get_boolean(cmd, 1);
if (allow_empty_passwords == -1) {
CONF_ERROR(cmd, "expected Boolean parameter");
}
c = add_config_param(cmd->argv[0], 1, NULL);
c->argv[0] = pcalloc(c->pool, sizeof(int));
*((int *) c->argv[0]) = allow_empty_passwords;
c->flags |= CF_MERGEDOWN;
return PR_HANDLED(cmd);
}
| 3,489 |
4,730 | 0 | user_extension_get_value (User *user,
GDBusInterfaceInfo *interface,
const GDBusPropertyInfo *property)
{
const GVariantType *type = G_VARIANT_TYPE (property->signature);
GVariant *value;
g_autofree gchar *printed = NULL;
gint i;
/* First, try to get the value from the keyfile */
printed = g_key_file_get_value (user->keyfile, interface->name, property->name, NULL);
if (printed) {
value = g_variant_parse (type, printed, NULL, NULL, NULL);
if (value != NULL)
return value;
}
/* If that didn't work, try for a default value annotation */
for (i = 0; property->annotations && property->annotations[i]; i++) {
GDBusAnnotationInfo *annotation = property->annotations[i];
if (g_str_equal (annotation->key, "org.freedesktop.Accounts.DefaultValue.String")) {
if (g_str_equal (property->signature, "s"))
return g_variant_ref_sink (g_variant_new_string (annotation->value));
}
else if (g_str_equal (annotation->key, "org.freedesktop.Accounts.DefaultValue")) {
value = g_variant_parse (type, annotation->value, NULL, NULL, NULL);
if (value != NULL)
return value;
}
}
/* Nothing found... */
return NULL;
}
| 3,490 |
40,347 | 0 | static int bt_seq_open(struct inode *inode, struct file *file)
{
struct bt_sock_list *sk_list;
struct bt_seq_state *s;
sk_list = PDE_DATA(inode);
s = __seq_open_private(file, &bt_seq_ops,
sizeof(struct bt_seq_state));
if (!s)
return -ENOMEM;
s->l = sk_list;
return 0;
}
| 3,491 |
112,014 | 0 | void SyncTest::TearDownInProcessBrowserTestFixture() {
mock_host_resolver_override_.reset();
}
| 3,492 |
10,875 | 0 | des_cipher(const char *in, char *out, uint32_t salt, int count,
struct php_crypt_extended_data *data)
{
uint32_t l_out, r_out, rawl, rawr;
int retval;
setup_salt(salt, data);
rawl =
(uint32_t)(u_char)in[3] |
((uint32_t)(u_char)in[2] << 8) |
((uint32_t)(u_char)in[1] << 16) |
((uint32_t)(u_char)in[0] << 24);
rawr =
(uint32_t)(u_char)in[7] |
((uint32_t)(u_char)in[6] << 8) |
((uint32_t)(u_char)in[5] << 16) |
((uint32_t)(u_char)in[4] << 24);
retval = do_des(rawl, rawr, &l_out, &r_out, count, data);
out[0] = l_out >> 24;
out[1] = l_out >> 16;
out[2] = l_out >> 8;
out[3] = l_out;
out[4] = r_out >> 24;
out[5] = r_out >> 16;
out[6] = r_out >> 8;
out[7] = r_out;
return(retval);
}
| 3,493 |
95,884 | 0 | void CL_MapRestart_f( void ) {
if ( !com_cl_running ) {
return;
}
if ( !com_cl_running->integer ) {
return;
}
Com_Printf( "This command is no longer functional.\nUse \"loadgame current\" to load the current map." );
}
| 3,494 |
77,001 | 0 | ofpact_put_raw(struct ofpbuf *buf, enum ofp_version ofp_version,
enum ofp_raw_action_type raw, uint64_t arg)
{
const struct ofpact_raw_instance *inst;
struct ofp_action_header *oah;
const struct ofpact_hdrs *hdrs;
inst = ofpact_raw_lookup(ofp_version, raw);
hdrs = &inst->hdrs;
oah = ofpbuf_put_zeros(buf, inst->min_length);
oah->type = htons(hdrs->vendor ? OFPAT_VENDOR : hdrs->type);
oah->len = htons(inst->min_length);
oah->vendor = htonl(hdrs->vendor);
switch (hdrs->vendor) {
case 0:
break;
case NX_VENDOR_ID:
case ONF_VENDOR_ID: {
struct ext_action_header *nah = (struct ext_action_header *) oah;
nah->subtype = htons(hdrs->type);
break;
}
default:
OVS_NOT_REACHED();
}
if (inst->arg_len) {
uint8_t *p = (uint8_t *) oah + inst->arg_ofs + inst->arg_len;
int i;
for (i = 0; i < inst->arg_len; i++) {
*--p = arg;
arg >>= 8;
}
} else {
ovs_assert(!arg);
}
return oah;
}
| 3,495 |
186,621 | 1 | void HttpAuthFilterWhitelist::SetWhitelist(
const std::string& server_whitelist) {
rules_.ParseFromString(server_whitelist);
}
| 3,496 |
142,084 | 0 | void CGaiaCredentialBase::TellOmahaDidRun() {
#if defined(GOOGLE_CHROME_BUILD)
base::win::RegKey key;
LONG sts = key.Create(HKEY_CURRENT_USER, kRegUpdaterClientStateAppPath,
KEY_SET_VALUE | KEY_WOW64_32KEY);
if (sts != ERROR_SUCCESS) {
LOGFN(INFO) << "Unable to open omaha key sts=" << sts;
} else {
sts = key.WriteValue(L"dr", L"1");
if (sts != ERROR_SUCCESS)
LOGFN(INFO) << "Unable to write omaha dr value sts=" << sts;
}
#endif // defined(GOOGLE_CHROME_BUILD)
}
| 3,497 |
39,898 | 0 | static void skb_headers_offset_update(struct sk_buff *skb, int off)
{
/* Only adjust this if it actually is csum_start rather than csum */
if (skb->ip_summed == CHECKSUM_PARTIAL)
skb->csum_start += off;
/* {transport,network,mac}_header and tail are relative to skb->head */
skb->transport_header += off;
skb->network_header += off;
if (skb_mac_header_was_set(skb))
skb->mac_header += off;
skb->inner_transport_header += off;
skb->inner_network_header += off;
skb->inner_mac_header += off;
}
| 3,498 |
137,964 | 0 | bool AXLayoutObject::isVisited() const {
return m_layoutObject->style()->isLink() &&
m_layoutObject->style()->insideLink() ==
EInsideLink::kInsideVisitedLink;
}
| 3,499 |