unique_id (int64, 13-189k) | target (int64, 0-1) | code (string, length 20-241k) | __index_level_0__ (int64, 0-18.9k)
---|---|---|---|
165,642 | 0 | std::wstring GetBaseAppName() {
return InstallDetails::Get().mode().base_app_name;
}
| 600 |
128,190 | 0 | void Notification::dispatchCloseEvent()
{
if (m_state != NotificationStateShowing && m_state != NotificationStateClosing)
return;
m_state = NotificationStateClosed;
dispatchEvent(Event::create(EventTypeNames::close));
}
| 601 |
173,682 | 0 | void ATSParser::Program::signalDiscontinuity(
DiscontinuityType type, const sp<AMessage> &extra) {
int64_t mediaTimeUs;
if ((type & DISCONTINUITY_TIME)
&& extra != NULL
&& extra->findInt64(
IStreamListener::kKeyMediaTimeUs, &mediaTimeUs)) {
mFirstPTSValid = false;
}
for (size_t i = 0; i < mStreams.size(); ++i) {
mStreams.editValueAt(i)->signalDiscontinuity(type, extra);
}
}
| 602 |
53,345 | 0 | static int lex_get(lex_t *lex, json_error_t *error)
{
return stream_get(&lex->stream, error);
}
| 603 |
19,914 | 0 | static int nfs4_get_referral(struct inode *dir, const struct qstr *name,
struct nfs_fattr *fattr, struct nfs_fh *fhandle)
{
int status = -ENOMEM;
struct page *page = NULL;
struct nfs4_fs_locations *locations = NULL;
page = alloc_page(GFP_KERNEL);
if (page == NULL)
goto out;
locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
if (locations == NULL)
goto out;
status = nfs4_proc_fs_locations(dir, name, locations, page);
if (status != 0)
goto out;
/* Make sure server returned a different fsid for the referral */
if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
dprintk("%s: server did not return a different fsid for"
" a referral at %s\n", __func__, name->name);
status = -EIO;
goto out;
}
/* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
nfs_fixup_referral_attributes(&locations->fattr);
/* replace the lookup nfs_fattr with the locations nfs_fattr */
memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
memset(fhandle, 0, sizeof(struct nfs_fh));
out:
if (page)
__free_page(page);
kfree(locations);
return status;
}
| 604 |
120,745 | 0 | void BaseMultipleFieldsDateAndTimeInputType::updateClearButtonVisibility()
{
ClearButtonElement* clearButton = clearButtonElement();
if (!clearButton)
return;
if (element()->isRequired() || !dateTimeEditElement()->anyEditableFieldsHaveValues())
clearButton->setInlineStyleProperty(CSSPropertyVisibility, CSSValueHidden);
else
clearButton->removeInlineStyleProperty(CSSPropertyVisibility);
}
| 605 |
73,371 | 0 | init_render_context(ASS_Renderer *render_priv, ASS_Event *event)
{
render_priv->state.event = event;
render_priv->state.parsed_tags = 0;
render_priv->state.has_clips = 0;
render_priv->state.evt_type = EVENT_NORMAL;
reset_render_context(render_priv, NULL);
render_priv->state.wrap_style = render_priv->track->WrapStyle;
render_priv->state.alignment = render_priv->state.style->Alignment;
render_priv->state.pos_x = 0;
render_priv->state.pos_y = 0;
render_priv->state.org_x = 0;
render_priv->state.org_y = 0;
render_priv->state.have_origin = 0;
render_priv->state.clip_x0 = 0;
render_priv->state.clip_y0 = 0;
render_priv->state.clip_x1 = render_priv->track->PlayResX;
render_priv->state.clip_y1 = render_priv->track->PlayResY;
render_priv->state.clip_mode = 0;
render_priv->state.detect_collisions = 1;
render_priv->state.fade = 0;
render_priv->state.drawing_scale = 0;
render_priv->state.pbo = 0;
render_priv->state.effect_type = EF_NONE;
render_priv->state.effect_timing = 0;
render_priv->state.effect_skip_timing = 0;
apply_transition_effects(render_priv, event);
}
| 606 |
44,290 | 0 | static int fill_thread_core_info(struct elf_thread_core_info *t,
const struct user_regset_view *view,
long signr, size_t *total)
{
unsigned int i;
/*
* NT_PRSTATUS is the one special case, because the regset data
* goes into the pr_reg field inside the note contents, rather
* than being the whole note contents. We fill the reset in here.
* We assume that regset 0 is NT_PRSTATUS.
*/
fill_prstatus(&t->prstatus, t->task, signr);
(void) view->regsets[0].get(t->task, &view->regsets[0],
0, PR_REG_SIZE(t->prstatus.pr_reg),
PR_REG_PTR(&t->prstatus), NULL);
fill_note(&t->notes[0], "CORE", NT_PRSTATUS,
PRSTATUS_SIZE(t->prstatus), &t->prstatus);
*total += notesize(&t->notes[0]);
do_thread_regset_writeback(t->task, &view->regsets[0]);
/*
* Each other regset might generate a note too. For each regset
* that has no core_note_type or is inactive, we leave t->notes[i]
* all zero and we'll know to skip writing it later.
*/
for (i = 1; i < view->n; ++i) {
const struct user_regset *regset = &view->regsets[i];
do_thread_regset_writeback(t->task, regset);
if (regset->core_note_type && regset->get &&
(!regset->active || regset->active(t->task, regset))) {
int ret;
size_t size = regset->n * regset->size;
void *data = kmalloc(size, GFP_KERNEL);
if (unlikely(!data))
return 0;
ret = regset->get(t->task, regset,
0, size, data, NULL);
if (unlikely(ret))
kfree(data);
else {
if (regset->core_note_type != NT_PRFPREG)
fill_note(&t->notes[i], "LINUX",
regset->core_note_type,
size, data);
else {
SET_PR_FPVALID(&t->prstatus, 1);
fill_note(&t->notes[i], "CORE",
NT_PRFPREG, size, data);
}
*total += notesize(&t->notes[i]);
}
}
}
return 1;
}
| 607 |
178,422 | 1 | int BN_hex2bn(BIGNUM **bn, const char *a)
{
BIGNUM *ret = NULL;
BN_ULONG l = 0;
int neg = 0, h, m, i, j, k, c;
int num;
if ((a == NULL) || (*a == '\0'))
return (0);
if (*a == '-') {
neg = 1;
a++;
}
for (i = 0; isxdigit((unsigned char)a[i]); i++) ;
num = i + neg;
if (bn == NULL)
return (num);
/* a is the start of the hex digits, and it is 'i' long */
if (*bn == NULL) {
if ((ret = BN_new()) == NULL)
return (0);
} else {
ret = *bn;
BN_zero(ret);
}
| 608 |
151,285 | 0 | void* AsyncId(unsigned long identifier) {
return reinterpret_cast<void*>((identifier << 1) | 1);
}
| 609 |
43,818 | 0 | make_NegHints(OM_uint32 *minor_status, gss_buffer_t *outbuf)
{
OM_uint32 major_status;
unsigned int tlen = 0;
unsigned int hintNameSize = 0;
unsigned char *ptr;
unsigned char *t;
const char *hintname = "not_defined_in_RFC4178@please_ignore";
const size_t hintname_len = strlen(hintname);
*outbuf = GSS_C_NO_BUFFER;
major_status = GSS_S_FAILURE;
/* Length of DER encoded GeneralString */
tlen = 1 + gssint_der_length_size(hintname_len) + hintname_len;
hintNameSize = tlen;
/* Length of DER encoded hintName */
tlen += 1 + gssint_der_length_size(hintNameSize);
t = gssalloc_malloc(tlen);
if (t == NULL) {
*minor_status = ENOMEM;
goto errout;
}
ptr = t;
*ptr++ = CONTEXT | 0x00; /* hintName identifier */
if (gssint_put_der_length(hintNameSize,
&ptr, tlen - (int)(ptr-t)))
goto errout;
*ptr++ = GENERAL_STRING;
if (gssint_put_der_length(hintname_len, &ptr, tlen - (int)(ptr-t)))
goto errout;
memcpy(ptr, hintname, hintname_len);
ptr += hintname_len;
*outbuf = (gss_buffer_t)malloc(sizeof(gss_buffer_desc));
if (*outbuf == NULL) {
*minor_status = ENOMEM;
goto errout;
}
(*outbuf)->value = (void *)t;
(*outbuf)->length = ptr - t;
t = NULL; /* don't free */
*minor_status = 0;
major_status = GSS_S_COMPLETE;
errout:
if (t != NULL) {
free(t);
}
return (major_status);
}
| 610 |
123,070 | 0 | void RenderWidgetHostImpl::TickActiveSmoothScrollGesture() {
TRACE_EVENT0("input", "RenderWidgetHostImpl::TickActiveSmoothScrollGesture");
tick_active_smooth_scroll_gestures_task_posted_ = false;
if (active_smooth_scroll_gestures_.empty()) {
TRACE_EVENT_INSTANT0("input", "EarlyOut_NoActiveScrollGesture");
return;
}
base::TimeTicks now = TimeTicks::HighResNow();
base::TimeDelta preferred_interval =
base::TimeDelta::FromMilliseconds(kSyntheticScrollMessageIntervalMs);
base::TimeDelta time_until_next_ideal_interval =
(last_smooth_scroll_gestures_tick_time_ + preferred_interval) -
now;
if (time_until_next_ideal_interval.InMilliseconds() > 0) {
TRACE_EVENT_INSTANT1(
"input", "EarlyOut_TickedTooRecently",
"delay", time_until_next_ideal_interval.InMilliseconds());
tick_active_smooth_scroll_gestures_task_posted_ = true;
MessageLoop::current()->PostDelayedTask(
FROM_HERE,
base::Bind(&RenderWidgetHostImpl::TickActiveSmoothScrollGesture,
weak_factory_.GetWeakPtr()),
time_until_next_ideal_interval);
return;
}
last_smooth_scroll_gestures_tick_time_ = now;
std::vector<int> ids_that_are_done;
for (SmoothScrollGestureMap::iterator it =
active_smooth_scroll_gestures_.begin();
it != active_smooth_scroll_gestures_.end();
++it) {
bool active = it->second->ForwardInputEvents(now, this);
if (!active)
ids_that_are_done.push_back(it->first);
}
for(size_t i = 0; i < ids_that_are_done.size(); i++) {
int id = ids_that_are_done[i];
SmoothScrollGestureMap::iterator it =
active_smooth_scroll_gestures_.find(id);
DCHECK(it != active_smooth_scroll_gestures_.end());
active_smooth_scroll_gestures_.erase(it);
Send(new ViewMsg_SmoothScrollCompleted(routing_id_, id));
}
if (!in_process_event_types_.empty())
return;
TRACE_EVENT_INSTANT1("input", "PostTickTask",
"delay", preferred_interval.InMilliseconds());
tick_active_smooth_scroll_gestures_task_posted_ = true;
MessageLoop::current()->PostDelayedTask(
FROM_HERE,
base::Bind(&RenderWidgetHostImpl::TickActiveSmoothScrollGesture,
weak_factory_.GetWeakPtr()),
preferred_interval);
}
| 611 |
68,368 | 0 | void perf_event_mmap(struct vm_area_struct *vma)
{
struct perf_mmap_event mmap_event;
if (!atomic_read(&nr_mmap_events))
return;
mmap_event = (struct perf_mmap_event){
.vma = vma,
/* .file_name */
/* .file_size */
.event_id = {
.header = {
.type = PERF_RECORD_MMAP,
.misc = PERF_RECORD_MISC_USER,
/* .size */
},
/* .pid */
/* .tid */
.start = vma->vm_start,
.len = vma->vm_end - vma->vm_start,
.pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
},
/* .maj (attr_mmap2 only) */
/* .min (attr_mmap2 only) */
/* .ino (attr_mmap2 only) */
/* .ino_generation (attr_mmap2 only) */
/* .prot (attr_mmap2 only) */
/* .flags (attr_mmap2 only) */
};
perf_addr_filters_adjust(vma);
perf_event_mmap_event(&mmap_event);
}
| 612 |
136,934 | 0 | double HTMLInputElement::Minimum() const {
return input_type_->Minimum();
}
| 613 |
134,012 | 0 | AppListControllerDelegate* ExtensionAppItem::GetController() {
return AppListService::Get(chrome::GetActiveDesktop())->
GetControllerDelegate();
}
| 614 |
179,643 | 1 | static void ikev2_parent_inI2outR2_continue(struct pluto_crypto_req_cont *pcrc,
struct pluto_crypto_req *r,
err_t ugh)
{
struct dh_continuation *dh = (struct dh_continuation *)pcrc;
struct msg_digest *md = dh->md;
struct state *const st = md->st;
stf_status e;
DBG(DBG_CONTROLMORE,
DBG_log("ikev2 parent inI2outR2: calculating g^{xy}, sending R2"));
if (st == NULL) {
loglog(RC_LOG_SERIOUS,
"%s: Request was disconnected from state",
__FUNCTION__);
if (dh->md)
release_md(dh->md);
return;
}
/* XXX should check out ugh */
passert(ugh == NULL);
passert(cur_state == NULL);
passert(st != NULL);
passert(st->st_suspended_md == dh->md);
set_suspended(st, NULL); /* no longer connected or suspended */
set_cur_state(st);
st->st_calculating = FALSE;
e = ikev2_parent_inI2outR2_tail(pcrc, r);
if ( e > STF_FAIL) {
/* we do not send a notify because we are the initiator that could be responding to an error notification */
int v2_notify_num = e - STF_FAIL;
DBG_log(
"ikev2_parent_inI2outR2_tail returned STF_FAIL with %s",
enum_name(&ikev2_notify_names, v2_notify_num));
} else if ( e != STF_OK) {
DBG_log("ikev2_parent_inI2outR2_tail returned %s",
enum_name(&stfstatus_name, e));
}
if (dh->md != NULL) {
complete_v2_state_transition(&dh->md, e);
if (dh->md)
release_md(dh->md);
}
reset_globals();
passert(GLOBALS_ARE_RESET());
}
| 615 |
5,783 | 0 | static void ehci_detach(USBPort *port)
{
EHCIState *s = port->opaque;
uint32_t *portsc = &s->portsc[port->index];
const char *owner = (*portsc & PORTSC_POWNER) ? "comp" : "ehci";
trace_usb_ehci_port_detach(port->index, owner);
if (*portsc & PORTSC_POWNER) {
USBPort *companion = s->companion_ports[port->index];
companion->ops->detach(companion);
companion->dev = NULL;
/*
* EHCI spec 4.2.2: "When a disconnect occurs... On the event,
* the port ownership is returned immediately to the EHCI controller."
*/
*portsc &= ~PORTSC_POWNER;
return;
}
ehci_queues_rip_device(s, port->dev, 0);
ehci_queues_rip_device(s, port->dev, 1);
*portsc &= ~(PORTSC_CONNECT|PORTSC_PED|PORTSC_SUSPEND);
*portsc |= PORTSC_CSC;
ehci_raise_irq(s, USBSTS_PCD);
}
| 616 |
185,602 | 1 | void EnableHighDPISupport() {
if (IsHighDPIEnabled() &&
(base::win::GetVersion() < base::win::VERSION_WIN8_1)) {
if (!SetProcessDpiAwarenessWrapper(PROCESS_SYSTEM_DPI_AWARE)) {
SetProcessDPIAwareWrapper();
}
}
}
| 617 |
53,533 | 0 | archive_read_format_7zip_bid(struct archive_read *a, int best_bid)
{
const char *p;
/* If someone has already bid more than 32, then avoid
trashing the look-ahead buffers with a seek. */
if (best_bid > 32)
return (-1);
if ((p = __archive_read_ahead(a, 6, NULL)) == NULL)
return (0);
/* If first six bytes are the 7-Zip signature,
* return the bid right now. */
if (memcmp(p, _7ZIP_SIGNATURE, 6) == 0)
return (48);
/*
* It may a 7-Zip SFX archive file. If first two bytes are
* 'M' and 'Z' available on Windows or first four bytes are
* "\x7F\x45LF" available on posix like system, seek the 7-Zip
* signature. Although we will perform a seek when reading
* a header, what we do not use __archive_read_seek() here is
* due to a bidding performance.
*/
if ((p[0] == 'M' && p[1] == 'Z') || memcmp(p, "\x7F\x45LF", 4) == 0) {
ssize_t offset = SFX_MIN_ADDR;
ssize_t window = 4096;
ssize_t bytes_avail;
while (offset + window <= (SFX_MAX_ADDR)) {
const char *buff = __archive_read_ahead(a,
offset + window, &bytes_avail);
if (buff == NULL) {
/* Remaining bytes are less than window. */
window >>= 1;
if (window < 0x40)
return (0);
continue;
}
p = buff + offset;
while (p + 32 < buff + bytes_avail) {
int step = check_7zip_header_in_sfx(p);
if (step == 0)
return (48);
p += step;
}
offset = p - buff;
}
}
return (0);
}
| 618 |
106,779 | 0 | IntRect WebView::windowToScreen(const IntRect& rect)
{
return rect;
}
| 619 |
36,619 | 0 | static bool send_version(struct pool *pool, json_t *val)
{
char s[RBUFSIZE];
int id = json_integer_value(json_object_get(val, "id"));
if (!id)
return false;
sprintf(s, "{\"id\": %d, \"result\": \""PACKAGE"/"VERSION"\", \"error\": null}", id);
if (!stratum_send(pool, s, strlen(s)))
return false;
return true;
}
| 620 |
81,299 | 0 | static int instance_mkdir(const char *name)
{
struct trace_array *tr;
int ret;
mutex_lock(&event_mutex);
mutex_lock(&trace_types_lock);
ret = -EEXIST;
list_for_each_entry(tr, &ftrace_trace_arrays, list) {
if (tr->name && strcmp(tr->name, name) == 0)
goto out_unlock;
}
ret = -ENOMEM;
tr = kzalloc(sizeof(*tr), GFP_KERNEL);
if (!tr)
goto out_unlock;
tr->name = kstrdup(name, GFP_KERNEL);
if (!tr->name)
goto out_free_tr;
if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
goto out_free_tr;
tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
raw_spin_lock_init(&tr->start_lock);
tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
tr->current_trace = &nop_trace;
INIT_LIST_HEAD(&tr->systems);
INIT_LIST_HEAD(&tr->events);
INIT_LIST_HEAD(&tr->hist_vars);
if (allocate_trace_buffers(tr, trace_buf_size) < 0)
goto out_free_tr;
tr->dir = tracefs_create_dir(name, trace_instance_dir);
if (!tr->dir)
goto out_free_tr;
ret = event_trace_add_tracer(tr->dir, tr);
if (ret) {
tracefs_remove_recursive(tr->dir);
goto out_free_tr;
}
ftrace_init_trace_array(tr);
init_tracer_tracefs(tr, tr->dir);
init_trace_flags_index(tr);
__update_tracer_options(tr);
list_add(&tr->list, &ftrace_trace_arrays);
mutex_unlock(&trace_types_lock);
mutex_unlock(&event_mutex);
return 0;
out_free_tr:
free_trace_buffers(tr);
free_cpumask_var(tr->tracing_cpumask);
kfree(tr->name);
kfree(tr);
out_unlock:
mutex_unlock(&trace_types_lock);
mutex_unlock(&event_mutex);
return ret;
}
| 621 |
24,574 | 0 | static int setup_rx_descbuffer(struct b43_dmaring *ring,
struct b43_dmadesc_generic *desc,
struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
dma_addr_t dmaaddr;
struct sk_buff *skb;
B43_WARN_ON(ring->tx);
skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
if (unlikely(!skb))
return -ENOMEM;
b43_poison_rx_buffer(ring, skb);
dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
/* ugh. try to realloc in zone_dma */
gfp_flags |= GFP_DMA;
dev_kfree_skb_any(skb);
skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
if (unlikely(!skb))
return -ENOMEM;
b43_poison_rx_buffer(ring, skb);
dmaaddr = map_descbuffer(ring, skb->data,
ring->rx_buffersize, 0);
if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
b43err(ring->dev->wl, "RX DMA buffer allocation failed\n");
dev_kfree_skb_any(skb);
return -EIO;
}
}
meta->skb = skb;
meta->dmaaddr = dmaaddr;
ring->ops->fill_descriptor(ring, desc, dmaaddr,
ring->rx_buffersize, 0, 0, 0);
return 0;
}
| 622 |
1,786 | 0 | int main(int argc, char **argv)
{
char c;
int i = unpack_bz2_stream(0, 1);
if (i < 0)
fprintf(stderr, "%s\n", bunzip_errors[-i]);
else if (read(STDIN_FILENO, &c, 1))
fprintf(stderr, "Trailing garbage ignored\n");
return -i;
}
| 623 |
117,204 | 0 | void Texture::Create() {
ScopedGLErrorSuppressor suppressor(decoder_);
Destroy();
glGenTextures(1, &id_);
ScopedTexture2DBinder binder(decoder_, id_);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexImage2D(
GL_TEXTURE_2D, 0, GL_RGBA, 16, 16, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
estimated_size_ = 16u * 16u * 4u;
decoder_->UpdateBackbufferMemoryAccounting();
}
| 624 |
133,119 | 0 | LRESULT HWNDMessageHandler::OnGetObject(UINT message,
WPARAM w_param,
LPARAM l_param) {
LRESULT reference_result = static_cast<LRESULT>(0L);
if (OBJID_CLIENT == l_param) {
base::win::ScopedComPtr<IAccessible> root(
delegate_->GetNativeViewAccessible());
reference_result = LresultFromObject(IID_IAccessible, w_param,
static_cast<IAccessible*>(root.Detach()));
}
return reference_result;
}
| 625 |
118,469 | 0 | int RenderFrameImpl::ShowContextMenu(ContextMenuClient* client,
const ContextMenuParams& params) {
DCHECK(client); // A null client means "internal" when we issue callbacks.
ContextMenuParams our_params(params);
our_params.custom_context.request_id = pending_context_menus_.Add(client);
Send(new FrameHostMsg_ContextMenu(routing_id_, our_params));
return our_params.custom_context.request_id;
}
| 626 |
120,702 | 0 | WebviewInfo::~WebviewInfo() {
}
| 627 |
148,275 | 0 | PrintingContextDelegate::PrintingContextDelegate(int render_process_id,
int render_frame_id)
: render_process_id_(render_process_id),
render_frame_id_(render_frame_id) {}
| 628 |
188,453 | 1 | void Segment::PreloadCluster(Cluster* pCluster, ptrdiff_t idx)
{
assert(pCluster);
assert(pCluster->m_index < 0);
assert(idx >= m_clusterCount);
const long count = m_clusterCount + m_clusterPreloadCount;
long& size = m_clusterSize;
assert(size >= count);
if (count >= size)
{
const long n = (size <= 0) ? 2048 : 2*size;
Cluster** const qq = new Cluster*[n];
Cluster** q = qq;
Cluster** p = m_clusters;
Cluster** const pp = p + count;
while (p != pp)
*q++ = *p++;
delete[] m_clusters;
m_clusters = qq;
size = n;
}
assert(m_clusters);
Cluster** const p = m_clusters + idx;
Cluster** q = m_clusters + count;
assert(q >= p);
assert(q < (m_clusters + size));
while (q > p)
{
Cluster** const qq = q - 1;
assert((*qq)->m_index < 0);
*q = *qq;
q = qq;
}
m_clusters[idx] = pCluster;
++m_clusterPreloadCount;
}
| 629 |
128,515 | 0 | ShellSurface::ShellSurface(Surface* surface,
ShellSurface* parent,
const gfx::Rect& initial_bounds,
bool activatable,
int container)
: widget_(nullptr),
surface_(surface),
parent_(parent ? parent->GetWidget()->GetNativeWindow() : nullptr),
initial_bounds_(initial_bounds),
activatable_(activatable),
container_(container),
pending_show_widget_(false),
scale_(1.0),
pending_scale_(1.0),
scoped_configure_(nullptr),
ignore_window_bounds_changes_(false),
resize_component_(HTCAPTION),
pending_resize_component_(HTCAPTION) {
ash::Shell::GetInstance()->activation_client()->AddObserver(this);
surface_->SetSurfaceDelegate(this);
surface_->AddSurfaceObserver(this);
surface_->window()->Show();
set_owned_by_client();
if (parent_)
parent_->AddObserver(this);
}
| 630 |
175,036 | 0 | status_t MPEG4Extractor::parseDrmSINF(off64_t *offset, off64_t data_offset) {
uint8_t updateIdTag;
if (mDataSource->readAt(data_offset, &updateIdTag, 1) < 1) {
return ERROR_IO;
}
data_offset ++;
if (0x01/*OBJECT_DESCRIPTOR_UPDATE_ID_TAG*/ != updateIdTag) {
return ERROR_MALFORMED;
}
uint8_t numOfBytes;
int32_t size = readSize(data_offset, mDataSource, &numOfBytes);
if (size < 0) {
return ERROR_IO;
}
int32_t classSize = size;
data_offset += numOfBytes;
while(size >= 11 ) {
uint8_t descriptorTag;
if (mDataSource->readAt(data_offset, &descriptorTag, 1) < 1) {
return ERROR_IO;
}
data_offset ++;
if (0x11/*OBJECT_DESCRIPTOR_ID_TAG*/ != descriptorTag) {
return ERROR_MALFORMED;
}
uint8_t buffer[8];
if (mDataSource->readAt(data_offset, buffer, 2) < 2) {
return ERROR_IO;
}
data_offset += 2;
if ((buffer[1] >> 5) & 0x0001) { //url flag is set
return ERROR_MALFORMED;
}
if (mDataSource->readAt(data_offset, buffer, 8) < 8) {
return ERROR_IO;
}
data_offset += 8;
if ((0x0F/*ES_ID_REF_TAG*/ != buffer[1])
|| ( 0x0A/*IPMP_DESCRIPTOR_POINTER_ID_TAG*/ != buffer[5])) {
return ERROR_MALFORMED;
}
SINF *sinf = new SINF;
sinf->trackID = U16_AT(&buffer[3]);
sinf->IPMPDescriptorID = buffer[7];
sinf->next = mFirstSINF;
mFirstSINF = sinf;
size -= (8 + 2 + 1);
}
if (size != 0) {
return ERROR_MALFORMED;
}
if (mDataSource->readAt(data_offset, &updateIdTag, 1) < 1) {
return ERROR_IO;
}
data_offset ++;
if(0x05/*IPMP_DESCRIPTOR_UPDATE_ID_TAG*/ != updateIdTag) {
return ERROR_MALFORMED;
}
size = readSize(data_offset, mDataSource, &numOfBytes);
if (size < 0) {
return ERROR_IO;
}
classSize = size;
data_offset += numOfBytes;
while (size > 0) {
uint8_t tag;
int32_t dataLen;
if (mDataSource->readAt(data_offset, &tag, 1) < 1) {
return ERROR_IO;
}
data_offset ++;
if (0x0B/*IPMP_DESCRIPTOR_ID_TAG*/ == tag) {
uint8_t id;
dataLen = readSize(data_offset, mDataSource, &numOfBytes);
if (dataLen < 0) {
return ERROR_IO;
} else if (dataLen < 4) {
return ERROR_MALFORMED;
}
data_offset += numOfBytes;
if (mDataSource->readAt(data_offset, &id, 1) < 1) {
return ERROR_IO;
}
data_offset ++;
SINF *sinf = mFirstSINF;
while (sinf && (sinf->IPMPDescriptorID != id)) {
sinf = sinf->next;
}
if (sinf == NULL) {
return ERROR_MALFORMED;
}
sinf->len = dataLen - 3;
sinf->IPMPData = new (std::nothrow) char[sinf->len];
if (sinf->IPMPData == NULL) {
return ERROR_MALFORMED;
}
data_offset += 2;
if (mDataSource->readAt(data_offset + 2, sinf->IPMPData, sinf->len) < sinf->len) {
return ERROR_IO;
}
data_offset += sinf->len;
size -= (dataLen + numOfBytes + 1);
}
}
if (size != 0) {
return ERROR_MALFORMED;
}
return UNKNOWN_ERROR; // Return a dummy error.
}
| 631 |
33,003 | 0 | static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen)
{
if (len < sizeof(struct sctp_initmsg))
return -EINVAL;
len = sizeof(struct sctp_initmsg);
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len))
return -EFAULT;
return 0;
}
| 632 |
167,004 | 0 | void CopyReplacedNavigationEntryDataIfPreviouslyEmpty(
const NavigationEntryImpl& replaced_entry,
NavigationEntryImpl* output_entry) {
if (output_entry->GetReplacedEntryData().has_value())
return;
ReplacedNavigationEntryData data;
data.first_committed_url = replaced_entry.GetURL();
data.first_timestamp = replaced_entry.GetTimestamp();
data.first_transition_type = replaced_entry.GetTransitionType();
output_entry->SetReplacedEntryData(data);
}
| 633 |
37,488 | 0 | void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
{
struct kvm_memory_slot *memslot;
gfn_t last_gfn;
int i;
memslot = id_to_memslot(kvm->memslots, slot);
last_gfn = memslot->base_gfn + memslot->npages - 1;
spin_lock(&kvm->mmu_lock);
for (i = PT_PAGE_TABLE_LEVEL;
i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
unsigned long *rmapp;
unsigned long last_index, index;
rmapp = memslot->arch.rmap[i - PT_PAGE_TABLE_LEVEL];
last_index = gfn_to_index(last_gfn, memslot->base_gfn, i);
for (index = 0; index <= last_index; ++index, ++rmapp) {
if (*rmapp)
__rmap_write_protect(kvm, rmapp, false);
if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
kvm_flush_remote_tlbs(kvm);
cond_resched_lock(&kvm->mmu_lock);
}
}
}
kvm_flush_remote_tlbs(kvm);
spin_unlock(&kvm->mmu_lock);
}
| 634 |
21,042 | 0 | static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask, enum charge_type ctype)
{
struct mem_cgroup *memcg = NULL;
unsigned int nr_pages = 1;
struct page_cgroup *pc;
bool oom = true;
int ret;
if (PageTransHuge(page)) {
nr_pages <<= compound_order(page);
VM_BUG_ON(!PageTransHuge(page));
/*
* Never OOM-kill a process for a huge page. The
* fault handler will fall back to regular pages.
*/
oom = false;
}
pc = lookup_page_cgroup(page);
ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
if (ret == -ENOMEM)
return ret;
__mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype, false);
return 0;
}
| 635 |
152,317 | 0 | void RenderFrameImpl::FrameDetached(DetachType type) {
for (auto& observer : observers_)
observer.FrameDetached();
SendUpdateState();
if (type == DetachType::kRemove)
Send(new FrameHostMsg_Detach(routing_id_));
GetLocalRootRenderWidget()->UnregisterRenderFrame(this);
if (is_main_frame_) {
render_view_->DetachWebFrameWidget();
render_widget_->AbortWarmupCompositor();
} else if (render_widget_) {
render_widget_->CloseForFrame();
}
auto it = g_frame_map.Get().find(frame_);
CHECK(it != g_frame_map.Get().end());
CHECK_EQ(it->second, this);
g_frame_map.Get().erase(it);
frame_->Close();
frame_ = nullptr;
if (previous_routing_id_ != MSG_ROUTING_NONE) {
RenderFrameProxy* proxy =
RenderFrameProxy::FromRoutingID(previous_routing_id_);
CHECK(proxy);
CHECK_EQ(routing_id_, proxy->provisional_frame_routing_id());
proxy->set_provisional_frame_routing_id(MSG_ROUTING_NONE);
}
delete this;
}
| 636 |
53,348 | 0 | static void lex_save(lex_t *lex, int c)
{
strbuffer_append_byte(&lex->saved_text, c);
}
| 637 |
135,830 | 0 | Document& SelectionEditor::GetDocument() const {
DCHECK(LifecycleContext());
return *LifecycleContext();
}
| 638 |
144,970 | 0 | void RemoveAncestorObservers() {
for (auto* ancestor : ancestors_)
ancestor->RemoveObserver(this);
ancestors_.clear();
}
| 639 |
76,965 | 0 | format_alg(int port, struct ds *s)
{
switch(port) {
case IPPORT_FTP:
ds_put_format(s, "%salg=%sftp,", colors.param, colors.end);
break;
case IPPORT_TFTP:
ds_put_format(s, "%salg=%stftp,", colors.param, colors.end);
break;
case 0:
/* Don't print. */
break;
default:
ds_put_format(s, "%salg=%s%d,", colors.param, colors.end, port);
break;
}
}
| 640 |
156 | 0 | static EVP_PKEY * php_openssl_generate_private_key(struct php_x509_request * req TSRMLS_DC)
{
char * randfile = NULL;
int egdsocket, seeded;
EVP_PKEY * return_val = NULL;
if (req->priv_key_bits < MIN_KEY_LENGTH) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "private key length is too short; it needs to be at least %d bits, not %d",
MIN_KEY_LENGTH, req->priv_key_bits);
return NULL;
}
randfile = CONF_get_string(req->req_config, req->section_name, "RANDFILE");
php_openssl_load_rand_file(randfile, &egdsocket, &seeded);
if ((req->priv_key = EVP_PKEY_new()) != NULL) {
switch(req->priv_key_type) {
case OPENSSL_KEYTYPE_RSA:
if (EVP_PKEY_assign_RSA(req->priv_key, RSA_generate_key(req->priv_key_bits, 0x10001, NULL, NULL))) {
return_val = req->priv_key;
}
break;
#if !defined(NO_DSA) && defined(HAVE_DSA_DEFAULT_METHOD)
case OPENSSL_KEYTYPE_DSA:
{
DSA *dsapar = DSA_generate_parameters(req->priv_key_bits, NULL, 0, NULL, NULL, NULL, NULL);
if (dsapar) {
DSA_set_method(dsapar, DSA_get_default_method());
if (DSA_generate_key(dsapar)) {
if (EVP_PKEY_assign_DSA(req->priv_key, dsapar)) {
return_val = req->priv_key;
}
} else {
DSA_free(dsapar);
}
}
}
break;
#endif
#if !defined(NO_DH)
case OPENSSL_KEYTYPE_DH:
{
DH *dhpar = DH_generate_parameters(req->priv_key_bits, 2, NULL, NULL);
int codes = 0;
if (dhpar) {
DH_set_method(dhpar, DH_get_default_method());
if (DH_check(dhpar, &codes) && codes == 0 && DH_generate_key(dhpar)) {
if (EVP_PKEY_assign_DH(req->priv_key, dhpar)) {
return_val = req->priv_key;
}
} else {
DH_free(dhpar);
}
}
}
break;
#endif
default:
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unsupported private key type");
}
}
php_openssl_write_rand_file(randfile, egdsocket, seeded);
if (return_val == NULL) {
EVP_PKEY_free(req->priv_key);
req->priv_key = NULL;
return NULL;
}
return return_val;
}
| 641 |
64,179 | 0 | AP_CORE_DECLARE(void) ap_add_per_url_conf(server_rec *s, void *url_config)
{
core_server_config *sconf = ap_get_core_module_config(s->module_config);
void **new_space = (void **)apr_array_push(sconf->sec_url);
*new_space = url_config;
}
| 642 |
58,317 | 0 | static int __init atomic_pool_init(void)
{
struct dma_pool *pool = &atomic_pool;
pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
gfp_t gfp = GFP_KERNEL | GFP_DMA;
unsigned long nr_pages = pool->size >> PAGE_SHIFT;
unsigned long *bitmap;
struct page *page;
struct page **pages;
void *ptr;
int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
bitmap = kzalloc(bitmap_size, GFP_KERNEL);
if (!bitmap)
goto no_bitmap;
pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
if (!pages)
goto no_pages;
if (IS_ENABLED(CONFIG_DMA_CMA))
ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
atomic_pool_init);
else
ptr = __alloc_remap_buffer(NULL, pool->size, gfp, prot, &page,
atomic_pool_init);
if (ptr) {
int i;
for (i = 0; i < nr_pages; i++)
pages[i] = page + i;
spin_lock_init(&pool->lock);
pool->vaddr = ptr;
pool->pages = pages;
pool->bitmap = bitmap;
pool->nr_pages = nr_pages;
pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
(unsigned)pool->size / 1024);
return 0;
}
kfree(pages);
no_pages:
kfree(bitmap);
no_bitmap:
pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
(unsigned)pool->size / 1024);
return -ENOMEM;
}
| 643 |
2,738 | 0 | _dbus_header_cache_invalidate_all (DBusHeader *header)
{
int i;
i = 0;
while (i <= DBUS_HEADER_FIELD_LAST)
{
header->fields[i].value_pos = _DBUS_HEADER_FIELD_VALUE_UNKNOWN;
++i;
}
}
| 644 |
111,453 | 0 | static VirtualKeyboardType convertStringToKeyboardType(const AtomicString& string)
{
DEFINE_STATIC_LOCAL(AtomicString, Default, ("default"));
DEFINE_STATIC_LOCAL(AtomicString, Url, ("url"));
DEFINE_STATIC_LOCAL(AtomicString, Email, ("email"));
DEFINE_STATIC_LOCAL(AtomicString, Password, ("password"));
DEFINE_STATIC_LOCAL(AtomicString, Web, ("web"));
DEFINE_STATIC_LOCAL(AtomicString, Number, ("number"));
DEFINE_STATIC_LOCAL(AtomicString, Symbol, ("symbol"));
DEFINE_STATIC_LOCAL(AtomicString, Phone, ("phone"));
DEFINE_STATIC_LOCAL(AtomicString, Pin, ("pin"));
DEFINE_STATIC_LOCAL(AtomicString, Hex, ("hexadecimal"));
if (string.isEmpty())
return VKBTypeNotSet;
if (equalIgnoringCase(string, Default))
return VKBTypeDefault;
if (equalIgnoringCase(string, Url))
return VKBTypeUrl;
if (equalIgnoringCase(string, Email))
return VKBTypeEmail;
if (equalIgnoringCase(string, Password))
return VKBTypePassword;
if (equalIgnoringCase(string, Web))
return VKBTypeWeb;
if (equalIgnoringCase(string, Number))
return VKBTypeNumPunc;
if (equalIgnoringCase(string, Symbol))
return VKBTypeSymbol;
if (equalIgnoringCase(string, Phone))
return VKBTypePhone;
if (equalIgnoringCase(string, Pin) || equalIgnoringCase(string, Hex))
return VKBTypePin;
return VKBTypeNotSet;
}
| 645 |
61,404 | 0 | static int mov_probe(AVProbeData *p)
{
int64_t offset;
uint32_t tag;
int score = 0;
int moov_offset = -1;
/* check file header */
offset = 0;
for (;;) {
/* ignore invalid offset */
if ((offset + 8) > (unsigned int)p->buf_size)
break;
tag = AV_RL32(p->buf + offset + 4);
switch(tag) {
/* check for obvious tags */
case MKTAG('m','o','o','v'):
moov_offset = offset + 4;
case MKTAG('m','d','a','t'):
case MKTAG('p','n','o','t'): /* detect movs with preview pics like ew.mov and april.mov */
case MKTAG('u','d','t','a'): /* Packet Video PVAuthor adds this and a lot of more junk */
case MKTAG('f','t','y','p'):
if (AV_RB32(p->buf+offset) < 8 &&
(AV_RB32(p->buf+offset) != 1 ||
offset + 12 > (unsigned int)p->buf_size ||
AV_RB64(p->buf+offset + 8) == 0)) {
score = FFMAX(score, AVPROBE_SCORE_EXTENSION);
} else if (tag == MKTAG('f','t','y','p') &&
( AV_RL32(p->buf + offset + 8) == MKTAG('j','p','2',' ')
|| AV_RL32(p->buf + offset + 8) == MKTAG('j','p','x',' ')
)) {
score = FFMAX(score, 5);
} else {
score = AVPROBE_SCORE_MAX;
}
offset = FFMAX(4, AV_RB32(p->buf+offset)) + offset;
break;
/* those are more common words, so rate then a bit less */
case MKTAG('e','d','i','w'): /* xdcam files have reverted first tags */
case MKTAG('w','i','d','e'):
case MKTAG('f','r','e','e'):
case MKTAG('j','u','n','k'):
case MKTAG('p','i','c','t'):
score = FFMAX(score, AVPROBE_SCORE_MAX - 5);
offset = FFMAX(4, AV_RB32(p->buf+offset)) + offset;
break;
case MKTAG(0x82,0x82,0x7f,0x7d):
case MKTAG('s','k','i','p'):
case MKTAG('u','u','i','d'):
case MKTAG('p','r','f','l'):
/* if we only find those cause probedata is too small at least rate them */
score = FFMAX(score, AVPROBE_SCORE_EXTENSION);
offset = FFMAX(4, AV_RB32(p->buf+offset)) + offset;
break;
default:
offset = FFMAX(4, AV_RB32(p->buf+offset)) + offset;
}
}
if(score > AVPROBE_SCORE_MAX - 50 && moov_offset != -1) {
/* moov atom in the header - we should make sure that this is not a
* MOV-packed MPEG-PS */
offset = moov_offset;
while(offset < (p->buf_size - 16)){ /* Sufficient space */
/* We found an actual hdlr atom */
if(AV_RL32(p->buf + offset ) == MKTAG('h','d','l','r') &&
AV_RL32(p->buf + offset + 8) == MKTAG('m','h','l','r') &&
AV_RL32(p->buf + offset + 12) == MKTAG('M','P','E','G')){
av_log(NULL, AV_LOG_WARNING, "Found media data tag MPEG indicating this is a MOV-packed MPEG-PS.\n");
/* We found a media handler reference atom describing an
* MPEG-PS-in-MOV, return a
* low score to force expanding the probe window until
* mpegps_probe finds what it needs */
return 5;
}else
/* Keep looking */
offset+=2;
}
}
return score;
}
| 646 |
185,343 | 1 | bool Instance::HandleInputEvent(const pp::InputEvent& event) {
pp::InputEvent event_device_res(event);
{
pp::MouseInputEvent mouse_event(event);
if (!mouse_event.is_null()) {
pp::Point point = mouse_event.GetPosition();
pp::Point movement = mouse_event.GetMovement();
ScalePoint(device_scale_, &point);
ScalePoint(device_scale_, &movement);
mouse_event = pp::MouseInputEvent(
this,
event.GetType(),
event.GetTimeStamp(),
event.GetModifiers(),
mouse_event.GetButton(),
point,
mouse_event.GetClickCount(),
movement);
event_device_res = mouse_event;
}
}
if (event.GetType() == PP_INPUTEVENT_TYPE_MOUSEMOVE &&
(event.GetModifiers() & PP_INPUTEVENT_MODIFIER_MIDDLEBUTTONDOWN)) {
pp::MouseInputEvent mouse_event(event_device_res);
pp::Point pos = mouse_event.GetPosition();
EnableAutoscroll(pos);
UpdateCursor(CalculateAutoscroll(pos));
return true;
} else {
DisableAutoscroll();
}
#ifdef ENABLE_THUMBNAILS
if (event.GetType() == PP_INPUTEVENT_TYPE_MOUSELEAVE)
thumbnails_.SlideOut();
if (thumbnails_.HandleEvent(event_device_res))
return true;
#endif
if (toolbar_->HandleEvent(event_device_res))
return true;
#ifdef ENABLE_THUMBNAILS
if (v_scrollbar_.get() && event.GetType() == PP_INPUTEVENT_TYPE_MOUSEMOVE) {
pp::MouseInputEvent mouse_event(event);
pp::Point pt = mouse_event.GetPosition();
pp::Rect v_scrollbar_rc;
v_scrollbar_->GetLocation(&v_scrollbar_rc);
if (v_scrollbar_rc.Contains(pt) &&
(event.GetModifiers() & PP_INPUTEVENT_MODIFIER_LEFTBUTTONDOWN)) {
thumbnails_.SlideIn();
}
if (!v_scrollbar_rc.Contains(pt) && thumbnails_.visible() &&
!(event.GetModifiers() & PP_INPUTEVENT_MODIFIER_LEFTBUTTONDOWN) &&
!thumbnails_.rect().Contains(pt)) {
thumbnails_.SlideOut();
}
}
#endif
pp::InputEvent offset_event(event_device_res);
bool try_engine_first = true;
switch (offset_event.GetType()) {
case PP_INPUTEVENT_TYPE_MOUSEDOWN:
case PP_INPUTEVENT_TYPE_MOUSEUP:
case PP_INPUTEVENT_TYPE_MOUSEMOVE:
case PP_INPUTEVENT_TYPE_MOUSEENTER:
case PP_INPUTEVENT_TYPE_MOUSELEAVE: {
pp::MouseInputEvent mouse_event(event_device_res);
pp::MouseInputEvent mouse_event_dip(event);
pp::Point point = mouse_event.GetPosition();
point.set_x(point.x() - available_area_.x());
offset_event = pp::MouseInputEvent(
this,
event.GetType(),
event.GetTimeStamp(),
event.GetModifiers(),
mouse_event.GetButton(),
point,
mouse_event.GetClickCount(),
mouse_event.GetMovement());
if (!engine_->IsSelecting()) {
if (!IsOverlayScrollbar() &&
!available_area_.Contains(mouse_event.GetPosition())) {
try_engine_first = false;
} else if (IsOverlayScrollbar()) {
pp::Rect temp;
if ((v_scrollbar_.get() && v_scrollbar_->GetLocation(&temp) &&
temp.Contains(mouse_event_dip.GetPosition())) ||
(h_scrollbar_.get() && h_scrollbar_->GetLocation(&temp) &&
temp.Contains(mouse_event_dip.GetPosition()))) {
try_engine_first = false;
}
}
}
break;
}
default:
break;
}
if (try_engine_first && engine_->HandleEvent(offset_event))
return true;
if (v_scrollbar_.get() && event.GetType() == PP_INPUTEVENT_TYPE_KEYDOWN) {
pp::KeyboardInputEvent keyboard_event(event);
bool no_h_scrollbar = !h_scrollbar_.get();
uint32_t key_code = keyboard_event.GetKeyCode();
bool page_down = no_h_scrollbar && key_code == ui::VKEY_RIGHT;
bool page_up = no_h_scrollbar && key_code == ui::VKEY_LEFT;
if (zoom_mode_ == ZOOM_FIT_TO_PAGE) {
bool has_shift =
keyboard_event.GetModifiers() & PP_INPUTEVENT_MODIFIER_SHIFTKEY;
bool key_is_space = key_code == ui::VKEY_SPACE;
page_down |= key_is_space || key_code == ui::VKEY_NEXT;
page_up |= (key_is_space && has_shift) || (key_code == ui::VKEY_PRIOR);
}
if (page_down) {
int page = engine_->GetFirstVisiblePage();
if (engine_->GetPageRect(page).bottom() * zoom_ <=
v_scrollbar_->GetValue())
page++;
ScrollToPage(page + 1);
UpdateCursor(PP_CURSORTYPE_POINTER);
return true;
} else if (page_up) {
int page = engine_->GetFirstVisiblePage();
if (engine_->GetPageRect(page).y() * zoom_ >= v_scrollbar_->GetValue())
page--;
ScrollToPage(page);
UpdateCursor(PP_CURSORTYPE_POINTER);
return true;
}
}
if (v_scrollbar_.get() && v_scrollbar_->HandleEvent(event)) {
UpdateCursor(PP_CURSORTYPE_POINTER);
return true;
}
if (h_scrollbar_.get() && h_scrollbar_->HandleEvent(event)) {
UpdateCursor(PP_CURSORTYPE_POINTER);
return true;
}
if (timer_pending_ &&
(event.GetType() == PP_INPUTEVENT_TYPE_MOUSEUP ||
event.GetType() == PP_INPUTEVENT_TYPE_MOUSEMOVE)) {
timer_factory_.CancelAll();
timer_pending_ = false;
} else if (event.GetType() == PP_INPUTEVENT_TYPE_MOUSEMOVE &&
engine_->IsSelecting()) {
bool set_timer = false;
pp::MouseInputEvent mouse_event(event);
if (v_scrollbar_.get() &&
(mouse_event.GetPosition().y() <= 0 ||
mouse_event.GetPosition().y() >= (plugin_dip_size_.height() - 1))) {
v_scrollbar_->ScrollBy(
PP_SCROLLBY_LINE, mouse_event.GetPosition().y() >= 0 ? 1: -1);
set_timer = true;
}
if (h_scrollbar_.get() &&
(mouse_event.GetPosition().x() <= 0 ||
mouse_event.GetPosition().x() >= (plugin_dip_size_.width() - 1))) {
h_scrollbar_->ScrollBy(PP_SCROLLBY_LINE,
mouse_event.GetPosition().x() >= 0 ? 1: -1);
set_timer = true;
}
if (set_timer) {
last_mouse_event_ = pp::MouseInputEvent(event);
pp::CompletionCallback callback =
timer_factory_.NewCallback(&Instance::OnTimerFired);
pp::Module::Get()->core()->CallOnMainThread(kDragTimerMs, callback);
timer_pending_ = true;
}
}
if (event.GetType() == PP_INPUTEVENT_TYPE_KEYDOWN &&
event.GetModifiers() & kDefaultKeyModifier) {
pp::KeyboardInputEvent keyboard_event(event);
switch (keyboard_event.GetKeyCode()) {
case 'A':
engine_->SelectAll();
return true;
}
}
return (event.GetType() == PP_INPUTEVENT_TYPE_MOUSEDOWN);
}
| 647 |
138,041 | 0 | AccessibilityRole AXNodeObject::determineAccessibilityRole() {
if (!getNode())
return UnknownRole;
if ((m_ariaRole = determineAriaRoleAttribute()) != UnknownRole)
return m_ariaRole;
if (getNode()->isTextNode())
return StaticTextRole;
AccessibilityRole role = nativeAccessibilityRoleIgnoringAria();
if (role != UnknownRole)
return role;
if (getNode()->isElementNode()) {
Element* element = toElement(getNode());
if (element->isInCanvasSubtree() && element->supportsFocus())
return GroupRole;
}
return UnknownRole;
}
| 648 |
84,649 | 0 | table_border_width(struct table *t)
{
switch (t->border_mode) {
case BORDER_THIN:
case BORDER_THICK:
return t->maxcol * t->cellspacing + 2 * (RULE_WIDTH + t->cellpadding);
case BORDER_NOWIN:
case BORDER_NONE:
return t->maxcol * t->cellspacing;
default:
/* not reached */
return 0;
}
}
| 649 |
8,954 | 0 | static void net_tx_pkt_do_sw_csum(struct NetTxPkt *pkt)
{
struct iovec *iov = &pkt->vec[NET_TX_PKT_L2HDR_FRAG];
uint32_t csum_cntr;
uint16_t csum = 0;
uint32_t cso;
/* num of iovec without vhdr */
uint32_t iov_len = pkt->payload_frags + NET_TX_PKT_PL_START_FRAG - 1;
uint16_t csl;
struct ip_header *iphdr;
size_t csum_offset = pkt->virt_hdr.csum_start + pkt->virt_hdr.csum_offset;
/* Put zero to checksum field */
iov_from_buf(iov, iov_len, csum_offset, &csum, sizeof csum);
/* Calculate L4 TCP/UDP checksum */
csl = pkt->payload_len;
/* add pseudo header to csum */
iphdr = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base;
csum_cntr = eth_calc_ip4_pseudo_hdr_csum(iphdr, csl, &cso);
/* data checksum */
csum_cntr +=
net_checksum_add_iov(iov, iov_len, pkt->virt_hdr.csum_start, csl, cso);
/* Put the checksum obtained into the packet */
csum = cpu_to_be16(net_checksum_finish(csum_cntr));
iov_from_buf(iov, iov_len, csum_offset, &csum, sizeof csum);
}
| 650 |
27,024 | 0 | static void delayed_calls_add(int type, gpointer data)
{
DelayedCall *dcall = NPW_MemNew(DelayedCall, 1);
if (dcall == NULL)
return;
dcall->type = type;
dcall->data = data;
g_delayed_calls = g_list_append(g_delayed_calls, dcall);
if (g_delayed_calls_id == 0)
g_delayed_calls_id = g_idle_add_full(G_PRIORITY_LOW,
delayed_calls_process_cb, NULL, NULL);
}
| 651 |
35,170 | 0 | int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
int signr;
/* We only dequeue private signals from ourselves, we don't let
* signalfd steal them
*/
signr = __dequeue_signal(&tsk->pending, mask, info);
if (!signr) {
signr = __dequeue_signal(&tsk->signal->shared_pending,
mask, info);
/*
* itimer signal ?
*
* itimers are process shared and we restart periodic
* itimers in the signal delivery path to prevent DoS
* attacks in the high resolution timer case. This is
* compliant with the old way of self restarting
* itimers, as the SIGALRM is a legacy signal and only
* queued once. Changing the restart behaviour to
* restart the timer in the signal dequeue path is
* reducing the timer noise on heavy loaded !highres
* systems too.
*/
if (unlikely(signr == SIGALRM)) {
struct hrtimer *tmr = &tsk->signal->real_timer;
if (!hrtimer_is_queued(tmr) &&
tsk->signal->it_real_incr.tv64 != 0) {
hrtimer_forward(tmr, tmr->base->get_time(),
tsk->signal->it_real_incr);
hrtimer_restart(tmr);
}
}
}
recalc_sigpending();
if (!signr)
return 0;
if (unlikely(sig_kernel_stop(signr))) {
/*
* Set a marker that we have dequeued a stop signal. Our
* caller might release the siglock and then the pending
* stop signal it is about to process is no longer in the
* pending bitmasks, but must still be cleared by a SIGCONT
* (and overruled by a SIGKILL). So those cases clear this
* shared flag after we've set it. Note that this flag may
* remain set after the signal we return is ignored or
* handled. That doesn't matter because its only purpose
* is to alert stop-signal processing code when another
* processor has come along and cleared the flag.
*/
tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
}
if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
/*
* Release the siglock to ensure proper locking order
* of timer locks outside of siglocks. Note, we leave
* irqs disabled here, since the posix-timers code is
* about to disable them again anyway.
*/
spin_unlock(&tsk->sighand->siglock);
do_schedule_next_timer(info);
spin_lock(&tsk->sighand->siglock);
}
return signr;
}
| 652 |
21,700 | 0 | static inline int try_get_ioctx(struct kioctx *kioctx)
{
return atomic_inc_not_zero(&kioctx->users);
}
| 653 |
59,305 | 0 | void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
{
struct file *old_exe_file;
/*
* It is safe to dereference the exe_file without RCU as
* this function is only called if nobody else can access
* this mm -- see comment above for justification.
*/
old_exe_file = rcu_dereference_raw(mm->exe_file);
if (new_exe_file)
get_file(new_exe_file);
rcu_assign_pointer(mm->exe_file, new_exe_file);
if (old_exe_file)
fput(old_exe_file);
}
| 654 |
141,062 | 0 | Document& Document::EnsureTemplateDocument() {
if (IsTemplateDocument())
return *this;
if (template_document_)
return *template_document_;
if (IsHTMLDocument()) {
template_document_ =
HTMLDocument::Create(DocumentInit::Create()
.WithContextDocument(ContextDocument())
.WithURL(BlankURL())
.WithNewRegistrationContext());
} else {
template_document_ =
Document::Create(DocumentInit::Create().WithURL(BlankURL()));
}
template_document_->template_document_host_ = this; // balanced in dtor.
return *template_document_.Get();
}
| 655 |
132,045 | 0 | void LayoutBlockFlow::checkForPaginationLogicalHeightChange(LayoutUnit& pageLogicalHeight, bool& pageLogicalHeightChanged, bool& hasSpecifiedPageLogicalHeight)
{
if (LayoutMultiColumnFlowThread* flowThread = multiColumnFlowThread()) {
LayoutUnit columnHeight;
if (hasDefiniteLogicalHeight() || isLayoutView()) {
LogicalExtentComputedValues computedValues;
computeLogicalHeight(LayoutUnit(), logicalTop(), computedValues);
columnHeight = computedValues.m_extent - borderAndPaddingLogicalHeight() - scrollbarLogicalHeight();
}
pageLogicalHeightChanged = columnHeight != flowThread->columnHeightAvailable();
flowThread->setColumnHeightAvailable(std::max(columnHeight, LayoutUnit()));
} else if (isLayoutFlowThread()) {
LayoutFlowThread* flowThread = toLayoutFlowThread(this);
pageLogicalHeight = flowThread->isPageLogicalHeightKnown() ? LayoutUnit(1) : LayoutUnit();
pageLogicalHeightChanged = flowThread->pageLogicalSizeChanged();
}
}
| 656 |
112,000 | 0 | bool SyncTest::ServerSupportsErrorTriggering() const {
EXPECT_NE(SERVER_TYPE_UNDECIDED, server_type_);
return server_type_ == LOCAL_PYTHON_SERVER;
}
| 657 |
1,917 | 0 | void reds_on_main_agent_start(MainChannelClient *mcc, uint32_t num_tokens)
{
SpiceCharDeviceState *dev_state = reds->agent_state.base;
RedChannelClient *rcc;
if (!vdagent) {
return;
}
spice_assert(vdagent->st && vdagent->st == dev_state);
rcc = main_channel_client_get_base(mcc);
reds->agent_state.client_agent_started = TRUE;
/*
* Note that in older releases, send_tokens were set to ~0 on both client
* and server. The server ignored the client given tokens.
* Thanks to that, when an old client is connected to a new server,
* and vice versa, the sending from the server to the client won't have
* flow control, but will have no other problem.
*/
if (!spice_char_device_client_exists(dev_state, rcc->client)) {
int client_added;
client_added = spice_char_device_client_add(dev_state,
rcc->client,
TRUE, /* flow control */
REDS_VDI_PORT_NUM_RECEIVE_BUFFS,
REDS_AGENT_WINDOW_SIZE,
num_tokens,
red_channel_client_waits_for_migrate_data(rcc));
if (!client_added) {
spice_warning("failed to add client to agent");
red_channel_client_shutdown(rcc);
return;
}
} else {
spice_char_device_send_to_client_tokens_set(dev_state,
rcc->client,
num_tokens);
}
reds->agent_state.write_filter.discard_all = FALSE;
}
| 658 |
49,625 | 0 | static int ffs_func_setup(struct usb_function *f,
const struct usb_ctrlrequest *creq)
{
struct ffs_function *func = ffs_func_from_usb(f);
struct ffs_data *ffs = func->ffs;
unsigned long flags;
int ret;
ENTER();
pr_vdebug("creq->bRequestType = %02x\n", creq->bRequestType);
pr_vdebug("creq->bRequest = %02x\n", creq->bRequest);
pr_vdebug("creq->wValue = %04x\n", le16_to_cpu(creq->wValue));
pr_vdebug("creq->wIndex = %04x\n", le16_to_cpu(creq->wIndex));
pr_vdebug("creq->wLength = %04x\n", le16_to_cpu(creq->wLength));
/*
* Most requests directed to interface go through here
* (notable exceptions are set/get interface) so we need to
* handle them. All other either handled by composite or
* passed to usb_configuration->setup() (if one is set). No
* matter, we will handle requests directed to endpoint here
* as well (as it's straightforward) but what to do with any
* other request?
*/
if (ffs->state != FFS_ACTIVE)
return -ENODEV;
switch (creq->bRequestType & USB_RECIP_MASK) {
case USB_RECIP_INTERFACE:
ret = ffs_func_revmap_intf(func, le16_to_cpu(creq->wIndex));
if (unlikely(ret < 0))
return ret;
break;
case USB_RECIP_ENDPOINT:
ret = ffs_func_revmap_ep(func, le16_to_cpu(creq->wIndex));
if (unlikely(ret < 0))
return ret;
if (func->ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
ret = func->ffs->eps_addrmap[ret];
break;
default:
return -EOPNOTSUPP;
}
spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
ffs->ev.setup = *creq;
ffs->ev.setup.wIndex = cpu_to_le16(ret);
__ffs_event_add(ffs, FUNCTIONFS_SETUP);
spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
return 0;
}
| 659 |
42,615 | 0 | int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
int proto)
{
struct net *net = dev_net(skb->dev);
struct sock *sk;
struct udphdr *uh;
const struct in6_addr *saddr, *daddr;
u32 ulen = 0;
if (!pskb_may_pull(skb, sizeof(struct udphdr)))
goto discard;
saddr = &ipv6_hdr(skb)->saddr;
daddr = &ipv6_hdr(skb)->daddr;
uh = udp_hdr(skb);
ulen = ntohs(uh->len);
if (ulen > skb->len)
goto short_packet;
if (proto == IPPROTO_UDP) {
/* UDP validates ulen. */
/* Check for jumbo payload */
if (ulen == 0)
ulen = skb->len;
if (ulen < sizeof(*uh))
goto short_packet;
if (ulen < skb->len) {
if (pskb_trim_rcsum(skb, ulen))
goto short_packet;
saddr = &ipv6_hdr(skb)->saddr;
daddr = &ipv6_hdr(skb)->daddr;
uh = udp_hdr(skb);
}
}
if (udp6_csum_init(skb, uh, proto))
goto csum_error;
/*
* Multicast receive code
*/
if (ipv6_addr_is_multicast(daddr))
return __udp6_lib_mcast_deliver(net, skb,
saddr, daddr, udptable, proto);
/* Unicast */
/*
* check socket cache ... must talk to Alan about his plans
* for sock caches... i'll skip this for now.
*/
sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
if (sk) {
int ret;
if (!uh->check && !udp_sk(sk)->no_check6_rx) {
sock_put(sk);
udp6_csum_zero_error(skb);
goto csum_error;
}
if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
ip6_compute_pseudo);
ret = udpv6_queue_rcv_skb(sk, skb);
sock_put(sk);
/* a return value > 0 means to resubmit the input, but
* it wants the return to be -protocol, or 0
*/
if (ret > 0)
return -ret;
return 0;
}
if (!uh->check) {
udp6_csum_zero_error(skb);
goto csum_error;
}
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard;
if (udp_lib_checksum_complete(skb))
goto csum_error;
UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
kfree_skb(skb);
return 0;
short_packet:
net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
proto == IPPROTO_UDPLITE ? "-Lite" : "",
saddr, ntohs(uh->source),
ulen, skb->len,
daddr, ntohs(uh->dest));
goto discard;
csum_error:
UDP6_INC_STATS_BH(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
discard:
UDP6_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
kfree_skb(skb);
return 0;
}
| 660 |
72,340 | 0 | cleanup_handler(int sig)
{
cleanup_socket();
#ifdef ENABLE_PKCS11
pkcs11_terminate();
#endif
_exit(2);
}
| 661 |
77,711 | 0 | ofputil_uninit_group_desc(struct ofputil_group_desc *gd)
{
ofputil_bucket_list_destroy(&gd->buckets);
ofputil_group_properties_destroy(&gd->props);
}
| 662 |
36,160 | 0 | static void mk_request_premature_close(int http_status, struct client_session *cs)
{
struct session_request *sr;
struct mk_list *sr_list = &cs->request_list;
struct mk_list *host_list = &config->hosts;
/*
* If the connection is too premature, we need to allocate a temporal session_request
* to do not break the plugins stages
*/
if (mk_list_is_empty(sr_list) == 0) {
sr = &cs->sr_fixed;
memset(sr, 0, sizeof(struct session_request));
mk_request_init(sr);
mk_list_add(&sr->_head, &cs->request_list);
}
else {
sr = mk_list_entry_first(sr_list, struct session_request, _head);
}
/* Raise error */
if (http_status > 0) {
if (!sr->host_conf) {
sr->host_conf = mk_list_entry_first(host_list, struct host, _head);
}
mk_request_error(http_status, cs, sr);
/* STAGE_40, request has ended */
mk_plugin_stage_run(MK_PLUGIN_STAGE_40, cs->socket,
NULL, cs, sr);
}
/* STAGE_50, connection closed and remove client_session*/
mk_plugin_stage_run(MK_PLUGIN_STAGE_50, cs->socket, NULL, NULL, NULL);
mk_session_remove(cs->socket);
}
| 663 |
69,875 | 0 | set_streams_blocked_on_circ(circuit_t *circ, channel_t *chan,
int block, streamid_t stream_id)
{
edge_connection_t *edge = NULL;
int n = 0;
if (circ->n_chan == chan) {
circ->streams_blocked_on_n_chan = block;
if (CIRCUIT_IS_ORIGIN(circ))
edge = TO_ORIGIN_CIRCUIT(circ)->p_streams;
} else {
circ->streams_blocked_on_p_chan = block;
tor_assert(!CIRCUIT_IS_ORIGIN(circ));
edge = TO_OR_CIRCUIT(circ)->n_streams;
}
for (; edge; edge = edge->next_stream) {
connection_t *conn = TO_CONN(edge);
if (stream_id && edge->stream_id != stream_id)
continue;
if (edge->edge_blocked_on_circ != block) {
++n;
edge->edge_blocked_on_circ = block;
}
if (!conn->read_event && !HAS_BUFFEREVENT(conn)) {
/* This connection is a placeholder for something; probably a DNS
* request. It can't actually stop or start reading.*/
continue;
}
if (block) {
if (connection_is_reading(conn))
connection_stop_reading(conn);
} else {
/* Is this right? */
if (!connection_is_reading(conn))
connection_start_reading(conn);
}
}
return n;
}
| 664 |
3,347 | 0 | static void fdctrl_write_ccr(FDCtrl *fdctrl, uint32_t value)
{
/* Reset mode */
if (!(fdctrl->dor & FD_DOR_nRESET)) {
FLOPPY_DPRINTF("Floppy controller in RESET state !\n");
return;
}
FLOPPY_DPRINTF("configuration control register set to 0x%02x\n", value);
/* Only the rate selection bits used in AT mode, and we
* store those in the DSR.
*/
fdctrl->dsr = (fdctrl->dsr & ~FD_DSR_DRATEMASK) |
(value & FD_DSR_DRATEMASK);
}
| 665 |
113,520 | 0 | void WebPage::touchPointAsMouseEvent(const Platform::TouchPoint& point, unsigned modifiers)
{
if (d->m_page->defersLoading())
return;
if (d->m_fullScreenPluginView.get())
return;
d->m_lastUserEventTimestamp = currentTime();
d->m_touchEventHandler->handleTouchPoint(point, modifiers);
}
| 666 |
77,243 | 0 | handle_meter_features_request(struct ofconn *ofconn,
const struct ofp_header *request)
{
struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
struct ofputil_meter_features features;
struct ofpbuf *b;
if (ofproto->ofproto_class->meter_get_features) {
ofproto->ofproto_class->meter_get_features(ofproto, &features);
} else {
memset(&features, 0, sizeof features);
}
b = ofputil_encode_meter_features_reply(&features, request);
ofconn_send_reply(ofconn, b);
return 0;
}
| 667 |
175,596 | 0 | status_t NuMediaExtractor::readSampleData(const sp<ABuffer> &buffer) {
Mutex::Autolock autoLock(mLock);
ssize_t minIndex = fetchTrackSamples();
if (minIndex < 0) {
return ERROR_END_OF_STREAM;
}
TrackInfo *info = &mSelectedTracks.editItemAt(minIndex);
size_t sampleSize = info->mSample->range_length();
if (info->mTrackFlags & kIsVorbis) {
sampleSize += sizeof(int32_t);
}
if (buffer->capacity() < sampleSize) {
return -ENOMEM;
}
const uint8_t *src =
(const uint8_t *)info->mSample->data()
+ info->mSample->range_offset();
memcpy((uint8_t *)buffer->data(), src, info->mSample->range_length());
status_t err = OK;
if (info->mTrackFlags & kIsVorbis) {
err = appendVorbisNumPageSamples(info, buffer);
}
if (err == OK) {
buffer->setRange(0, sampleSize);
}
return err;
}
| 668 |
115,770 | 0 | void SafeBrowsingBlockingPage::RecordUserAction(BlockingPageEvent event) {
DictionaryValue strings;
PopulateMultipleThreatStringDictionary(&strings);
string16 title;
bool success = strings.GetString("title", &title);
DCHECK(success);
std::string action = "SBInterstitial";
if (title ==
l10n_util::GetStringUTF16(IDS_SAFE_BROWSING_MULTI_THREAT_TITLE)) {
action.append("Multiple");
} else if (title ==
l10n_util::GetStringUTF16(IDS_SAFE_BROWSING_MALWARE_TITLE)) {
action.append("Malware");
} else {
DCHECK_EQ(title,
l10n_util::GetStringUTF16(IDS_SAFE_BROWSING_PHISHING_TITLE));
action.append("Phishing");
}
switch (event) {
case SHOW:
action.append("Show");
break;
case PROCEED:
action.append("Proceed");
break;
case DONT_PROCEED:
action.append("DontProceed");
break;
default:
NOTREACHED() << "Unexpected event: " << event;
}
content::RecordComputedAction(action);
}
| 669 |
164,924 | 0 | ResourceDispatcherHostImpl::CreateResourceHandlerForDownload(
net::URLRequest* request,
bool is_content_initiated,
bool must_download,
bool is_new_request) {
DCHECK(!create_download_handler_intercept_.is_null());
std::unique_ptr<ResourceHandler> handler =
create_download_handler_intercept_.Run(request);
handler =
HandleDownloadStarted(request, std::move(handler), is_content_initiated,
must_download, is_new_request);
return handler;
}
| 670 |
129,410 | 0 | error::Error GLES2DecoderImpl::HandleGetActiveUniform(
uint32 immediate_data_size, const cmds::GetActiveUniform& c) {
GLuint program_id = c.program;
GLuint index = c.index;
uint32 name_bucket_id = c.name_bucket_id;
typedef cmds::GetActiveUniform::Result Result;
Result* result = GetSharedMemoryAs<Result*>(
c.result_shm_id, c.result_shm_offset, sizeof(*result));
if (!result) {
return error::kOutOfBounds;
}
if (result->success != 0) {
return error::kInvalidArguments;
}
Program* program = GetProgramInfoNotShader(
program_id, "glGetActiveUniform");
if (!program) {
return error::kNoError;
}
const Program::UniformInfo* uniform_info =
program->GetUniformInfo(index);
if (!uniform_info) {
LOCAL_SET_GL_ERROR(
GL_INVALID_VALUE, "glGetActiveUniform", "index out of range");
return error::kNoError;
}
result->success = 1; // true.
result->size = uniform_info->size;
result->type = uniform_info->type;
Bucket* bucket = CreateBucket(name_bucket_id);
bucket->SetFromString(uniform_info->name.c_str());
return error::kNoError;
}
| 671 |
84,385 | 0 | flatpak_proxy_init (FlatpakProxy *proxy)
{
proxy->policy = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL);
proxy->filters = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, (GDestroyNotify)filter_list_free);
proxy->wildcard_policy = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL);
flatpak_proxy_add_policy (proxy, "org.freedesktop.DBus", FLATPAK_POLICY_TALK);
}
| 672 |
9,651 | 0 | static int php_check_dots(const char *element, int n)
{
while (n-- > 0) if (element[n] != '.') break;
return (n != -1);
}
| 673 |
57,728 | 0 | int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
switch (dr) {
case 0 ... 3:
*val = vcpu->arch.db[dr];
break;
case 4:
/* fall through */
case 6:
if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
*val = vcpu->arch.dr6;
else
*val = kvm_x86_ops->get_dr6(vcpu);
break;
case 5:
/* fall through */
default: /* 7 */
*val = vcpu->arch.dr7;
break;
}
return 0;
}
| 674 |
77,709 | 0 | ofputil_switch_features_has_ports(struct ofpbuf *b)
{
struct ofp_header *oh = b->data;
size_t phy_port_size;
if (oh->version >= OFP13_VERSION) {
/* OpenFlow 1.3+ never has ports in the feature reply. */
return false;
}
phy_port_size = (oh->version == OFP10_VERSION
? sizeof(struct ofp10_phy_port)
: sizeof(struct ofp11_port));
if (ntohs(oh->length) + phy_port_size <= UINT16_MAX) {
/* There's room for additional ports in the feature reply.
* Assume that the list is complete. */
return true;
}
/* The feature reply has no room for more ports. Probably the list is
* truncated. Drop the ports and tell the caller to retrieve them with
* OFPST_PORT_DESC. */
b->size = sizeof *oh + sizeof(struct ofp_switch_features);
ofpmsg_update_length(b);
return false;
}
| 675 |
134,844 | 0 | void SetAllowableError(int amount) { allowable_error_ = amount; }
| 676 |
12,683 | 0 | void DTLS_RECORD_LAYER_free(RECORD_LAYER *rl)
{
DTLS_RECORD_LAYER_clear(rl);
pqueue_free(rl->d->unprocessed_rcds.q);
pqueue_free(rl->d->processed_rcds.q);
pqueue_free(rl->d->buffered_app_data.q);
OPENSSL_free(rl->d);
rl->d = NULL;
}
| 677 |
135,312 | 0 | void Document::notifyLayoutTreeOfSubtreeChanges()
{
if (!layoutView()->wasNotifiedOfSubtreeChange())
return;
m_lifecycle.advanceTo(DocumentLifecycle::InLayoutSubtreeChange);
layoutView()->handleSubtreeModifications();
ASSERT(!layoutView()->wasNotifiedOfSubtreeChange());
m_lifecycle.advanceTo(DocumentLifecycle::LayoutSubtreeChangeClean);
}
| 678 |
50,993 | 0 | static int do_proc_dointvec_ms_jiffies_conv(bool *negp, unsigned long *lvalp,
int *valp,
int write, void *data)
{
if (write) {
unsigned long jif = msecs_to_jiffies(*negp ? -*lvalp : *lvalp);
if (jif > INT_MAX)
return 1;
*valp = (int)jif;
} else {
int val = *valp;
unsigned long lval;
if (val < 0) {
*negp = true;
lval = -(unsigned long)val;
} else {
*negp = false;
lval = (unsigned long)val;
}
*lvalp = jiffies_to_msecs(lval);
}
return 0;
}
| 679 |
154,027 | 0 | void GLES2DecoderImpl::DoGetVertexAttribiv(GLuint index,
GLenum pname,
GLint* params,
GLsizei params_size) {
DoGetVertexAttribImpl<GLint>(index, pname, params);
}
| 680 |
171,602 | 0 | size_t strnlen32(const char32_t *s, size_t maxlen)
{
const char32_t *ss = s;
while ((maxlen > 0) && *ss) {
ss++;
maxlen--;
}
return ss-s;
}
| 681 |
32,227 | 0 | u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
{
u32 hash;
if (skb_rx_queue_recorded(skb)) {
hash = skb_get_rx_queue(skb);
while (unlikely(hash >= dev->real_num_tx_queues))
hash -= dev->real_num_tx_queues;
return hash;
}
if (skb->sk && skb->sk->sk_hash)
hash = skb->sk->sk_hash;
else
hash = skb->protocol;
hash = jhash_1word(hash, skb_tx_hashrnd);
return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
}
| 682 |
63,126 | 0 | void Huff_setBloc(int _bloc)
{
bloc = _bloc;
}
| 683 |
160,358 | 0 | void NEVER_INLINE FreeList::checkFreedMemoryIsZapped(Address address,
size_t size) {
for (size_t i = 0; i < size; i++) {
ASSERT(address[i] == reuseAllowedZapValue ||
address[i] == reuseForbiddenZapValue);
}
}
| 684 |
119,161 | 0 | void XMLHttpRequest::clearResponseBuffers()
{
m_responseText.clear();
m_responseEncoding = String();
m_createdDocument = false;
m_responseDocument = 0;
m_responseBlob = 0;
m_responseStream = 0;
m_binaryResponseBuilder.clear();
m_responseArrayBuffer.clear();
}
| 685 |
15,756 | 0 | static uint64_t ns_to_ticks(uint64_t value)
{
return (muldiv64(value, FS_PER_NS, HPET_CLK_PERIOD));
}
| 686 |
117,547 | 0 | void BrightnessObserver::BrightnessChanged(int level, bool user_initiated) {
if (user_initiated)
BrightnessBubble::GetInstance()->ShowBubble(level, true);
else
BrightnessBubble::GetInstance()->UpdateWithoutShowingBubble(level, true);
VolumeBubble::GetInstance()->HideBubble();
}
| 687 |
8,009 | 0 | static void vnc_copy(VncState *vs, int src_x, int src_y, int dst_x, int dst_y, int w, int h)
{
/* send bitblit op to the vnc client */
vnc_lock_output(vs);
vnc_write_u8(vs, VNC_MSG_SERVER_FRAMEBUFFER_UPDATE);
vnc_write_u8(vs, 0);
vnc_write_u16(vs, 1); /* number of rects */
vnc_framebuffer_update(vs, dst_x, dst_y, w, h, VNC_ENCODING_COPYRECT);
vnc_write_u16(vs, src_x);
vnc_write_u16(vs, src_y);
vnc_unlock_output(vs);
vnc_flush(vs);
}
| 688 |
161,984 | 0 | void Create(service_manager::mojom::ServiceFactoryRequest request) {
service_factory_bindings_.AddBinding(this, std::move(request));
}
| 689 |
1,852 | 0 | void reds_client_disconnect(RedClient *client)
{
RedsMigTargetClient *mig_client;
if (exit_on_disconnect)
{
spice_info("Exiting server because of client disconnect.\n");
exit(0);
}
if (!client || client->disconnecting) {
spice_debug("client %p already during disconnection", client);
return;
}
spice_info(NULL);
/* disconnecting is set to prevent recursion because of the following:
* main_channel_client_on_disconnect->
* reds_client_disconnect->red_client_destroy->main_channel...
*/
client->disconnecting = TRUE;
mig_client = reds_mig_target_client_find(client);
if (mig_client) {
reds_mig_target_client_free(mig_client);
}
if (reds->mig_wait_disconnect) {
reds_mig_remove_wait_disconnect_client(client);
}
if (reds->agent_state.base) {
        /* note that vdagent might be NULL, if the vdagent was once
         * up and then was removed */
if (spice_char_device_client_exists(reds->agent_state.base, client)) {
spice_char_device_client_remove(reds->agent_state.base, client);
}
}
ring_remove(&client->link);
reds->num_clients--;
red_client_destroy(client);
if (reds->num_clients == 0) {
/* Let the agent know the client is disconnected */
if (reds->agent_state.base) {
SpiceCharDeviceWriteBuffer *char_dev_buf;
VDInternalBuf *internal_buf;
uint32_t total_msg_size;
total_msg_size = sizeof(VDIChunkHeader) + sizeof(VDAgentMessage);
char_dev_buf = spice_char_device_write_buffer_get_server_no_token(
reds->agent_state.base, total_msg_size);
char_dev_buf->buf_used = total_msg_size;
internal_buf = (VDInternalBuf *)char_dev_buf->buf;
internal_buf->chunk_header.port = VDP_SERVER_PORT;
internal_buf->chunk_header.size = sizeof(VDAgentMessage);
internal_buf->header.protocol = VD_AGENT_PROTOCOL;
internal_buf->header.type = VD_AGENT_CLIENT_DISCONNECTED;
internal_buf->header.opaque = 0;
internal_buf->header.size = 0;
spice_char_device_write_buffer_add(reds->agent_state.base,
char_dev_buf);
}
/* Reset write filter to start with clean state on client reconnect */
agent_msg_filter_init(&reds->agent_state.write_filter, agent_copypaste,
agent_file_xfer, TRUE);
/* Throw away pending chunks from the current (if any) and future
* messages read from the agent */
reds->agent_state.read_filter.result = AGENT_MSG_FILTER_DISCARD;
reds->agent_state.read_filter.discard_all = TRUE;
free(reds->agent_state.mig_data);
reds->agent_state.mig_data = NULL;
reds_mig_cleanup();
}
}
| 690 |
180,240 | 1 | SPL_METHOD(SplFileObject, seek)
{
spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
long line_pos;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &line_pos) == FAILURE) {
return;
}
if (line_pos < 0) {
zend_throw_exception_ex(spl_ce_LogicException, 0 TSRMLS_CC, "Can't seek file %s to negative line %ld", intern->file_name, line_pos);
RETURN_FALSE;
}
spl_filesystem_file_rewind(getThis(), intern TSRMLS_CC);
while(intern->u.file.current_line_num < line_pos) {
if (spl_filesystem_file_read_line(getThis(), intern, 1 TSRMLS_CC) == FAILURE) {
break;
}
}
} /* }}} */
/* {{{ Function/Class/Method definitions */
| 691 |
21,193 | 0 | int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
pte_t *new = pte_alloc_one_kernel(&init_mm, address);
if (!new)
return -ENOMEM;
smp_wmb(); /* See comment in __pte_alloc */
spin_lock(&init_mm.page_table_lock);
if (likely(pmd_none(*pmd))) { /* Has another populated it ? */
pmd_populate_kernel(&init_mm, pmd, new);
new = NULL;
} else
VM_BUG_ON(pmd_trans_splitting(*pmd));
spin_unlock(&init_mm.page_table_lock);
if (new)
pte_free_kernel(&init_mm, new);
return 0;
}
| 692 |
101,086 | 0 | void DeleteOriginData(const GURL& origin,
StorageType type) {
quota_status_ = kQuotaStatusUnknown;
quota_manager_->DeleteOriginData(origin, type,
callback_factory_.NewCallback(
&QuotaManagerTest::StatusCallback));
}
| 693 |
163,183 | 0 | void TestPDFNavigationFromFrame(
const std::string& javascript,
ExpectedNavigationStatus expected_navigation_status) {
RenderFrameHost* child =
ChildFrameAt(shell()->web_contents()->GetMainFrame(), 0);
ASSERT_TRUE(child);
if (AreAllSitesIsolatedForTesting()) {
ASSERT_TRUE(child->IsCrossProcessSubframe());
}
ExecuteScriptAndCheckPDFNavigation(child, javascript,
expected_navigation_status);
}
| 694 |
172,067 | 0 | static void *rfcomm_cback(tBTA_JV_EVT event, tBTA_JV *p_data, void *user_data) {
void *new_user_data = NULL;
switch (event) {
case BTA_JV_RFCOMM_START_EVT:
on_srv_rfc_listen_started(&p_data->rfc_start, (uintptr_t)user_data);
break;
case BTA_JV_RFCOMM_CL_INIT_EVT:
on_cl_rfc_init(&p_data->rfc_cl_init, (uintptr_t)user_data);
break;
case BTA_JV_RFCOMM_OPEN_EVT:
BTA_JvSetPmProfile(p_data->rfc_open.handle,BTA_JV_PM_ID_1,BTA_JV_CONN_OPEN);
on_cli_rfc_connect(&p_data->rfc_open, (uintptr_t)user_data);
break;
case BTA_JV_RFCOMM_SRV_OPEN_EVT:
BTA_JvSetPmProfile(p_data->rfc_srv_open.handle,BTA_JV_PM_ALL,BTA_JV_CONN_OPEN);
new_user_data = (void *)(uintptr_t)on_srv_rfc_connect(&p_data->rfc_srv_open, (uintptr_t)user_data);
break;
case BTA_JV_RFCOMM_CLOSE_EVT:
APPL_TRACE_DEBUG("BTA_JV_RFCOMM_CLOSE_EVT: user_data:%d", (uintptr_t)user_data);
on_rfc_close(&p_data->rfc_close, (uintptr_t)user_data);
break;
case BTA_JV_RFCOMM_WRITE_EVT:
on_rfc_write_done(&p_data->rfc_write, (uintptr_t)user_data);
break;
case BTA_JV_RFCOMM_CONG_EVT:
on_rfc_outgoing_congest(&p_data->rfc_cong, (uintptr_t)user_data);
break;
case BTA_JV_RFCOMM_READ_EVT:
case BTA_JV_RFCOMM_DATA_IND_EVT:
break;
default:
LOG_ERROR("%s unhandled event %d, slot id: %zi", __func__, event, (uintptr_t)user_data);
break;
}
return new_user_data;
}
| 695 |
150,696 | 0 | int PageInfoUI::GetConnectionIconID(PageInfo::SiteConnectionStatus status) {
int resource_id = IDR_PAGEINFO_INFO;
switch (status) {
case PageInfo::SITE_CONNECTION_STATUS_UNKNOWN:
case PageInfo::SITE_CONNECTION_STATUS_INTERNAL_PAGE:
break;
case PageInfo::SITE_CONNECTION_STATUS_ENCRYPTED:
resource_id = IDR_PAGEINFO_GOOD;
break;
case PageInfo::SITE_CONNECTION_STATUS_INSECURE_PASSIVE_SUBRESOURCE:
case PageInfo::SITE_CONNECTION_STATUS_INSECURE_FORM_ACTION:
resource_id = IDR_PAGEINFO_WARNING_MINOR;
break;
case PageInfo::SITE_CONNECTION_STATUS_UNENCRYPTED:
resource_id = IDR_PAGEINFO_WARNING_MAJOR;
break;
case PageInfo::SITE_CONNECTION_STATUS_INSECURE_ACTIVE_SUBRESOURCE:
case PageInfo::SITE_CONNECTION_STATUS_ENCRYPTED_ERROR:
resource_id = IDR_PAGEINFO_BAD;
break;
}
return resource_id;
}
| 696 |
122,899 | 0 | void RenderProcessHostImpl::ReceivedBadMessage() {
if (run_renderer_in_process()) {
CHECK(false);
}
NOTREACHED();
base::KillProcess(GetHandle(), RESULT_CODE_KILLED_BAD_MESSAGE,
false);
}
| 697 |
183,072 | 1 | static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
int *addr_len, int peer)
{
struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
struct sock *sk = sock->sk;
struct hci_dev *hdev = hci_pi(sk)->hdev;
BT_DBG("sock %p sk %p", sock, sk);
if (!hdev)
return -EBADFD;
lock_sock(sk);
*addr_len = sizeof(*haddr);
haddr->hci_family = AF_BLUETOOTH;
haddr->hci_dev = hdev->id;
release_sock(sk);
return 0;
}
| 698 |
53,015 | 0 | static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
const char
*methods[] =
{
"point",
"replace",
"floodfill",
"filltoborder",
"reset",
"?"
};
PointInfo
p,
q,
point;
register ssize_t
i,
x;
ssize_t
coordinates,
y;
x=(ssize_t) ceil(primitive_info->point.x-0.5);
y=(ssize_t) ceil(primitive_info->point.y-0.5);
switch (primitive_info->primitive)
{
case AlphaPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y,
methods[primitive_info->method]);
return;
}
case ColorPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"ColorPrimitive %.20g,%.20g %s",(double) x,(double) y,
methods[primitive_info->method]);
return;
}
case ImagePrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"ImagePrimitive %.20g,%.20g",(double) x,(double) y);
return;
}
case PointPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"PointPrimitive %.20g,%.20g %s",(double) x,(double) y,
methods[primitive_info->method]);
return;
}
case TextPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"TextPrimitive %.20g,%.20g",(double) x,(double) y);
return;
}
default:
break;
}
coordinates=0;
p=primitive_info[0].point;
q.x=(-1.0);
q.y=(-1.0);
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
if (coordinates <= 0)
{
coordinates=(ssize_t) primitive_info[i].coordinates;
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" begin open (%.20g)",(double) coordinates);
p=point;
}
point=primitive_info[i].point;
if ((fabs(q.x-point.x) >= MagickEpsilon) ||
(fabs(q.y-point.y) >= MagickEpsilon))
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y);
else
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y);
q=point;
coordinates--;
if (coordinates > 0)
continue;
if ((fabs(p.x-point.x) >= MagickEpsilon) ||
(fabs(p.y-point.y) >= MagickEpsilon))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)",
(double) coordinates);
else
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end open (%.20g)",
(double) coordinates);
}
}
| 699 |