unique_id (int64, 13–189k) | target (int64, 0–1) | code (string, lengths 20–241k) | __index_level_0__ (int64, 0–18.9k) |
---|---|---|---|
11,442 | 0 | fbFetch_a2r2g2b2 (const FbBits *bits, int x, int width, CARD32 *buffer, miIndexedPtr indexed)
{
const CARD8 *pixel = (const CARD8 *)bits + x;
const CARD8 *end = pixel + width;
while (pixel < end) {
CARD32 p = READ(pixel++);
CARD32 a,r,g,b;
a = ((p & 0xc0) * 0x55) << 18;
r = ((p & 0x30) * 0x55) << 12;
g = ((p & 0x0c) * 0x55) << 6;
b = ((p & 0x03) * 0x55);
WRITE(buffer++, a|r|g|b);
}
}
| 16,800 |
22,110 | 0 | void exit_fs(struct task_struct *tsk)
{
struct fs_struct * fs = tsk->fs;
if (fs) {
task_lock(tsk);
tsk->fs = NULL;
task_unlock(tsk);
put_fs_struct(fs);
}
}
| 16,801 |
7,369 | 0 | add_mlist(struct mlist *mlp, struct magic_map *map, size_t idx)
{
struct mlist *ml;
if ((ml = CAST(struct mlist *, emalloc(sizeof(*ml)))) == NULL)
return -1;
ml->map = idx == 0 ? map : NULL;
ml->magic = map->magic[idx];
ml->nmagic = map->nmagic[idx];
mlp->prev->next = ml;
ml->prev = mlp->prev;
ml->next = mlp;
mlp->prev = ml;
return 0;
}
| 16,802 |
97,811 | 0 | PageClickTracker::PageClickTracker(RenderView* render_view)
: render_view_(render_view),
was_focused_(false) {
}
| 16,803 |
49,351 | 0 | static int fwnet_stop(struct net_device *net)
{
struct fwnet_device *dev = netdev_priv(net);
netif_stop_queue(net);
fwnet_broadcast_stop(dev);
return 0;
}
| 16,804 |
119,744 | 0 | bool NavigationControllerImpl::CanGoBack() const {
return entries_.size() > 1 && GetCurrentEntryIndex() > 0;
}
| 16,805 |
79,136 | 0 | static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
const char __user *inbuf,
int in_len, int out_len)
{
struct rdma_ucm_join_ip_mcast cmd;
struct rdma_ucm_join_mcast join_cmd;
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
join_cmd.response = cmd.response;
join_cmd.uid = cmd.uid;
join_cmd.id = cmd.id;
join_cmd.addr_size = rdma_addr_size_in6(&cmd.addr);
if (!join_cmd.addr_size)
return -EINVAL;
join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER;
memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);
return ucma_process_join(file, &join_cmd, out_len);
}
| 16,806 |
114,248 | 0 | void GpuChannelHost::MessageFilter::AddRoute(
int route_id,
base::WeakPtr<IPC::Channel::Listener> listener,
scoped_refptr<MessageLoopProxy> loop) {
DCHECK(parent_->factory_->IsIOThread());
DCHECK(listeners_.find(route_id) == listeners_.end());
GpuListenerInfo info;
info.listener = listener;
info.loop = loop;
listeners_[route_id] = info;
}
| 16,807 |
36,355 | 0 | static inline int nested_symlink(struct path *path, struct nameidata *nd)
{
int res;
if (unlikely(current->link_count >= MAX_NESTED_LINKS)) {
path_put_conditional(path, nd);
path_put(&nd->path);
return -ELOOP;
}
BUG_ON(nd->depth >= MAX_NESTED_LINKS);
nd->depth++;
current->link_count++;
do {
struct path link = *path;
void *cookie;
res = follow_link(&link, nd, &cookie);
if (res)
break;
res = walk_component(nd, path, LOOKUP_FOLLOW);
put_link(nd, &link, cookie);
} while (res > 0);
current->link_count--;
nd->depth--;
return res;
}
| 16,808 |
90,915 | 0 | std::string GenerateUserAgent()
{
srand((unsigned int)time(NULL));
int cversion = rand() % 0xFFFF;
int mversion = rand() % 3;
int sversion = rand() % 3;
std::stringstream sstr;
sstr << "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/" << (601 + sversion) << "." << (36+mversion) << " (KHTML, like Gecko) Chrome/" << (53 + mversion) << ".0." << cversion << ".0 Safari/" << (601 + sversion) << "." << (36+sversion);
return sstr.str();
}
| 16,809 |
90,928 | 0 | int SetThreadName(const std::thread::native_handle_type &thread, const char* threadName) {
DWORD dwThreadID = ::GetThreadId( static_cast<HANDLE>( thread ) );
THREADNAME_INFO info;
info.dwType = 0x1000;
info.szName = threadName;
info.dwThreadID = dwThreadID;
info.dwFlags = 0;
#pragma warning(push)
#pragma warning(disable: 6320 6322)
__try{
RaiseException(MS_VC_EXCEPTION, 0, sizeof(info) / sizeof(ULONG_PTR), (ULONG_PTR*)&info);
}
__except (EXCEPTION_EXECUTE_HANDLER){
}
#pragma warning(pop)
return 0;
}
| 16,810 |
80,861 | 0 | GF_Err styl_dump(GF_Box *a, FILE * trace)
{
u32 i;
GF_TextStyleBox*p = (GF_TextStyleBox*)a;
gf_isom_box_dump_start(a, "TextStyleBox", trace);
fprintf(trace, ">\n");
for (i=0; i<p->entry_count; i++) tx3g_dump_style(trace, &p->styles[i]);
if (!p->size) {
fprintf(trace, "<StyleRecord startChar=\"\" endChar=\"\" fontID=\"\" styles=\"Normal|Bold|Italic|Underlined\" fontSize=\"\" textColor=\"\" />\n");
}
gf_isom_box_dump_done("TextStyleBox", a, trace);
return GF_OK;
}
| 16,811 |
130,889 | 0 | static void methodWithUnsignedLongSequenceMethod(const v8::FunctionCallbackInfo<v8::Value>& info)
{
if (UNLIKELY(info.Length() < 1)) {
throwTypeError(ExceptionMessages::failedToExecute("methodWithUnsignedLongSequence", "TestObject", ExceptionMessages::notEnoughArguments(1, info.Length())), info.GetIsolate());
return;
}
TestObject* imp = V8TestObject::toNative(info.Holder());
V8TRYCATCH_VOID(Vector<unsigned>, unsignedLongSequence, toNativeArray<unsigned>(info[0], 1, info.GetIsolate()));
imp->methodWithUnsignedLongSequence(unsignedLongSequence);
}
| 16,812 |
97,529 | 0 | FrameLoader::~FrameLoader()
{
setOpener(0);
HashSet<Frame*>::iterator end = m_openedFrames.end();
for (HashSet<Frame*>::iterator it = m_openedFrames.begin(); it != end; ++it)
(*it)->loader()->m_opener = 0;
m_client->frameLoaderDestroyed();
}
| 16,813 |
103,194 | 0 | void Browser::ShowBrokenPageTab(TabContents* contents) {
UserMetrics::RecordAction(UserMetricsAction("ReportBug"), profile_);
string16 page_title = contents->GetTitle();
NavigationEntry* entry = contents->controller().GetActiveEntry();
if (!entry)
return;
std::string page_url = entry->url().spec();
std::vector<std::string> subst;
subst.push_back(UTF16ToASCII(page_title));
subst.push_back(page_url);
std::string report_page_url =
ReplaceStringPlaceholders(kBrokenPageUrl, subst, NULL);
ShowSingletonTab(GURL(report_page_url));
}
| 16,814 |
64,470 | 0 | R_API void r_config_bump(RConfig *cfg, const char *key) {
char *orig = strdup (r_config_get (cfg, key));
r_config_set (cfg, key, orig);
free (orig);
}
| 16,815 |
77,976 | 0 | static void InitPSDInfo(const Image *image, PSDInfo *info)
{
info->version=1;
info->columns=image->columns;
info->rows=image->rows;
/* Setting the mode to a value that won't change the colorspace */
info->mode=10;
info->channels=1U;
if (image->storage_class == PseudoClass)
info->mode=2; // indexed mode
else
{
info->channels=(unsigned short) image->number_channels;
info->min_channels=info->channels;
if (image->alpha_trait == BlendPixelTrait)
info->min_channels--;
}
}
| 16,816 |
12,481 | 0 | static void cleanup1_doall(ADDED_OBJ *a)
{
a->obj->nid=0;
a->obj->flags|=ASN1_OBJECT_FLAG_DYNAMIC|
ASN1_OBJECT_FLAG_DYNAMIC_STRINGS|
ASN1_OBJECT_FLAG_DYNAMIC_DATA;
}
| 16,817 |
68,758 | 0 | static int klsi_105_prepare_write_buffer(struct usb_serial_port *port,
void *dest, size_t size)
{
unsigned char *buf = dest;
int count;
count = kfifo_out_locked(&port->write_fifo, buf + KLSI_HDR_LEN, size,
&port->lock);
put_unaligned_le16(count, buf);
return count + KLSI_HDR_LEN;
}
| 16,818 |
2,084 | 0 | RedChannelClient *red_channel_client_create(int size, RedChannel *channel, RedClient *client,
RedsStream *stream,
int monitor_latency,
int num_common_caps, uint32_t *common_caps,
int num_caps, uint32_t *caps)
{
RedChannelClient *rcc = NULL;
pthread_mutex_lock(&client->lock);
if (!red_channel_client_pre_create_validate(channel, client)) {
goto error;
}
spice_assert(stream && channel && size >= sizeof(RedChannelClient));
rcc = spice_malloc0(size);
rcc->stream = stream;
rcc->channel = channel;
rcc->client = client;
rcc->refs = 1;
rcc->ack_data.messages_window = ~0; // blocks send message (maybe use send_data.blocked + block flags)
rcc->ack_data.client_generation = ~0;
rcc->ack_data.client_window = CLIENT_ACK_WINDOW;
rcc->send_data.main.marshaller = spice_marshaller_new();
rcc->send_data.urgent.marshaller = spice_marshaller_new();
rcc->send_data.marshaller = rcc->send_data.main.marshaller;
rcc->incoming.opaque = rcc;
rcc->incoming.cb = &channel->incoming_cb;
rcc->outgoing.opaque = rcc;
rcc->outgoing.cb = &channel->outgoing_cb;
rcc->outgoing.pos = 0;
rcc->outgoing.size = 0;
red_channel_client_set_remote_caps(rcc, num_common_caps, common_caps, num_caps, caps);
if (red_channel_client_test_remote_common_cap(rcc, SPICE_COMMON_CAP_MINI_HEADER)) {
rcc->incoming.header = mini_header_wrapper;
rcc->send_data.header = mini_header_wrapper;
rcc->is_mini_header = TRUE;
} else {
rcc->incoming.header = full_header_wrapper;
rcc->send_data.header = full_header_wrapper;
rcc->is_mini_header = FALSE;
}
rcc->incoming.header.data = rcc->incoming.header_buf;
rcc->incoming.serial = 1;
if (!channel->channel_cbs.config_socket(rcc)) {
goto error;
}
ring_init(&rcc->pipe);
rcc->pipe_size = 0;
stream->watch = channel->core->watch_add(stream->socket,
SPICE_WATCH_EVENT_READ,
red_channel_client_event, rcc);
rcc->id = channel->clients_num;
red_channel_add_client(channel, rcc);
red_client_add_channel(client, rcc);
red_channel_ref(channel);
pthread_mutex_unlock(&client->lock);
if (monitor_latency) {
rcc->latency_monitor.timer = channel->core->timer_add(
red_channel_client_ping_timer, rcc);
if (!client->during_target_migrate) {
red_channel_client_start_ping_timer(rcc, PING_TEST_IDLE_NET_TIMEOUT_MS);
}
rcc->latency_monitor.roundtrip = -1;
}
return rcc;
error:
free(rcc);
reds_stream_free(stream);
pthread_mutex_unlock(&client->lock);
return NULL;
}
| 16,819 |
72,335 | 0 | after_select(fd_set *readset, fd_set *writeset)
{
struct sockaddr_un sunaddr;
socklen_t slen;
char buf[1024];
int len, sock, r;
u_int i, orig_alloc;
uid_t euid;
gid_t egid;
for (i = 0, orig_alloc = sockets_alloc; i < orig_alloc; i++)
switch (sockets[i].type) {
case AUTH_UNUSED:
break;
case AUTH_SOCKET:
if (FD_ISSET(sockets[i].fd, readset)) {
slen = sizeof(sunaddr);
sock = accept(sockets[i].fd,
(struct sockaddr *)&sunaddr, &slen);
if (sock < 0) {
error("accept from AUTH_SOCKET: %s",
strerror(errno));
break;
}
if (getpeereid(sock, &euid, &egid) < 0) {
error("getpeereid %d failed: %s",
sock, strerror(errno));
close(sock);
break;
}
if ((euid != 0) && (getuid() != euid)) {
error("uid mismatch: "
"peer euid %u != uid %u",
(u_int) euid, (u_int) getuid());
close(sock);
break;
}
new_socket(AUTH_CONNECTION, sock);
}
break;
case AUTH_CONNECTION:
if (sshbuf_len(sockets[i].output) > 0 &&
FD_ISSET(sockets[i].fd, writeset)) {
len = write(sockets[i].fd,
sshbuf_ptr(sockets[i].output),
sshbuf_len(sockets[i].output));
if (len == -1 && (errno == EAGAIN ||
errno == EINTR))
continue;
if (len <= 0) {
close_socket(&sockets[i]);
break;
}
if ((r = sshbuf_consume(sockets[i].output,
len)) != 0)
fatal("%s: buffer error: %s",
__func__, ssh_err(r));
}
if (FD_ISSET(sockets[i].fd, readset)) {
len = read(sockets[i].fd, buf, sizeof(buf));
if (len == -1 && (errno == EAGAIN ||
errno == EINTR))
continue;
if (len <= 0) {
close_socket(&sockets[i]);
break;
}
if ((r = sshbuf_put(sockets[i].input,
buf, len)) != 0)
fatal("%s: buffer error: %s",
__func__, ssh_err(r));
explicit_bzero(buf, sizeof(buf));
process_message(&sockets[i]);
}
break;
default:
fatal("Unknown type %d", sockets[i].type);
}
}
| 16,820 |
21,865 | 0 | void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
struct drm_display_mode *in)
{
out->clock = in->clock;
out->hdisplay = in->hdisplay;
out->hsync_start = in->hsync_start;
out->hsync_end = in->hsync_end;
out->htotal = in->htotal;
out->hskew = in->hskew;
out->vdisplay = in->vdisplay;
out->vsync_start = in->vsync_start;
out->vsync_end = in->vsync_end;
out->vtotal = in->vtotal;
out->vscan = in->vscan;
out->vrefresh = in->vrefresh;
out->flags = in->flags;
out->type = in->type;
strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
}
| 16,821 |
150,462 | 0 | WebContents* NavigationControllerImpl::GetWebContents() const {
return delegate_->GetWebContents();
}
| 16,822 |
148,599 | 0 | WebContentsImpl::WebContentsTreeNode::WebContentsTreeNode(
WebContentsImpl* current_web_contents)
: current_web_contents_(current_web_contents),
outer_web_contents_(nullptr),
outer_contents_frame_tree_node_id_(
FrameTreeNode::kFrameTreeNodeInvalidId),
focused_web_contents_(current_web_contents) {}
| 16,823 |
173,345 | 0 | Parcel::Blob::Blob() :
mFd(-1), mData(NULL), mSize(0), mMutable(false) {
}
| 16,824 |
97,834 | 0 | void AutoFillManager::OnInfoBarClosed(bool should_save) {
if (should_save)
personal_data_->SaveImportedCreditCard();
UploadFormData();
}
| 16,825 |
31,267 | 0 | static int async_decrypt(struct ablkcipher_request *req)
{
struct crypto_tfm *tfm = req->base.tfm;
struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
struct blkcipher_desc desc = {
.tfm = __crypto_blkcipher_cast(tfm),
.info = req->info,
.flags = req->base.flags,
};
return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}
| 16,826 |
149,254 | 0 | const AtomicString& HTMLFormControlElement::autocapitalize() const {
if (!FastGetAttribute(kAutocapitalizeAttr).IsEmpty())
return HTMLElement::autocapitalize();
if (HTMLFormElement* form = Form())
return form->autocapitalize();
return g_empty_atom;
}
| 16,827 |
73,648 | 0 | MagickExport PixelPacket *QueueAuthenticPixelCacheNexus(Image *image,
const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception)
{
CacheInfo
*restrict cache_info;
MagickOffsetType
offset;
MagickSizeType
number_pixels;
PixelPacket
*restrict pixels;
RectangleInfo
region;
/*
Validate pixel cache geometry.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception);
if (cache_info == (Cache) NULL)
return((PixelPacket *) NULL);
assert(cache_info->signature == MagickSignature);
if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) ||
(y < 0) || (x >= (ssize_t) cache_info->columns) ||
(y >= (ssize_t) cache_info->rows))
{
(void) ThrowMagickException(exception,GetMagickModule(),CacheError,
"PixelsAreNotAuthentic","`%s'",image->filename);
return((PixelPacket *) NULL);
}
offset=(MagickOffsetType) y*cache_info->columns+x;
if (offset < 0)
return((PixelPacket *) NULL);
number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1;
if ((MagickSizeType) offset >= number_pixels)
return((PixelPacket *) NULL);
/*
Return pixel cache.
*/
region.x=x;
region.y=y;
region.width=columns;
region.height=rows;
pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,&region,
(image->clip_mask != (Image *) NULL) || (image->mask != (Image *) NULL) ?
MagickTrue : MagickFalse,nexus_info,exception);
return(pixels);
}
| 16,828 |
89,238 | 0 | setup_tree(Node* node, regex_t* reg, int state, ScanEnv* env)
{
int r = 0;
switch (NODE_TYPE(node)) {
case NODE_LIST:
{
Node* prev = NULL_NODE;
do {
r = setup_tree(NODE_CAR(node), reg, state, env);
if (IS_NOT_NULL(prev) && r == 0) {
r = next_setup(prev, NODE_CAR(node), reg);
}
prev = NODE_CAR(node);
} while (r == 0 && IS_NOT_NULL(node = NODE_CDR(node)));
}
break;
case NODE_ALT:
do {
r = setup_tree(NODE_CAR(node), reg, (state | IN_ALT), env);
} while (r == 0 && IS_NOT_NULL(node = NODE_CDR(node)));
break;
case NODE_STRING:
if (IS_IGNORECASE(reg->options) && !NODE_STRING_IS_RAW(node)) {
r = expand_case_fold_string(node, reg, state);
}
break;
case NODE_BACKREF:
{
int i;
int* p;
BackRefNode* br = BACKREF_(node);
p = BACKREFS_P(br);
for (i = 0; i < br->back_num; i++) {
if (p[i] > env->num_mem) return ONIGERR_INVALID_BACKREF;
MEM_STATUS_ON(env->backrefed_mem, p[i]);
MEM_STATUS_ON(env->bt_mem_start, p[i]);
#ifdef USE_BACKREF_WITH_LEVEL
if (NODE_IS_NEST_LEVEL(node)) {
MEM_STATUS_ON(env->bt_mem_end, p[i]);
}
#endif
}
}
break;
case NODE_BAG:
{
BagNode* en = BAG_(node);
switch (en->type) {
case BAG_OPTION:
{
OnigOptionType options = reg->options;
reg->options = BAG_(node)->o.options;
r = setup_tree(NODE_BODY(node), reg, state, env);
reg->options = options;
}
break;
case BAG_MEMORY:
#ifdef USE_CALL
state |= en->m.called_state;
#endif
if ((state & (IN_ALT | IN_NOT | IN_VAR_REPEAT | IN_MULTI_ENTRY)) != 0
|| NODE_IS_RECURSION(node)) {
MEM_STATUS_ON(env->bt_mem_start, en->m.regnum);
}
r = setup_tree(NODE_BODY(node), reg, state, env);
break;
case BAG_STOP_BACKTRACK:
{
Node* target = NODE_BODY(node);
r = setup_tree(target, reg, state, env);
if (NODE_TYPE(target) == NODE_QUANT) {
QuantNode* tqn = QUANT_(target);
if (IS_REPEAT_INFINITE(tqn->upper) && tqn->lower <= 1 &&
tqn->greedy != 0) { /* (?>a*), a*+ etc... */
if (NODE_IS_SIMPLE_TYPE(NODE_BODY(target)))
NODE_STATUS_ADD(node, STOP_BT_SIMPLE_REPEAT);
}
}
}
break;
case BAG_IF_ELSE:
r = setup_tree(NODE_BODY(node), reg, (state | IN_ALT), env);
if (r != 0) return r;
if (IS_NOT_NULL(en->te.Then)) {
r = setup_tree(en->te.Then, reg, (state | IN_ALT), env);
if (r != 0) return r;
}
if (IS_NOT_NULL(en->te.Else))
r = setup_tree(en->te.Else, reg, (state | IN_ALT), env);
break;
}
}
break;
case NODE_QUANT:
r = setup_quant(node, reg, state, env);
break;
case NODE_ANCHOR:
r = setup_anchor(node, reg, state, env);
break;
#ifdef USE_CALL
case NODE_CALL:
#endif
case NODE_CTYPE:
case NODE_CCLASS:
case NODE_GIMMICK:
default:
break;
}
return r;
}
| 16,829 |
81,061 | 0 | static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
{
u64 vmx_ept_vpid_cap;
vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps,
vmx->nested.msrs.vpid_caps);
/* Every bit is either reserved or a feature bit. */
if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
return -EINVAL;
vmx->nested.msrs.ept_caps = data;
vmx->nested.msrs.vpid_caps = data >> 32;
return 0;
}
| 16,830 |
25,218 | 0 | static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct tcp_sock *tp;
struct sk_buff *opt_skb = NULL;
/* Imagine: socket is IPv6. IPv4 packet arrives,
goes to IPv4 receive handler and backlogged.
From backlog it always goes here. Kerboom...
Fortunately, tcp_rcv_established and rcv_established
handle them correctly, but it is not case with
tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
*/
if (skb->protocol == htons(ETH_P_IP))
return tcp_v4_do_rcv(sk, skb);
#ifdef CONFIG_TCP_MD5SIG
if (tcp_v6_inbound_md5_hash (sk, skb))
goto discard;
#endif
if (sk_filter(sk, skb))
goto discard;
/*
* socket locking is here for SMP purposes as backlog rcv
* is currently called with bh processing disabled.
*/
/* Do Stevens' IPV6_PKTOPTIONS.
Yes, guys, it is the only place in our code, where we
may make it not affecting IPv4.
The rest of code is protocol independent,
and I do not like idea to uglify IPv4.
Actually, all the idea behind IPV6_PKTOPTIONS
looks not very well thought. For now we latch
options, received in the last packet, enqueued
by tcp. Feel free to propose better solution.
--ANK (980728)
*/
if (np->rxopt.all)
opt_skb = skb_clone(skb, GFP_ATOMIC);
if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
sock_rps_save_rxhash(sk, skb->rxhash);
if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
goto reset;
if (opt_skb)
goto ipv6_pktoptions;
return 0;
}
if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
goto csum_err;
if (sk->sk_state == TCP_LISTEN) {
struct sock *nsk = tcp_v6_hnd_req(sk, skb);
if (!nsk)
goto discard;
/*
* Queue it on the new socket if the new socket is active,
* otherwise we just shortcircuit this and continue with
* the new socket..
*/
if(nsk != sk) {
sock_rps_save_rxhash(nsk, skb->rxhash);
if (tcp_child_process(sk, nsk, skb))
goto reset;
if (opt_skb)
__kfree_skb(opt_skb);
return 0;
}
} else
sock_rps_save_rxhash(sk, skb->rxhash);
if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
goto reset;
if (opt_skb)
goto ipv6_pktoptions;
return 0;
reset:
tcp_v6_send_reset(sk, skb);
discard:
if (opt_skb)
__kfree_skb(opt_skb);
kfree_skb(skb);
return 0;
csum_err:
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
goto discard;
ipv6_pktoptions:
/* Do you ask, what is it?
1. skb was enqueued by tcp.
2. skb is added to tail of read queue, rather than out of order.
3. socket is not in passive state.
4. Finally, it really contains options, which user wants to receive.
*/
tp = tcp_sk(sk);
if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
np->mcast_oif = inet6_iif(opt_skb);
if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
if (ipv6_opt_accepted(sk, opt_skb)) {
skb_set_owner_r(opt_skb, sk);
opt_skb = xchg(&np->pktoptions, opt_skb);
} else {
__kfree_skb(opt_skb);
opt_skb = xchg(&np->pktoptions, NULL);
}
}
kfree_skb(opt_skb);
return 0;
}
| 16,831 |
123,253 | 0 | bool ShouldSendPinchGesture() {
static bool pinch_allowed =
CommandLine::ForCurrentProcess()->HasSwitch(switches::kEnableViewport) ||
CommandLine::ForCurrentProcess()->HasSwitch(switches::kEnablePinch);
return pinch_allowed;
}
| 16,832 |
61,237 | 0 | static int allmbox_cb(void *rock,
const char *key,
size_t keylen,
const char *data,
size_t datalen)
{
struct allmb_rock *mbrock = (struct allmb_rock *)rock;
if (!mbrock->mbentry) {
int r = mboxlist_parse_entry(&mbrock->mbentry, key, keylen, data, datalen);
if (r) return r;
}
return mbrock->proc(mbrock->mbentry, mbrock->rock);
}
| 16,833 |
185,450 | 1 | void HTMLFormControlElement::parseAttribute(const QualifiedName& name, const AtomicString& value)
{
if (name == formAttr) {
formAttributeChanged();
UseCounter::count(document(), UseCounter::FormAttribute);
} else if (name == disabledAttr) {
bool oldDisabled = m_disabled;
m_disabled = !value.isNull();
if (oldDisabled != m_disabled)
disabledAttributeChanged();
} else if (name == readonlyAttr) {
bool wasReadOnly = m_isReadOnly;
m_isReadOnly = !value.isNull();
if (wasReadOnly != m_isReadOnly) {
setNeedsWillValidateCheck();
setNeedsStyleRecalc(SubtreeStyleChange, StyleChangeReasonForTracing::fromAttribute(name));
if (layoutObject())
LayoutTheme::theme().controlStateChanged(*layoutObject(), ReadOnlyControlState);
}
} else if (name == requiredAttr) {
bool wasRequired = m_isRequired;
m_isRequired = !value.isNull();
if (wasRequired != m_isRequired)
requiredAttributeChanged();
UseCounter::count(document(), UseCounter::RequiredAttribute);
} else if (name == autofocusAttr) {
HTMLElement::parseAttribute(name, value);
UseCounter::count(document(), UseCounter::AutoFocusAttribute);
} else {
HTMLElement::parseAttribute(name, value);
}
}
| 16,834 |
180,850 | 1 | get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf)
{
int tmp;
struct usb_host_interface *alt;
struct usb_host_endpoint *in, *out;
struct usb_host_endpoint *iso_in, *iso_out;
struct usb_host_endpoint *int_in, *int_out;
struct usb_device *udev;
for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
unsigned ep;
in = out = NULL;
iso_in = iso_out = NULL;
int_in = int_out = NULL;
alt = intf->altsetting + tmp;
if (override_alt >= 0 &&
override_alt != alt->desc.bAlternateSetting)
continue;
/* take the first altsetting with in-bulk + out-bulk;
* ignore other endpoints and altsettings.
*/
for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
struct usb_host_endpoint *e;
int edi;
e = alt->endpoint + ep;
edi = usb_endpoint_dir_in(&e->desc);
switch (usb_endpoint_type(&e->desc)) {
case USB_ENDPOINT_XFER_BULK:
endpoint_update(edi, &in, &out, e);
continue;
case USB_ENDPOINT_XFER_INT:
if (dev->info->intr)
endpoint_update(edi, &int_in, &int_out, e);
continue;
case USB_ENDPOINT_XFER_ISOC:
if (dev->info->iso)
endpoint_update(edi, &iso_in, &iso_out, e);
/* FALLTHROUGH */
default:
continue;
}
}
if ((in && out) || iso_in || iso_out || int_in || int_out)
goto found;
}
return -EINVAL;
found:
udev = testdev_to_usbdev(dev);
dev->info->alt = alt->desc.bAlternateSetting;
if (alt->desc.bAlternateSetting != 0) {
tmp = usb_set_interface(udev,
alt->desc.bInterfaceNumber,
alt->desc.bAlternateSetting);
if (tmp < 0)
return tmp;
}
if (in) {
dev->in_pipe = usb_rcvbulkpipe(udev,
in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
dev->out_pipe = usb_sndbulkpipe(udev,
out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
}
if (iso_in) {
dev->iso_in = &iso_in->desc;
dev->in_iso_pipe = usb_rcvisocpipe(udev,
iso_in->desc.bEndpointAddress
& USB_ENDPOINT_NUMBER_MASK);
}
if (iso_out) {
dev->iso_out = &iso_out->desc;
dev->out_iso_pipe = usb_sndisocpipe(udev,
iso_out->desc.bEndpointAddress
& USB_ENDPOINT_NUMBER_MASK);
}
if (int_in) {
dev->int_in = &int_in->desc;
dev->in_int_pipe = usb_rcvintpipe(udev,
int_in->desc.bEndpointAddress
& USB_ENDPOINT_NUMBER_MASK);
}
if (int_out) {
dev->int_out = &int_out->desc;
dev->out_int_pipe = usb_sndintpipe(udev,
int_out->desc.bEndpointAddress
& USB_ENDPOINT_NUMBER_MASK);
}
return 0;
}
| 16,835 |
112,699 | 0 | void DocumentLoader::setMainDocumentError(const ResourceError& error)
{
m_mainDocumentError = error;
frameLoader()->client()->setMainDocumentError(this, error);
}
| 16,836 |
32,215 | 0 | static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct packet_type *pt;
struct list_head *nxt;
int hash;
++*pos;
if (v == SEQ_START_TOKEN)
return ptype_get_idx(0);
pt = v;
nxt = pt->list.next;
if (pt->type == htons(ETH_P_ALL)) {
if (nxt != &ptype_all)
goto found;
hash = 0;
nxt = ptype_base[0].next;
} else
hash = ntohs(pt->type) & PTYPE_HASH_MASK;
while (nxt == &ptype_base[hash]) {
if (++hash >= PTYPE_HASH_SIZE)
return NULL;
nxt = ptype_base[hash].next;
}
found:
return list_entry(nxt, struct packet_type, list);
}
| 16,837 |
8,755 | 0 | void FAST_FUNC udhcp_add_simple_option(struct dhcp_packet *packet, uint8_t code, uint32_t data)
{
const struct dhcp_optflag *dh;
for (dh = dhcp_optflags; dh->code; dh++) {
if (dh->code == code) {
uint8_t option[6], len;
option[OPT_CODE] = code;
len = dhcp_option_lengths[dh->flags & OPTION_TYPE_MASK];
option[OPT_LEN] = len;
if (BB_BIG_ENDIAN)
data <<= 8 * (4 - len);
/* Assignment is unaligned! */
move_to_unaligned32(&option[OPT_DATA], data);
udhcp_add_binary_option(packet, option);
return;
}
}
bb_error_msg("can't add option 0x%02x", code);
}
| 16,838 |
9,477 | 0 | static ALWAYS_INLINE double MIND(double a, double b)
{
if (a < b)
return a;
return b;
}
| 16,839 |
92,035 | 0 | void blk_start_request(struct request *req)
{
lockdep_assert_held(req->q->queue_lock);
WARN_ON_ONCE(req->q->mq_ops);
blk_dequeue_request(req);
if (test_bit(QUEUE_FLAG_STATS, &req->q->queue_flags)) {
req->io_start_time_ns = ktime_get_ns();
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
req->throtl_size = blk_rq_sectors(req);
#endif
req->rq_flags |= RQF_STATS;
rq_qos_issue(req->q, req);
}
BUG_ON(blk_rq_is_complete(req));
blk_add_timer(req);
}
| 16,840 |
165,504 | 0 | ContentSecurityPolicy::ContentSecurityPolicy()
: execution_context_(nullptr),
override_inline_style_allowed_(false),
script_hash_algorithms_used_(kContentSecurityPolicyHashAlgorithmNone),
style_hash_algorithms_used_(kContentSecurityPolicyHashAlgorithmNone),
sandbox_mask_(0),
treat_as_public_address_(false),
require_safe_types_(false),
insecure_request_policy_(kLeaveInsecureRequestsAlone) {}
| 16,841 |
83,574 | 0 | static void update_read_synchronize(rdpUpdate* update, wStream* s)
{
Stream_Seek_UINT16(s); /* pad2Octets (2 bytes) */
/**
* The Synchronize Update is an artifact from the
* T.128 protocol and should be ignored.
*/
}
| 16,842 |
66,693 | 0 | static int dvb_usbv2_init(struct dvb_usb_device *d)
{
int ret;
dev_dbg(&d->udev->dev, "%s:\n", __func__);
dvb_usbv2_device_power_ctrl(d, 1);
if (d->props->read_config) {
ret = d->props->read_config(d);
if (ret < 0)
goto err;
}
ret = dvb_usbv2_i2c_init(d);
if (ret < 0)
goto err;
ret = dvb_usbv2_adapter_init(d);
if (ret < 0)
goto err;
if (d->props->init) {
ret = d->props->init(d);
if (ret < 0)
goto err;
}
ret = dvb_usbv2_remote_init(d);
if (ret < 0)
goto err;
dvb_usbv2_device_power_ctrl(d, 0);
return 0;
err:
dvb_usbv2_device_power_ctrl(d, 0);
dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
| 16,843 |
13,259 | 0 | c_pdf14trans_is_closing(const gs_composite_t * composite_action, gs_composite_t ** ppcte,
gx_device *dev)
{
gs_pdf14trans_t *pct0 = (gs_pdf14trans_t *)composite_action;
int op0 = pct0->params.pdf14_op;
switch (op0) {
default: return_error(gs_error_unregistered); /* Must not happen. */
case PDF14_PUSH_DEVICE:
return COMP_ENQUEUE;
case PDF14_ABORT_DEVICE:
return COMP_ENQUEUE;
case PDF14_POP_DEVICE:
if (*ppcte == NULL)
return COMP_ENQUEUE;
else {
gs_compositor_closing_state state = find_opening_op(PDF14_PUSH_DEVICE, ppcte, COMP_EXEC_IDLE);
if (state == COMP_EXEC_IDLE)
return COMP_DROP_QUEUE;
return state;
}
case PDF14_BEGIN_TRANS_GROUP:
return COMP_ENQUEUE;
case PDF14_END_TRANS_GROUP:
if (*ppcte == NULL)
return COMP_EXEC_QUEUE;
return find_opening_op(PDF14_BEGIN_TRANS_GROUP, ppcte, COMP_MARK_IDLE);
case PDF14_BEGIN_TRANS_MASK:
return COMP_ENQUEUE;
case PDF14_PUSH_TRANS_STATE:
return COMP_ENQUEUE;
case PDF14_POP_TRANS_STATE:
return COMP_ENQUEUE;
case PDF14_PUSH_SMASK_COLOR:
return COMP_ENQUEUE;
break;
case PDF14_POP_SMASK_COLOR:
return COMP_ENQUEUE;
break;
case PDF14_END_TRANS_MASK:
if (*ppcte == NULL)
return COMP_EXEC_QUEUE;
return find_opening_op(PDF14_BEGIN_TRANS_MASK, ppcte, COMP_MARK_IDLE);
case PDF14_SET_BLEND_PARAMS:
if (*ppcte == NULL)
return COMP_ENQUEUE;
/* hack : ignore csel - here it is always zero : */
return find_same_op(composite_action, PDF14_SET_BLEND_PARAMS, ppcte);
}
}
| 16,844 |
159,099 | 0 | bool DeleteDownloadedFile(const base::FilePath& path) {
DCHECK(GetDownloadTaskRunner()->RunsTasksInCurrentSequence());
if (base::DirectoryExists(path))
return true;
return base::DeleteFile(path, false);
}
| 16,845 |
96,350 | 0 | static VOID MiniportEnableMSIInterrupt(
IN PVOID MiniportInterruptContext,
IN ULONG MessageId
)
{
PARANDIS_ADAPTER *pContext = (PARANDIS_ADAPTER *)MiniportInterruptContext;
CParaNdisAbstractPath *path = GetPathByMessageId(pContext, MessageId);
path->EnableInterrupts();
}
| 16,846 |
37,804 | 0 | static int nop_on_interception(struct vcpu_svm *svm)
{
return 1;
}
| 16,847 |
34,170 | 0 | static inline u64 rfc3390_initial_rate(struct sock *sk)
{
const struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
const __u32 w_init = clamp_t(__u32, 4380U, 2 * hc->tx_s, 4 * hc->tx_s);
return scaled_div(w_init << 6, hc->tx_rtt);
}
| 16,848 |
137,255 | 0 | void Textfield::OnEnabledChanged() {
View::OnEnabledChanged();
if (GetInputMethod())
GetInputMethod()->OnTextInputTypeChanged(this);
SchedulePaint();
}
| 16,849 |
141,232 | 0 | void Document::SetLastFocusType(WebFocusType last_focus_type) {
last_focus_type_ = last_focus_type;
}
| 16,850 |
76,035 | 0 | vrrp_state_handler(vector_t *strvec)
{
char *str = strvec_slot(strvec, 1);
vrrp_t *vrrp = LIST_TAIL_DATA(vrrp_data->vrrp);
if (!strcmp(str, "MASTER"))
vrrp->wantstate = VRRP_STATE_MAST;
else if (!strcmp(str, "BACKUP"))
{
if (vrrp->wantstate == VRRP_STATE_MAST)
report_config_error(CONFIG_GENERAL_ERROR, "(%s) state previously set as MASTER - ignoring BACKUP", vrrp->iname);
else
vrrp->wantstate = VRRP_STATE_BACK;
}
else {
report_config_error(CONFIG_GENERAL_ERROR,"(%s) unknown state '%s', defaulting to BACKUP", vrrp->iname, str);
vrrp->wantstate = VRRP_STATE_BACK;
}
}
| 16,851 |
45,327 | 0 | void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
u32 new_size, int from_end)
{
int slot;
struct extent_buffer *leaf;
struct btrfs_item *item;
u32 nritems;
unsigned int data_end;
unsigned int old_data_start;
unsigned int old_size;
unsigned int size_diff;
int i;
struct btrfs_map_token token;
btrfs_init_map_token(&token);
leaf = path->nodes[0];
slot = path->slots[0];
old_size = btrfs_item_size_nr(leaf, slot);
if (old_size == new_size)
return;
nritems = btrfs_header_nritems(leaf);
data_end = leaf_data_end(root, leaf);
old_data_start = btrfs_item_offset_nr(leaf, slot);
size_diff = old_size - new_size;
BUG_ON(slot < 0);
BUG_ON(slot >= nritems);
/*
* item0..itemN ... dataN.offset..dataN.size .. data0.size
*/
/* first correct the data pointers */
for (i = slot; i < nritems; i++) {
u32 ioff;
item = btrfs_item_nr(i);
ioff = btrfs_token_item_offset(leaf, item, &token);
btrfs_set_token_item_offset(leaf, item,
ioff + size_diff, &token);
}
/* shift the data */
if (from_end) {
memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
data_end + size_diff, btrfs_leaf_data(leaf) +
data_end, old_data_start + new_size - data_end);
} else {
struct btrfs_disk_key disk_key;
u64 offset;
btrfs_item_key(leaf, &disk_key, slot);
if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
unsigned long ptr;
struct btrfs_file_extent_item *fi;
fi = btrfs_item_ptr(leaf, slot,
struct btrfs_file_extent_item);
fi = (struct btrfs_file_extent_item *)(
(unsigned long)fi - size_diff);
if (btrfs_file_extent_type(leaf, fi) ==
BTRFS_FILE_EXTENT_INLINE) {
ptr = btrfs_item_ptr_offset(leaf, slot);
memmove_extent_buffer(leaf, ptr,
(unsigned long)fi,
BTRFS_FILE_EXTENT_INLINE_DATA_START);
}
}
memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
data_end + size_diff, btrfs_leaf_data(leaf) +
data_end, old_data_start - data_end);
offset = btrfs_disk_key_offset(&disk_key);
btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
btrfs_set_item_key(leaf, &disk_key, slot);
if (slot == 0)
fixup_low_keys(root, path, &disk_key, 1);
}
item = btrfs_item_nr(slot);
btrfs_set_item_size(leaf, item, new_size);
btrfs_mark_buffer_dirty(leaf);
if (btrfs_leaf_free_space(root, leaf) < 0) {
btrfs_print_leaf(root, leaf);
BUG();
}
}
| 16,852 |
56,797 | 0 | int usb_disable_ltm(struct usb_device *udev)
{
return 0;
}
| 16,853 |
146,921 | 0 | void Document::writeln(LocalDOMWindow* calling_window,
const Vector<String>& text,
ExceptionState& exception_state) {
DCHECK(calling_window);
StringBuilder builder;
for (const String& string : text)
builder.Append(string);
writeln(builder.ToString(), calling_window->document(), exception_state);
}
| 16,854 |
84,771 | 0 | static int del_dac(struct task_struct *child, int slot)
{
if (slot == 1) {
if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
return -ENOENT;
child->thread.debug.dac1 = 0;
dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
child->thread.debug.dac2 = 0;
child->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
}
child->thread.debug.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
#endif
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
child->thread.debug.dvc1 = 0;
#endif
} else if (slot == 2) {
if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
return -ENOENT;
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE)
/* Part of a range */
return -EINVAL;
child->thread.debug.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
#endif
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
child->thread.debug.dvc2 = 0;
#endif
child->thread.debug.dac2 = 0;
dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
} else
return -EINVAL;
return 0;
}
| 16,855 |
88,743 | 0 | int modbus_receive_confirmation(modbus_t *ctx, uint8_t *rsp)
{
if (ctx == NULL) {
errno = EINVAL;
return -1;
}
return _modbus_receive_msg(ctx, rsp, MSG_CONFIRMATION);
}
| 16,856 |
181,387 | 1 | static int set_registers(rtl8150_t * dev, u16 indx, u16 size, void *data)
{
return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE,
indx, 0, data, size, 500);
}
| 16,857 |
78,765 | 0 | static unsigned short muscle_parse_singleAcl(const sc_acl_entry_t* acl)
{
unsigned short acl_entry = 0;
while(acl) {
int key = acl->key_ref;
int method = acl->method;
switch(method) {
case SC_AC_NEVER:
return 0xFFFF;
/* Ignore... other items overwrite these */
case SC_AC_NONE:
case SC_AC_UNKNOWN:
break;
case SC_AC_CHV:
acl_entry |= (1 << key); /* Assuming key 0 == SO */
break;
case SC_AC_AUT:
case SC_AC_TERM:
case SC_AC_PRO:
default:
/* Ignored */
break;
}
acl = acl->next;
}
return acl_entry;
}
| 16,858 |
141,777 | 0 | ChromeMetricsServiceClient::CreateUploader(
base::StringPiece server_url,
base::StringPiece insecure_server_url,
base::StringPiece mime_type,
metrics::MetricsLogUploader::MetricServiceType service_type,
const metrics::MetricsLogUploader::UploadCallback& on_upload_complete) {
return std::make_unique<metrics::NetMetricsLogUploader>(
g_browser_process->shared_url_loader_factory(), server_url,
insecure_server_url, mime_type, service_type, on_upload_complete);
}
| 16,859 |
17,848 | 0 | static int fchmodat_nofollow(int dirfd, const char *name, mode_t mode)
{
int fd, ret;
/* FIXME: this should be handled with fchmodat(AT_SYMLINK_NOFOLLOW).
* Unfortunately, the linux kernel doesn't implement it yet. As an
* alternative, let's open the file and use fchmod() instead. This
* may fail depending on the permissions of the file, but it is the
* best we can do to avoid TOCTTOU. We first try to open read-only
* in case name points to a directory. If that fails, we try write-only
* in case name doesn't point to a directory.
*/
fd = openat_file(dirfd, name, O_RDONLY, 0);
if (fd == -1) {
/* In case the file is writable-only and isn't a directory. */
if (errno == EACCES) {
fd = openat_file(dirfd, name, O_WRONLY, 0);
}
if (fd == -1 && errno == EISDIR) {
errno = EACCES;
}
}
if (fd == -1) {
return -1;
}
ret = fchmod(fd, mode);
close_preserve_errno(fd);
return ret;
}
| 16,860 |
131,423 | 0 | static void limitedToOnlyAttributeAttributeGetterCallback(v8::Local<v8::String>, const v8::PropertyCallbackInfo<v8::Value>& info)
{
TRACE_EVENT_SET_SAMPLING_STATE("Blink", "DOMGetter");
TestObjectPythonV8Internal::limitedToOnlyAttributeAttributeGetter(info);
TRACE_EVENT_SET_SAMPLING_STATE("V8", "V8Execution");
}
| 16,861 |
187,419 | 1 | int validate_camera_metadata_structure(const camera_metadata_t *metadata,
const size_t *expected_size) {
if (metadata == NULL) {
ALOGE("%s: metadata is null!", __FUNCTION__);
return ERROR;
}
// Check that the metadata pointer is well-aligned first.
{
static const struct {
const char *name;
size_t alignment;
} alignments[] = {
{
.name = "camera_metadata",
.alignment = METADATA_ALIGNMENT
},
{
.name = "camera_metadata_buffer_entry",
.alignment = ENTRY_ALIGNMENT
},
{
.name = "camera_metadata_data",
.alignment = DATA_ALIGNMENT
},
};
for (size_t i = 0; i < sizeof(alignments)/sizeof(alignments[0]); ++i) {
uintptr_t aligned_ptr = ALIGN_TO(metadata, alignments[i].alignment);
if ((uintptr_t)metadata != aligned_ptr) {
ALOGE("%s: Metadata pointer is not aligned (actual %p, "
"expected %p) to type %s",
__FUNCTION__, metadata,
(void*)aligned_ptr, alignments[i].name);
return ERROR;
}
}
}
/**
* Check that the metadata contents are correct
*/
if (expected_size != NULL && metadata->size > *expected_size) {
ALOGE("%s: Metadata size (%" PRIu32 ") should be <= expected size (%zu)",
__FUNCTION__, metadata->size, *expected_size);
return ERROR;
}
if (metadata->entry_count > metadata->entry_capacity) {
ALOGE("%s: Entry count (%" PRIu32 ") should be <= entry capacity "
"(%" PRIu32 ")",
__FUNCTION__, metadata->entry_count, metadata->entry_capacity);
return ERROR;
}
const metadata_uptrdiff_t entries_end =
metadata->entries_start + metadata->entry_capacity;
if (entries_end < metadata->entries_start || // overflow check
entries_end > metadata->data_start) {
ALOGE("%s: Entry start + capacity (%" PRIu32 ") should be <= data start "
"(%" PRIu32 ")",
__FUNCTION__,
(metadata->entries_start + metadata->entry_capacity),
metadata->data_start);
return ERROR;
}
const metadata_uptrdiff_t data_end =
metadata->data_start + metadata->data_capacity;
if (data_end < metadata->data_start || // overflow check
data_end > metadata->size) {
ALOGE("%s: Data start + capacity (%" PRIu32 ") should be <= total size "
"(%" PRIu32 ")",
__FUNCTION__,
(metadata->data_start + metadata->data_capacity),
metadata->size);
return ERROR;
}
// Validate each entry
const metadata_size_t entry_count = metadata->entry_count;
camera_metadata_buffer_entry_t *entries = get_entries(metadata);
for (size_t i = 0; i < entry_count; ++i) {
if ((uintptr_t)&entries[i] != ALIGN_TO(&entries[i], ENTRY_ALIGNMENT)) {
ALOGE("%s: Entry index %zu had bad alignment (address %p),"
" expected alignment %zu",
__FUNCTION__, i, &entries[i], ENTRY_ALIGNMENT);
return ERROR;
}
camera_metadata_buffer_entry_t entry = entries[i];
if (entry.type >= NUM_TYPES) {
ALOGE("%s: Entry index %zu had a bad type %d",
__FUNCTION__, i, entry.type);
return ERROR;
}
// TODO: fix vendor_tag_ops across processes so we don't need to special
// case vendor-specific tags
uint32_t tag_section = entry.tag >> 16;
int tag_type = get_camera_metadata_tag_type(entry.tag);
if (tag_type != (int)entry.type && tag_section < VENDOR_SECTION) {
ALOGE("%s: Entry index %zu had tag type %d, but the type was %d",
__FUNCTION__, i, tag_type, entry.type);
return ERROR;
}
size_t data_size;
if (validate_and_calculate_camera_metadata_entry_data_size(&data_size, entry.type,
entry.count) != OK) {
ALOGE("%s: Entry data size is invalid. type: %u count: %u", __FUNCTION__, entry.type,
entry.count);
return ERROR;
}
if (data_size != 0) {
camera_metadata_data_t *data =
(camera_metadata_data_t*) (get_data(metadata) +
entry.data.offset);
if ((uintptr_t)data != ALIGN_TO(data, DATA_ALIGNMENT)) {
ALOGE("%s: Entry index %zu had bad data alignment (address %p),"
" expected align %zu, (tag name %s, data size %zu)",
__FUNCTION__, i, data, DATA_ALIGNMENT,
get_camera_metadata_tag_name(entry.tag) ?: "unknown",
data_size);
return ERROR;
}
size_t data_entry_end = entry.data.offset + data_size;
if (data_entry_end < entry.data.offset || // overflow check
data_entry_end > metadata->data_capacity) {
ALOGE("%s: Entry index %zu data ends (%zu) beyond the capacity "
"%" PRIu32, __FUNCTION__, i, data_entry_end,
metadata->data_capacity);
return ERROR;
}
} else if (entry.count == 0) {
if (entry.data.offset != 0) {
ALOGE("%s: Entry index %zu had 0 items, but offset was non-0 "
"(%" PRIu32 "), tag name: %s", __FUNCTION__, i, entry.data.offset,
get_camera_metadata_tag_name(entry.tag) ?: "unknown");
return ERROR;
}
} // else data stored inline, so we look at value which can be anything.
}
return OK;
}
| 16,862 |
45,049 | 0 | static apr_uint64_t ap_ntoh64(const apr_uint64_t *input)
{
apr_uint64_t rval;
unsigned char *data = (unsigned char *)&rval;
if (APR_IS_BIGENDIAN) {
return *input;
}
data[0] = *input >> 56;
data[1] = *input >> 48;
data[2] = *input >> 40;
data[3] = *input >> 32;
data[4] = *input >> 24;
data[5] = *input >> 16;
data[6] = *input >> 8;
data[7] = *input >> 0;
return rval;
}
| 16,863 |
34,136 | 0 | static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int len)
{
struct hci_ufilter uf = { .opcode = 0 };
struct sock *sk = sock->sk;
int err = 0, opt = 0;
BT_DBG("sk %p, opt %d", sk, optname);
lock_sock(sk);
if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
err = -EINVAL;
goto done;
}
switch (optname) {
case HCI_DATA_DIR:
if (get_user(opt, (int __user *)optval)) {
err = -EFAULT;
break;
}
if (opt)
hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
else
hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
break;
case HCI_TIME_STAMP:
if (get_user(opt, (int __user *)optval)) {
err = -EFAULT;
break;
}
if (opt)
hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
else
hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
break;
case HCI_FILTER:
{
struct hci_filter *f = &hci_pi(sk)->filter;
uf.type_mask = f->type_mask;
uf.opcode = f->opcode;
uf.event_mask[0] = *((u32 *) f->event_mask + 0);
uf.event_mask[1] = *((u32 *) f->event_mask + 1);
}
len = min_t(unsigned int, len, sizeof(uf));
if (copy_from_user(&uf, optval, len)) {
err = -EFAULT;
break;
}
if (!capable(CAP_NET_RAW)) {
uf.type_mask &= hci_sec_filter.type_mask;
uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
}
{
struct hci_filter *f = &hci_pi(sk)->filter;
f->type_mask = uf.type_mask;
f->opcode = uf.opcode;
*((u32 *) f->event_mask + 0) = uf.event_mask[0];
*((u32 *) f->event_mask + 1) = uf.event_mask[1];
}
break;
default:
err = -ENOPROTOOPT;
break;
}
done:
release_sock(sk);
return err;
}
| 16,864 |
66,242 | 0 | static int mailimf_time_parse(const char * message, size_t length,
size_t * indx,
int * phour, int * pmin,
int * psec,
int * pzone)
{
size_t cur_token;
int hour;
int min;
int sec;
int zone;
int r;
cur_token = * indx;
r = mailimf_cfws_parse(message, length, &cur_token);
if ((r != MAILIMF_NO_ERROR) && (r != MAILIMF_ERROR_PARSE))
return r;
r = mailimf_time_of_day_parse(message, length, &cur_token,
&hour, &min, &sec);
if (r != MAILIMF_NO_ERROR)
return r;
r = mailimf_fws_parse(message, length, &cur_token);
if ((r != MAILIMF_NO_ERROR) && (r != MAILIMF_ERROR_PARSE))
return r;
r = mailimf_zone_parse(message, length, &cur_token, &zone);
if (r == MAILIMF_NO_ERROR) {
/* do nothing */
}
else if (r == MAILIMF_ERROR_PARSE) {
zone = 0;
}
else {
return r;
}
* phour = hour;
* pmin = min;
* psec = sec;
* pzone = zone;
* indx = cur_token;
return MAILIMF_NO_ERROR;
}
| 16,865 |
20,324 | 0 | pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr)
{
return hva_to_pfn(kvm, addr, true, NULL, true, NULL);
}
| 16,866 |
154,579 | 0 | error::Error GLES2DecoderPassthroughImpl::DoBlendFunc(GLenum sfactor,
GLenum dfactor) {
api()->glBlendFuncFn(sfactor, dfactor);
return error::kNoError;
}
| 16,867 |
182,568 | 1 | static int ocfs2_dio_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_write_ctxt *wc;
struct ocfs2_write_cluster_desc *desc = NULL;
struct ocfs2_dio_write_ctxt *dwc = NULL;
struct buffer_head *di_bh = NULL;
u64 p_blkno;
loff_t pos = iblock << inode->i_sb->s_blocksize_bits;
unsigned len, total_len = bh_result->b_size;
int ret = 0, first_get_block = 0;
len = osb->s_clustersize - (pos & (osb->s_clustersize - 1));
len = min(total_len, len);
mlog(0, "get block of %lu at %llu:%u req %u\n",
inode->i_ino, pos, len, total_len);
/*
* Because we need to change file size in ocfs2_dio_end_io_write(), or
* we may need to add it to orphan dir. So can not fall to fast path
* while file size will be changed.
*/
if (pos + total_len <= i_size_read(inode)) {
down_read(&oi->ip_alloc_sem);
/* This is the fast path for re-write. */
ret = ocfs2_get_block(inode, iblock, bh_result, create);
up_read(&oi->ip_alloc_sem);
if (buffer_mapped(bh_result) &&
!buffer_new(bh_result) &&
ret == 0)
goto out;
/* Clear state set by ocfs2_get_block. */
bh_result->b_state = 0;
}
dwc = ocfs2_dio_alloc_write_ctx(bh_result, &first_get_block);
if (unlikely(dwc == NULL)) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
if (ocfs2_clusters_for_bytes(inode->i_sb, pos + total_len) >
ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode)) &&
!dwc->dw_orphaned) {
/*
* when we are going to alloc extents beyond file size, add the
* inode to orphan dir, so we can recall those spaces when
* system crashed during write.
*/
ret = ocfs2_add_inode_to_orphan(osb, inode);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
dwc->dw_orphaned = 1;
}
ret = ocfs2_inode_lock(inode, &di_bh, 1);
if (ret) {
mlog_errno(ret);
goto out;
}
down_write(&oi->ip_alloc_sem);
if (first_get_block) {
if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
ret = ocfs2_zero_tail(inode, di_bh, pos);
else
ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos,
total_len, NULL);
if (ret < 0) {
mlog_errno(ret);
goto unlock;
}
}
ret = ocfs2_write_begin_nolock(inode->i_mapping, pos, len,
OCFS2_WRITE_DIRECT, NULL,
(void **)&wc, di_bh, NULL);
if (ret) {
mlog_errno(ret);
goto unlock;
}
desc = &wc->w_desc[0];
p_blkno = ocfs2_clusters_to_blocks(inode->i_sb, desc->c_phys);
BUG_ON(p_blkno == 0);
p_blkno += iblock & (u64)(ocfs2_clusters_to_blocks(inode->i_sb, 1) - 1);
map_bh(bh_result, inode->i_sb, p_blkno);
bh_result->b_size = len;
if (desc->c_needs_zero)
set_buffer_new(bh_result);
/* May sleep in end_io. It should not happen in a irq context. So defer
* it to dio work queue. */
set_buffer_defer_completion(bh_result);
if (!list_empty(&wc->w_unwritten_list)) {
struct ocfs2_unwritten_extent *ue = NULL;
ue = list_first_entry(&wc->w_unwritten_list,
struct ocfs2_unwritten_extent,
ue_node);
BUG_ON(ue->ue_cpos != desc->c_cpos);
/* The physical address may be 0, fill it. */
ue->ue_phys = desc->c_phys;
list_splice_tail_init(&wc->w_unwritten_list, &dwc->dw_zero_list);
dwc->dw_zero_count++;
}
ret = ocfs2_write_end_nolock(inode->i_mapping, pos, len, len, wc);
BUG_ON(ret != len);
ret = 0;
unlock:
up_write(&oi->ip_alloc_sem);
ocfs2_inode_unlock(inode, 1);
brelse(di_bh);
out:
if (ret < 0)
ret = -EIO;
return ret;
}
| 16,868 |
96,220 | 0 | int sm_skip_attr_write(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]) {
fm_mgr_config_errno_t res;
fm_msg_ret_code_t ret_code;
unsigned int attrSkip = 0;
if (argc > 1) {
printf("Error: only 1 argument or less expected\n");
return 0;
}
if ((argc==0) || ((argc==1) && (strcmp(argv[0],"-help")==0)) ) {
printf(" SM SKIP WRITE BITMASKS...\n");
printf(" SM_SKIP_WRITE_PORTINFO 0x00000001 (Includes Port Info)\n");
printf(" SM_SKIP_WRITE_SMINFO 0x00000002 (Includes Sm Info)\n");
printf(" SM_SKIP_WRITE_GUID 0x00000004 (Includes GUID Info\n");
printf(" SM_SKIP_WRITE_SWITCHINFO 0x00000008 (Includes Switch Info\n");
printf(" SM_SKIP_WRITE_SWITCHLTV 0x00000010 (Includes Switch LTV)\n");
printf(" SM_SKIP_WRITE_VLARB 0x00000020 (Includes VLArb Tables/Preempt Tables)\n");
printf(" SM_SKIP_WRITE_MAPS 0x00000040 (Includes SL::SC, SC::SL, SC::VL)\n");
printf(" SM_SKIP_WRITE_LFT 0x00000080 (Includes LFT, MFT)\n");
printf(" SM_SKIP_WRITE_AR 0x00000100 (Includes PG table, PG FDB)\n");
printf(" SM_SKIP_WRITE_PKEY 0x00000200\n");
printf(" SM_SKIP_WRITE_CONG 0x00000400 (Includes HFI / Switch congestion)\n");
printf(" SM_SKIP_WRITE_BFRCTRL 0x00000800\n");
printf(" SM_SKIP_WRITE_NOTICE 0x00001000\n");
return 0;
}
attrSkip = strtol(argv[0],NULL,0);
if((res = fm_mgr_simple_query(hdl, FM_ACT_GET, FM_DT_SM_SKIP_ATTRIBUTE_WRITE, mgr, sizeof(attrSkip), (void*) &attrSkip, &ret_code)) != FM_CONF_OK) {
fprintf(stderr, "sm_skip_attr_write: Failed to retrieve data: \n"
"\tError:(%d) %s \n\tRet code:(%d) %s\n",
res, fm_mgr_get_error_str(res),ret_code,
fm_mgr_get_resp_error_str(ret_code));
} else {
printf("Successfully sent set to 0x%x of skip write to local SM instance\n", attrSkip);
}
return 0;
}
| 16,869 |
86,221 | 0 | static int __init i8042_init(void)
{
struct platform_device *pdev;
int err;
dbg_init();
err = i8042_platform_init();
if (err)
return err;
err = i8042_controller_check();
if (err)
goto err_platform_exit;
pdev = platform_create_bundle(&i8042_driver, i8042_probe, NULL, 0, NULL, 0);
if (IS_ERR(pdev)) {
err = PTR_ERR(pdev);
goto err_platform_exit;
}
bus_register_notifier(&serio_bus, &i8042_kbd_bind_notifier_block);
panic_blink = i8042_panic_blink;
return 0;
err_platform_exit:
i8042_platform_exit();
return err;
}
| 16,870 |
151,168 | 0 | void InspectorNetworkAgent::DidReceiveData(unsigned long identifier,
DocumentLoader* loader,
const char* data,
int data_length) {
String request_id = IdentifiersFactory::RequestId(identifier);
if (data) {
NetworkResourcesData::ResourceData const* resource_data =
resources_data_->Data(request_id);
if (resource_data &&
(!resource_data->CachedResource() ||
resource_data->CachedResource()->GetDataBufferingPolicy() ==
kDoNotBufferData ||
IsErrorStatusCode(resource_data->HttpStatusCode())))
resources_data_->MaybeAddResourceData(request_id, data, data_length);
}
GetFrontend()->dataReceived(
request_id, MonotonicallyIncreasingTime(), data_length,
resources_data_->GetAndClearPendingEncodedDataLength(request_id));
}
| 16,871 |
59,107 | 0 | static int check_cond_jmp_op(struct bpf_verifier_env *env,
struct bpf_insn *insn, int *insn_idx)
{
struct bpf_verifier_state *other_branch, *this_branch = env->cur_state;
struct bpf_reg_state *regs = this_branch->regs, *dst_reg;
u8 opcode = BPF_OP(insn->code);
int err;
if (opcode > BPF_JSLE) {
verbose(env, "invalid BPF_JMP opcode %x\n", opcode);
return -EINVAL;
}
if (BPF_SRC(insn->code) == BPF_X) {
if (insn->imm != 0) {
verbose(env, "BPF_JMP uses reserved fields\n");
return -EINVAL;
}
/* check src1 operand */
err = check_reg_arg(env, insn->src_reg, SRC_OP);
if (err)
return err;
if (is_pointer_value(env, insn->src_reg)) {
verbose(env, "R%d pointer comparison prohibited\n",
insn->src_reg);
return -EACCES;
}
} else {
if (insn->src_reg != BPF_REG_0) {
verbose(env, "BPF_JMP uses reserved fields\n");
return -EINVAL;
}
}
/* check src2 operand */
err = check_reg_arg(env, insn->dst_reg, SRC_OP);
if (err)
return err;
dst_reg = &regs[insn->dst_reg];
/* detect if R == 0 where R was initialized to zero earlier */
if (BPF_SRC(insn->code) == BPF_K &&
(opcode == BPF_JEQ || opcode == BPF_JNE) &&
dst_reg->type == SCALAR_VALUE &&
tnum_equals_const(dst_reg->var_off, insn->imm)) {
if (opcode == BPF_JEQ) {
/* if (imm == imm) goto pc+off;
* only follow the goto, ignore fall-through
*/
*insn_idx += insn->off;
return 0;
} else {
/* if (imm != imm) goto pc+off;
* only follow fall-through branch, since
* that's where the program will go
*/
return 0;
}
}
other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
if (!other_branch)
return -EFAULT;
/* detect if we are comparing against a constant value so we can adjust
* our min/max values for our dst register.
* this is only legit if both are scalars (or pointers to the same
* object, I suppose, but we don't support that right now), because
* otherwise the different base pointers mean the offsets aren't
* comparable.
*/
if (BPF_SRC(insn->code) == BPF_X) {
if (dst_reg->type == SCALAR_VALUE &&
regs[insn->src_reg].type == SCALAR_VALUE) {
if (tnum_is_const(regs[insn->src_reg].var_off))
reg_set_min_max(&other_branch->regs[insn->dst_reg],
dst_reg, regs[insn->src_reg].var_off.value,
opcode);
else if (tnum_is_const(dst_reg->var_off))
reg_set_min_max_inv(&other_branch->regs[insn->src_reg],
&regs[insn->src_reg],
dst_reg->var_off.value, opcode);
else if (opcode == BPF_JEQ || opcode == BPF_JNE)
/* Comparing for equality, we can combine knowledge */
reg_combine_min_max(&other_branch->regs[insn->src_reg],
&other_branch->regs[insn->dst_reg],
&regs[insn->src_reg],
&regs[insn->dst_reg], opcode);
}
} else if (dst_reg->type == SCALAR_VALUE) {
reg_set_min_max(&other_branch->regs[insn->dst_reg],
dst_reg, insn->imm, opcode);
}
/* detect if R == 0 where R is returned from bpf_map_lookup_elem() */
if (BPF_SRC(insn->code) == BPF_K &&
insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
/* Mark all identical map registers in each branch as either
* safe or unknown depending R == 0 or R != 0 conditional.
*/
mark_map_regs(this_branch, insn->dst_reg, opcode == BPF_JNE);
mark_map_regs(other_branch, insn->dst_reg, opcode == BPF_JEQ);
} else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
this_branch, other_branch) &&
is_pointer_value(env, insn->dst_reg)) {
verbose(env, "R%d pointer comparison prohibited\n",
insn->dst_reg);
return -EACCES;
}
if (env->log.level)
print_verifier_state(env, this_branch);
return 0;
}
| 16,872 |
32,759 | 0 | static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
int current_link_up = 0;
if (!(mac_status & MAC_STATUS_PCS_SYNCED))
goto out;
if (tp->link_config.autoneg == AUTONEG_ENABLE) {
u32 txflags, rxflags;
int i;
if (fiber_autoneg(tp, &txflags, &rxflags)) {
u32 local_adv = 0, remote_adv = 0;
if (txflags & ANEG_CFG_PS1)
local_adv |= ADVERTISE_1000XPAUSE;
if (txflags & ANEG_CFG_PS2)
local_adv |= ADVERTISE_1000XPSE_ASYM;
if (rxflags & MR_LP_ADV_SYM_PAUSE)
remote_adv |= LPA_1000XPAUSE;
if (rxflags & MR_LP_ADV_ASYM_PAUSE)
remote_adv |= LPA_1000XPAUSE_ASYM;
tp->link_config.rmt_adv =
mii_adv_to_ethtool_adv_x(remote_adv);
tg3_setup_flow_control(tp, local_adv, remote_adv);
current_link_up = 1;
}
for (i = 0; i < 30; i++) {
udelay(20);
tw32_f(MAC_STATUS,
(MAC_STATUS_SYNC_CHANGED |
MAC_STATUS_CFG_CHANGED));
udelay(40);
if ((tr32(MAC_STATUS) &
(MAC_STATUS_SYNC_CHANGED |
MAC_STATUS_CFG_CHANGED)) == 0)
break;
}
mac_status = tr32(MAC_STATUS);
if (current_link_up == 0 &&
(mac_status & MAC_STATUS_PCS_SYNCED) &&
!(mac_status & MAC_STATUS_RCVD_CFG))
current_link_up = 1;
} else {
tg3_setup_flow_control(tp, 0, 0);
/* Forcing 1000FD link up. */
current_link_up = 1;
tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
udelay(40);
tw32_f(MAC_MODE, tp->mac_mode);
udelay(40);
}
out:
return current_link_up;
}
| 16,873 |
31,380 | 0 | static int check_reqid(struct xfrm_policy *xp, int dir, int count, void *ptr)
{
int i;
u32 reqid = *(u32*)ptr;
for (i=0; i<xp->xfrm_nr; i++) {
if (xp->xfrm_vec[i].reqid == reqid)
return -EEXIST;
}
return 0;
}
| 16,874 |
121,258 | 0 | void HTMLInputElement::setSize(unsigned size)
{
setAttribute(sizeAttr, String::number(size));
}
| 16,875 |
73,199 | 0 | static int file_read(jas_stream_obj_t *obj, char *buf, int cnt)
{
jas_stream_fileobj_t *fileobj = JAS_CAST(jas_stream_fileobj_t *, obj);
return read(fileobj->fd, buf, cnt);
}
| 16,876 |
166,095 | 0 | void FileSystemManagerImpl::Write(
const GURL& file_path,
const std::string& blob_uuid,
int64_t position,
blink::mojom::FileSystemCancellableOperationRequest op_request,
blink::mojom::FileSystemOperationListenerPtr listener) {
DCHECK_CURRENTLY_ON(BrowserThread::IO);
FileSystemURL url(context_->CrackURL(file_path));
base::Optional<base::File::Error> opt_error = ValidateFileSystemURL(url);
if (opt_error) {
listener->ErrorOccurred(opt_error.value());
return;
}
if (!security_policy_->CanWriteFileSystemFile(process_id_, url)) {
listener->ErrorOccurred(base::File::FILE_ERROR_SECURITY);
return;
}
std::unique_ptr<storage::BlobDataHandle> blob =
blob_storage_context_->context()->GetBlobDataFromUUID(blob_uuid);
OperationListenerID listener_id = AddOpListener(std::move(listener));
OperationID op_id = operation_runner()->Write(
url, std::move(blob), position,
base::BindRepeating(&FileSystemManagerImpl::DidWrite, GetWeakPtr(),
listener_id));
cancellable_operations_.AddBinding(
std::make_unique<FileSystemCancellableOperationImpl>(op_id, this),
std::move(op_request));
}
| 16,877 |
26,665 | 0 | static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct ieee80211_channel *chan;
const u8 *bssid, *ssid, *ie = NULL;
int err, ssid_len, ie_len = 0;
enum nl80211_auth_type auth_type;
struct key_parse key;
bool local_state_change;
if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
return -EINVAL;
if (!info->attrs[NL80211_ATTR_MAC])
return -EINVAL;
if (!info->attrs[NL80211_ATTR_AUTH_TYPE])
return -EINVAL;
if (!info->attrs[NL80211_ATTR_SSID])
return -EINVAL;
if (!info->attrs[NL80211_ATTR_WIPHY_FREQ])
return -EINVAL;
err = nl80211_parse_key(info, &key);
if (err)
return err;
if (key.idx >= 0) {
if (key.type != -1 && key.type != NL80211_KEYTYPE_GROUP)
return -EINVAL;
if (!key.p.key || !key.p.key_len)
return -EINVAL;
if ((key.p.cipher != WLAN_CIPHER_SUITE_WEP40 ||
key.p.key_len != WLAN_KEY_LEN_WEP40) &&
(key.p.cipher != WLAN_CIPHER_SUITE_WEP104 ||
key.p.key_len != WLAN_KEY_LEN_WEP104))
return -EINVAL;
if (key.idx > 4)
return -EINVAL;
} else {
key.p.key_len = 0;
key.p.key = NULL;
}
if (key.idx >= 0) {
int i;
bool ok = false;
for (i = 0; i < rdev->wiphy.n_cipher_suites; i++) {
if (key.p.cipher == rdev->wiphy.cipher_suites[i]) {
ok = true;
break;
}
}
if (!ok)
return -EINVAL;
}
if (!rdev->ops->auth)
return -EOPNOTSUPP;
if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT)
return -EOPNOTSUPP;
bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
chan = ieee80211_get_channel(&rdev->wiphy,
nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]));
if (!chan || (chan->flags & IEEE80211_CHAN_DISABLED))
return -EINVAL;
ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
if (info->attrs[NL80211_ATTR_IE]) {
ie = nla_data(info->attrs[NL80211_ATTR_IE]);
ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
}
auth_type = nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]);
if (!nl80211_valid_auth_type(auth_type))
return -EINVAL;
local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE];
return cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid,
ssid, ssid_len, ie, ie_len,
key.p.key, key.p.key_len, key.idx,
local_state_change);
}
| 16,878 |
172,981 | 0 | static int rpng2_x_msb(ulg u32val)
{
int i;
for (i = 31; i >= 0; --i) {
if (u32val & 0x80000000L)
break;
u32val <<= 1;
}
return i;
}
| 16,879 |
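Usage sketch for rpng2_x_msb(): it returns the index (31 down to 0) of the most significant set bit, or -1 when the input is zero, which makes it handy for turning a channel mask into a shift count. The values below assume ulg is a 32-bit unsigned type:
static void msb_example(void)
{
    int hi  = rpng2_x_msb(0x00ff0000L);  /* 23: top bit of the mask */
    int lo  = rpng2_x_msb(1L);           /* 0 */
    int nil = rpng2_x_msb(0L);           /* -1: no bit set */
    (void)hi; (void)lo; (void)nil;       /* silence unused warnings */
}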
110,956 | 0 | void RootWindowHostLinux::SetFocusWhenShown(bool focus_when_shown) {
static const char* k_NET_WM_USER_TIME = "_NET_WM_USER_TIME";
focus_when_shown_ = focus_when_shown;
if (IsWindowManagerPresent() && !focus_when_shown_) {
ui::SetIntProperty(xwindow_,
k_NET_WM_USER_TIME,
k_NET_WM_USER_TIME,
0);
}
}
| 16,880 |
45,422 | 0 | static unsigned long ecryptfs_get_zeroed_pages(gfp_t gfp_mask,
unsigned int order)
{
struct page *page;
page = alloc_pages(gfp_mask | __GFP_ZERO, order);
if (page)
return (unsigned long) page_address(page);
return 0;
}
| 16,881 |
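The helper above returns a kernel virtual address for 2^order zeroed pages, or 0 on failure; releasing the allocation with a matching order is the caller's job. A hypothetical caller might look like:
static int zeroed_page_example(void)
{
	unsigned long addr = ecryptfs_get_zeroed_pages(GFP_KERNEL, 0);
	if (!addr)
		return -ENOMEM;
	/* ... use the zeroed page ... */
	free_pages(addr, 0);	/* order must match the allocation */
	return 0;
}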
48,516 | 0 | static void ion_buffer_add(struct ion_device *dev,
struct ion_buffer *buffer)
{
struct rb_node **p = &dev->buffers.rb_node;
struct rb_node *parent = NULL;
struct ion_buffer *entry;
while (*p) {
parent = *p;
entry = rb_entry(parent, struct ion_buffer, node);
if (buffer < entry) {
p = &(*p)->rb_left;
} else if (buffer > entry) {
p = &(*p)->rb_right;
} else {
pr_err("%s: buffer already found.", __func__);
BUG();
}
}
rb_link_node(&buffer->node, parent, p);
rb_insert_color(&buffer->node, &dev->buffers);
}
| 16,882 |
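ion_buffer_add() orders the red-black tree by the buffers' pointer values, so any lookup must walk with the same comparison. The companion ion_buffer_find() below is a hypothetical sketch, not part of the driver:
static struct ion_buffer *ion_buffer_find(struct ion_device *dev,
					  struct ion_buffer *buffer)
{
	struct rb_node *n = dev->buffers.rb_node;
	while (n) {
		struct ion_buffer *entry = rb_entry(n, struct ion_buffer, node);
		if (buffer < entry)
			n = n->rb_left;
		else if (buffer > entry)
			n = n->rb_right;
		else
			return entry;	/* same pointer: found */
	}
	return NULL;
}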
64,893 | 0 | static void rle_write_trns(struct rle_context *rlectx, int num_trns)
{
iw_byte dstbuf[4];
int num_remaining = num_trns;
int num_to_write;
while(num_remaining>0) {
num_to_write = num_remaining;
if(num_to_write>255) num_to_write=255;
dstbuf[0]=0x00; // 00 02 = Delta
dstbuf[1]=0x02;
dstbuf[2]=(iw_byte)num_to_write; // X offset
dstbuf[3]=0x00; // Y offset
iwbmp_write(rlectx->wctx,dstbuf,4);
rlectx->total_bytes_written+=4;
num_remaining -= num_to_write;
}
rlectx->pending_data_start += num_trns;
}
| 16,883 |
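Worked example of the escape sequence rle_write_trns() emits: skipping 300 transparent pixels cannot fit in one delta record, because the X offset is a single byte, so two records go out back to back:
/* 00 02 FF 00   skip 255 pixels right, 0 down
 * 00 02 2D 00   skip the remaining 45 (0x2D) pixels right, 0 down */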
40,023 | 0 | cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
unsigned int bytes_written)
{
loff_t end_of_write = offset + bytes_written;
if (end_of_write > cifsi->server_eof)
cifsi->server_eof = end_of_write;
}
| 16,884 |
37,593 | 0 | static pfn_t spte_to_pfn(u64 pte)
{
return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
}
| 16,885 |
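Worked example for spte_to_pfn(), assuming 4 KiB pages (PAGE_SHIFT == 12): an SPTE whose masked base address is 0x12345000 yields pfn 0x12345; PT64_BASE_ADDR_MASK strips the low flag bits before the shift.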
89,414 | 0 | void irc_servers_reconnect_deinit(void)
{
signal_remove("server connect copy", (SIGNAL_FUNC) sig_server_connect_copy);
signal_remove("server reconnect save status", (SIGNAL_FUNC) sig_server_reconnect_save_status);
signal_remove("event connected", (SIGNAL_FUNC) sig_connected);
signal_remove("event 436", (SIGNAL_FUNC) event_nick_collision);
signal_remove("event kill", (SIGNAL_FUNC) event_kill);
}
| 16,886 |
110,609 | 0 | bool GLES2DecoderImpl::Initialize(
const scoped_refptr<gfx::GLSurface>& surface,
const scoped_refptr<gfx::GLContext>& context,
bool offscreen,
const gfx::Size& size,
const DisallowedFeatures& disallowed_features,
const char* allowed_extensions,
const std::vector<int32>& attribs) {
TRACE_EVENT0("gpu", "GLES2DecoderImpl::Initialize");
DCHECK(context->IsCurrent(surface.get()));
DCHECK(!context_.get());
if (CommandLine::ForCurrentProcess()->HasSwitch(
switches::kEnableGPUDebugging)) {
set_debug(true);
}
if (CommandLine::ForCurrentProcess()->HasSwitch(
switches::kEnableGPUCommandLogging)) {
set_log_commands(true);
}
compile_shader_always_succeeds_ = CommandLine::ForCurrentProcess()->HasSwitch(
switches::kCompileShaderAlwaysSucceeds);
context_ = context;
surface_ = surface;
if (!group_->Initialize(disallowed_features, allowed_extensions)) {
LOG(ERROR) << "GpuScheduler::InitializeCommon failed because group "
<< "failed to initialize.";
group_ = NULL; // Must not destroy ContextGroup if it is not initialized.
Destroy(true);
return false;
}
CHECK_GL_ERROR();
disallowed_features_ = disallowed_features;
vertex_attrib_manager_.reset(new VertexAttribManager());
vertex_attrib_manager_->Initialize(group_->max_vertex_attribs());
query_manager_.reset(new QueryManager(this, feature_info_));
util_.set_num_compressed_texture_formats(
validators_->compressed_texture_format.GetValues().size());
if (gfx::GetGLImplementation() != gfx::kGLImplementationEGLGLES2) {
glEnableVertexAttribArray(0);
}
glGenBuffersARB(1, &attrib_0_buffer_id_);
glBindBuffer(GL_ARRAY_BUFFER, attrib_0_buffer_id_);
glVertexAttribPointer(0, 1, GL_FLOAT, GL_FALSE, 0, NULL);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glGenBuffersARB(1, &fixed_attrib_buffer_id_);
texture_units_.reset(
new TextureUnit[group_->max_texture_units()]);
for (uint32 tt = 0; tt < group_->max_texture_units(); ++tt) {
glActiveTexture(GL_TEXTURE0 + tt);
TextureManager::TextureInfo* info;
if (feature_info_->feature_flags().oes_egl_image_external) {
info = texture_manager()->GetDefaultTextureInfo(GL_TEXTURE_EXTERNAL_OES);
texture_units_[tt].bound_texture_external_oes = info;
glBindTexture(GL_TEXTURE_EXTERNAL_OES, info->service_id());
}
if (feature_info_->feature_flags().arb_texture_rectangle) {
info = texture_manager()->GetDefaultTextureInfo(GL_TEXTURE_RECTANGLE_ARB);
texture_units_[tt].bound_texture_rectangle_arb = info;
glBindTexture(GL_TEXTURE_RECTANGLE_ARB, info->service_id());
}
info = texture_manager()->GetDefaultTextureInfo(GL_TEXTURE_CUBE_MAP);
texture_units_[tt].bound_texture_cube_map = info;
glBindTexture(GL_TEXTURE_CUBE_MAP, info->service_id());
info = texture_manager()->GetDefaultTextureInfo(GL_TEXTURE_2D);
texture_units_[tt].bound_texture_2d = info;
glBindTexture(GL_TEXTURE_2D, info->service_id());
}
glActiveTexture(GL_TEXTURE0);
CHECK_GL_ERROR();
ContextCreationAttribParser attrib_parser;
if (!attrib_parser.Parse(attribs))
return false;
GLint v = 0;
glGetIntegerv(GL_ALPHA_BITS, &v);
back_buffer_color_format_ =
(attrib_parser.alpha_size_ != 0 && v > 0) ? GL_RGBA : GL_RGB;
glGetIntegerv(GL_DEPTH_BITS, &v);
back_buffer_has_depth_ = attrib_parser.depth_size_ != 0 && v > 0;
glGetIntegerv(GL_STENCIL_BITS, &v);
back_buffer_has_stencil_ = attrib_parser.stencil_size_ != 0 && v > 0;
if (offscreen) {
if (attrib_parser.samples_ > 0 && attrib_parser.sample_buffers_ > 0 &&
(context_->HasExtension("GL_EXT_framebuffer_multisample") ||
context_->HasExtension("GL_ANGLE_framebuffer_multisample"))) {
GLint max_sample_count = 1;
glGetIntegerv(GL_MAX_SAMPLES_EXT, &max_sample_count);
offscreen_target_samples_ = std::min(attrib_parser.samples_,
max_sample_count);
} else {
offscreen_target_samples_ = 1;
}
offscreen_target_buffer_preserved_ = attrib_parser.buffer_preserved_;
if (gfx::GetGLImplementation() == gfx::kGLImplementationEGLGLES2) {
const bool rgb8_supported =
context_->HasExtension("GL_OES_rgb8_rgba8");
if (rgb8_supported && offscreen_target_samples_ > 1) {
offscreen_target_color_format_ = attrib_parser.alpha_size_ > 0 ?
GL_RGBA8 : GL_RGB8;
} else {
offscreen_target_samples_ = 1;
offscreen_target_color_format_ = attrib_parser.alpha_size_ > 0 ?
GL_RGBA : GL_RGB;
}
const bool depth24_stencil8_supported =
context_->HasExtension("GL_OES_packed_depth_stencil");
VLOG(1) << "GL_OES_packed_depth_stencil "
<< (depth24_stencil8_supported ? "" : "not ") << "supported.";
if ((attrib_parser.depth_size_ > 0 || attrib_parser.stencil_size_ > 0) &&
depth24_stencil8_supported) {
offscreen_target_depth_format_ = GL_DEPTH24_STENCIL8;
offscreen_target_stencil_format_ = 0;
} else {
offscreen_target_depth_format_ = attrib_parser.depth_size_ > 0 ?
GL_DEPTH_COMPONENT16 : 0;
offscreen_target_stencil_format_ = attrib_parser.stencil_size_ > 0 ?
GL_STENCIL_INDEX8 : 0;
}
} else {
offscreen_target_color_format_ = attrib_parser.alpha_size_ > 0 ?
GL_RGBA : GL_RGB;
const bool depth24_stencil8_supported =
context_->HasExtension("GL_EXT_packed_depth_stencil");
VLOG(1) << "GL_EXT_packed_depth_stencil "
<< (depth24_stencil8_supported ? "" : "not ") << "supported.";
if ((attrib_parser.depth_size_ > 0 || attrib_parser.stencil_size_ > 0) &&
depth24_stencil8_supported) {
offscreen_target_depth_format_ = GL_DEPTH24_STENCIL8;
offscreen_target_stencil_format_ = 0;
} else {
offscreen_target_depth_format_ = attrib_parser.depth_size_ > 0 ?
GL_DEPTH_COMPONENT : 0;
offscreen_target_stencil_format_ = attrib_parser.stencil_size_ > 0 ?
GL_STENCIL_INDEX : 0;
}
}
offscreen_saved_color_format_ = attrib_parser.alpha_size_ > 0 ?
GL_RGBA : GL_RGB;
offscreen_target_frame_buffer_.reset(new FrameBuffer(this));
offscreen_target_frame_buffer_->Create();
if (IsOffscreenBufferMultisampled()) {
offscreen_target_color_render_buffer_.reset(new RenderBuffer(this));
offscreen_target_color_render_buffer_->Create();
} else {
offscreen_target_color_texture_.reset(new Texture(this));
offscreen_target_color_texture_->Create();
}
offscreen_target_depth_render_buffer_.reset(new RenderBuffer(this));
offscreen_target_depth_render_buffer_->Create();
offscreen_target_stencil_render_buffer_.reset(new RenderBuffer(this));
offscreen_target_stencil_render_buffer_->Create();
offscreen_saved_frame_buffer_.reset(new FrameBuffer(this));
offscreen_saved_frame_buffer_->Create();
offscreen_saved_color_texture_.reset(new Texture(this));
offscreen_saved_color_texture_->Create();
if (!ResizeOffscreenFrameBuffer(size)) {
LOG(ERROR) << "Could not allocate offscreen buffer storage.";
Destroy(true);
return false;
}
DoBindFramebuffer(GL_FRAMEBUFFER, 0);
}
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
if (gfx::GetGLImplementation() != gfx::kGLImplementationEGLGLES2) {
glEnable(GL_VERTEX_PROGRAM_POINT_SIZE);
glEnable(GL_POINT_SPRITE);
}
has_robustness_extension_ =
context->HasExtension("GL_ARB_robustness") ||
context->HasExtension("GL_EXT_robustness");
if (!feature_info_->feature_flags().disable_workarounds) {
#if defined(OS_MACOSX)
needs_mac_nvidia_driver_workaround_ =
feature_info_->feature_flags().is_nvidia;
needs_glsl_built_in_function_emulation_ =
feature_info_->feature_flags().is_amd;
#endif
}
if (!InitializeShaderTranslator()) {
return false;
}
viewport_width_ = size.width();
viewport_height_ = size.height();
glViewport(viewport_x_, viewport_y_, viewport_width_, viewport_height_);
GLint viewport_params[4] = { 0 };
glGetIntegerv(GL_MAX_VIEWPORT_DIMS, viewport_params);
viewport_max_width_ = viewport_params[0];
viewport_max_height_ = viewport_params[1];
glActiveTexture(GL_TEXTURE0 + active_texture_unit_);
glLineWidth(1.0);
EnableDisable(GL_BLEND, enable_blend_);
glBlendColor(0.0f, 0.0, 0.0f, 0.0f);
glBlendFunc(GL_ONE, GL_ZERO);
glBlendEquation(GL_FUNC_ADD);
glBlendFuncSeparate(GL_ONE, GL_ZERO, GL_ONE, GL_ZERO);
glClearColor(clear_red_, clear_green_, clear_blue_, clear_alpha_);
glColorMask(mask_red_, mask_green_, mask_blue_, mask_alpha_);
EnableDisable(GL_CULL_FACE, enable_cull_face_);
glCullFace(GL_BACK);
glClearDepth(clear_depth_);
glDepthFunc(GL_LESS);
glDepthRange(0.0f, 1.0f);
EnableDisable(GL_DEPTH_TEST, enable_depth_test_);
glEnable(GL_DITHER);
glFrontFace(GL_CCW);
glHint(GL_GENERATE_MIPMAP_HINT, GL_DONT_CARE);
glLineWidth(1.0f);
glPixelStorei(GL_PACK_ALIGNMENT, pack_alignment_);
glPolygonOffset(0.0f, 0.0f);
glDisable(GL_POLYGON_OFFSET_FILL);
glSampleCoverage(1.0, false);
glScissor(viewport_x_, viewport_y_, viewport_width_, viewport_height_);
EnableDisable(GL_SCISSOR_TEST, enable_scissor_test_);
EnableDisable(GL_STENCIL_TEST, enable_stencil_test_);
glClearStencil(clear_stencil_);
glStencilFunc(GL_ALWAYS, 0, 0xFFFFFFFFU);
glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP);
glStencilMaskSeparate(GL_FRONT, mask_stencil_front_);
glStencilMaskSeparate(GL_BACK, mask_stencil_back_);
glPixelStorei(GL_UNPACK_ALIGNMENT, unpack_alignment_);
DoBindBuffer(GL_ARRAY_BUFFER, 0);
DoBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
DoBindFramebuffer(GL_FRAMEBUFFER, 0);
DoBindRenderbuffer(GL_RENDERBUFFER, 0);
#if defined(OS_MACOSX)
if (!feature_info_->feature_flags().disable_workarounds &&
(feature_info_->feature_flags().is_amd ||
feature_info_->feature_flags().is_intel) &&
gfx::GetGLImplementation() == gfx::kGLImplementationDesktopGL) {
glPointParameteri(GL_POINT_SPRITE_COORD_ORIGIN, GL_LOWER_LEFT);
}
#endif
return true;
}
| 16,887 |
81,272 | 0 | static void *eval_map_start(struct seq_file *m, loff_t *pos)
{
union trace_eval_map_item *v;
loff_t l = 0;
mutex_lock(&trace_eval_mutex);
v = trace_eval_maps;
if (v)
v++;
while (v && l < *pos) {
v = eval_map_next(m, v, &l);
}
return v;
}
| 16,888 |
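Sketch of how a ->start callback such as eval_map_start() plugs into a seq_file. eval_map_next() appears in the snippet itself; the stop/show siblings named here are assumptions following the usual start/next/stop/show quartet:
static const struct seq_operations tracing_eval_map_seq_ops = {
	.start = eval_map_start,
	.next  = eval_map_next,
	.stop  = eval_map_stop,	/* assumed: drops trace_eval_mutex */
	.show  = eval_map_show,	/* assumed: prints one map entry */
};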
135,063 | 0 | void AppCacheHost::DoPendingSwapCache() {
DCHECK_EQ(false, pending_swap_cache_callback_.is_null());
bool success = false;
if (associated_cache_.get() && associated_cache_->owning_group()) {
if (associated_cache_->owning_group()->is_obsolete()) {
success = true;
AssociateNoCache(GURL());
} else if (swappable_cache_.get()) {
DCHECK(swappable_cache_.get() ==
swappable_cache_->owning_group()->newest_complete_cache());
success = true;
AssociateCompleteCache(swappable_cache_.get());
}
}
pending_swap_cache_callback_.Run(success, pending_callback_param_);
pending_swap_cache_callback_.Reset();
pending_callback_param_ = NULL;
}
| 16,889 |
13,505 | 0 | void js_unref(js_State *J, const char *ref)
{
js_delregistry(J, ref);
}
| 16,890 |
131,079 | 0 | static void stringAttrWithGetterExceptionAttributeSetterCallback(v8::Local<v8::String>, v8::Local<v8::Value> jsValue, const v8::PropertyCallbackInfo<void>& info)
{
TRACE_EVENT_SET_SAMPLING_STATE("Blink", "DOMSetter");
TestObjectV8Internal::stringAttrWithGetterExceptionAttributeSetter(jsValue, info);
TRACE_EVENT_SET_SAMPLING_STATE("V8", "V8Execution");
}
| 16,891 |
66,924 | 0 | static int decode_frame_png(AVCodecContext *avctx,
void *data, int *got_frame,
AVPacket *avpkt)
{
PNGDecContext *const s = avctx->priv_data;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
AVFrame *p;
int64_t sig;
int ret;
ff_thread_release_buffer(avctx, &s->last_picture);
FFSWAP(ThreadFrame, s->picture, s->last_picture);
p = s->picture.f;
bytestream2_init(&s->gb, buf, buf_size);
/* check signature */
sig = bytestream2_get_be64(&s->gb);
if (sig != PNGSIG &&
sig != MNGSIG) {
av_log(avctx, AV_LOG_ERROR, "Invalid PNG signature 0x%08"PRIX64".\n", sig);
return AVERROR_INVALIDDATA;
}
s->y = s->state = s->has_trns = 0;
/* init the zlib */
s->zstream.zalloc = ff_png_zalloc;
s->zstream.zfree = ff_png_zfree;
s->zstream.opaque = NULL;
ret = inflateInit(&s->zstream);
if (ret != Z_OK) {
av_log(avctx, AV_LOG_ERROR, "inflateInit returned error %d\n", ret);
return AVERROR_EXTERNAL;
}
if ((ret = decode_frame_common(avctx, s, p, avpkt)) < 0)
goto the_end;
if (avctx->skip_frame == AVDISCARD_ALL) {
*got_frame = 0;
ret = bytestream2_tell(&s->gb);
goto the_end;
}
if ((ret = av_frame_ref(data, s->picture.f)) < 0)
return ret;
*got_frame = 1;
ret = bytestream2_tell(&s->gb);
the_end:
inflateEnd(&s->zstream);
s->crow_buf = NULL;
return ret;
}
| 16,892 |
118,850 | 0 | void WebContentsImpl::DidEndColorChooser() {
Send(new ViewMsg_DidEndColorChooser(GetRoutingID(),
color_chooser_identifier_));
color_chooser_.reset();
color_chooser_identifier_ = 0;
}
| 16,893 |
151,821 | 0 | void BackgroundLoaderOfflinerTest::OnCompletion(
const SavePageRequest& request,
Offliner::RequestStatus status) {
DCHECK(!completion_callback_called_); // Expect 1 callback per request.
completion_callback_called_ = true;
request_status_ = status;
}
| 16,894 |
4,308 | 0 | PHPAPI int _php_error_log(int opt_err, char *message, char *opt, char *headers TSRMLS_DC) /* {{{ */
{
return _php_error_log_ex(opt_err, message, (opt_err == 3) ? strlen(message) : 0, opt, headers TSRMLS_CC);
}
/* }}} */
| 16,895 |
69,202 | 0 | ZEND_API void ZEND_FASTCALL zend_hash_merge_ex(HashTable *target, HashTable *source, copy_ctor_func_t pCopyConstructor, merge_checker_func_t pMergeSource, void *pParam)
{
uint32_t idx;
Bucket *p;
zval *t;
IS_CONSISTENT(source);
IS_CONSISTENT(target);
HT_ASSERT(GC_REFCOUNT(target) == 1);
for (idx = 0; idx < source->nNumUsed; idx++) {
p = source->arData + idx;
if (UNEXPECTED(Z_TYPE(p->val) == IS_UNDEF)) continue;
if (zend_hash_replace_checker_wrapper(target, &p->val, p, pParam, pMergeSource)) {
t = zend_hash_update(target, p->key, &p->val);
if (t && pCopyConstructor) {
pCopyConstructor(t);
}
}
}
if (target->nNumOfElements > 0) {
idx = 0;
while (Z_TYPE(target->arData[idx].val) == IS_UNDEF) {
idx++;
}
target->nInternalPointer = idx;
}
}
| 16,896 |
182,099 | 1 | int ff_mms_asf_header_parser(MMSContext *mms)
{
uint8_t *p = mms->asf_header;
uint8_t *end;
int flags, stream_id;
mms->stream_num = 0;
if (mms->asf_header_size < sizeof(ff_asf_guid) * 2 + 22 ||
memcmp(p, ff_asf_header, sizeof(ff_asf_guid))) {
av_log(NULL, AV_LOG_ERROR,
"Corrupt stream (invalid ASF header, size=%d)\n",
mms->asf_header_size);
return AVERROR_INVALIDDATA;
}
end = mms->asf_header + mms->asf_header_size;
p += sizeof(ff_asf_guid) + 14;
while(end - p >= sizeof(ff_asf_guid) + 8) {
uint64_t chunksize;
if (!memcmp(p, ff_asf_data_header, sizeof(ff_asf_guid))) {
chunksize = 50; // see Reference [2] section 5.1
} else {
chunksize = AV_RL64(p + sizeof(ff_asf_guid));
}
if (!chunksize || chunksize > end - p) {
av_log(NULL, AV_LOG_ERROR,
"Corrupt stream (header chunksize %"PRId64" is invalid)\n",
chunksize);
return AVERROR_INVALIDDATA;
}
if (!memcmp(p, ff_asf_file_header, sizeof(ff_asf_guid))) {
/* read packet size */
if (end - p > sizeof(ff_asf_guid) * 2 + 68) {
mms->asf_packet_len = AV_RL32(p + sizeof(ff_asf_guid) * 2 + 64);
if (mms->asf_packet_len <= 0 || mms->asf_packet_len > sizeof(mms->in_buffer)) {
av_log(NULL, AV_LOG_ERROR,
"Corrupt stream (too large pkt_len %d)\n",
mms->asf_packet_len);
return AVERROR_INVALIDDATA;
}
}
} else if (!memcmp(p, ff_asf_stream_header, sizeof(ff_asf_guid))) {
flags = AV_RL16(p + sizeof(ff_asf_guid)*3 + 24);
stream_id = flags & 0x7F;
//The second condition is for checking CS_PKT_STREAM_ID_REQUEST packet size,
//we can calculate the packet size by stream_num.
//Please see function send_stream_selection_request().
if (mms->stream_num < MMS_MAX_STREAMS &&
46 + mms->stream_num * 6 < sizeof(mms->out_buffer)) {
mms->streams = av_fast_realloc(mms->streams,
&mms->nb_streams_allocated,
(mms->stream_num + 1) * sizeof(MMSStream));
if (!mms->streams)
return AVERROR(ENOMEM);
mms->streams[mms->stream_num].id = stream_id;
mms->stream_num++;
} else {
av_log(NULL, AV_LOG_ERROR,
"Corrupt stream (too many A/V streams)\n");
return AVERROR_INVALIDDATA;
}
} else if (!memcmp(p, ff_asf_ext_stream_header, sizeof(ff_asf_guid))) {
if (end - p >= 88) {
int stream_count = AV_RL16(p + 84), ext_len_count = AV_RL16(p + 86);
uint64_t skip_bytes = 88;
while (stream_count--) {
if (end - p < skip_bytes + 4) {
av_log(NULL, AV_LOG_ERROR,
"Corrupt stream (next stream name length is not in the buffer)\n");
return AVERROR_INVALIDDATA;
}
skip_bytes += 4 + AV_RL16(p + skip_bytes + 2);
}
while (ext_len_count--) {
if (end - p < skip_bytes + 22) {
av_log(NULL, AV_LOG_ERROR,
"Corrupt stream (next extension system info length is not in the buffer)\n");
return AVERROR_INVALIDDATA;
}
skip_bytes += 22 + AV_RL32(p + skip_bytes + 18);
}
if (end - p < skip_bytes) {
av_log(NULL, AV_LOG_ERROR,
"Corrupt stream (the last extension system info length is invalid)\n");
return AVERROR_INVALIDDATA;
}
if (chunksize - skip_bytes > 24)
chunksize = skip_bytes;
}
} else if (!memcmp(p, ff_asf_head1_guid, sizeof(ff_asf_guid))) {
chunksize = 46; // see references [2] section 3.4; this should be set to 46.
}
p += chunksize;
}
return 0;
}
| 16,897 |
179,881 | 1 | static bool cgroupfs_mount_cgroup(void *hdata, const char *root, int type)
{
size_t bufsz = strlen(root) + sizeof("/sys/fs/cgroup");
char *path = NULL;
char **parts = NULL;
char *dirname = NULL;
char *abs_path = NULL;
char *abs_path2 = NULL;
struct cgfs_data *cgfs_d;
struct cgroup_process_info *info, *base_info;
int r, saved_errno = 0;
cgfs_d = hdata;
if (!cgfs_d)
return false;
base_info = cgfs_d->info;
/* If we get passed the _NOSPEC types, we default to _MIXED, since we don't
* have access to the lxc_conf object at this point. It really should be up
* to the caller to fix this, but this doesn't really hurt.
*/
if (type == LXC_AUTO_CGROUP_FULL_NOSPEC)
type = LXC_AUTO_CGROUP_FULL_MIXED;
else if (type == LXC_AUTO_CGROUP_NOSPEC)
type = LXC_AUTO_CGROUP_MIXED;
if (type < LXC_AUTO_CGROUP_RO || type > LXC_AUTO_CGROUP_FULL_MIXED) {
ERROR("could not mount cgroups into container: invalid type specified internally");
errno = EINVAL;
return false;
}
path = calloc(1, bufsz);
if (!path)
return false;
snprintf(path, bufsz, "%s/sys/fs/cgroup", root);
r = mount("cgroup_root", path, "tmpfs", MS_NOSUID|MS_NODEV|MS_NOEXEC|MS_RELATIME, "size=10240k,mode=755");
if (r < 0) {
SYSERROR("could not mount tmpfs to /sys/fs/cgroup in the container");
return false;
}
/* now mount all the hierarchies we care about */
for (info = base_info; info; info = info->next) {
size_t subsystem_count, i;
struct cgroup_mount_point *mp = info->designated_mount_point;
if (!mp)
mp = lxc_cgroup_find_mount_point(info->hierarchy, info->cgroup_path, true);
if (!mp) {
SYSERROR("could not find original mount point for cgroup hierarchy while trying to mount cgroup filesystem");
goto out_error;
}
subsystem_count = lxc_array_len((void **)info->hierarchy->subsystems);
parts = calloc(subsystem_count + 1, sizeof(char *));
if (!parts)
goto out_error;
for (i = 0; i < subsystem_count; i++) {
if (!strncmp(info->hierarchy->subsystems[i], "name=", 5))
parts[i] = info->hierarchy->subsystems[i] + 5;
else
parts[i] = info->hierarchy->subsystems[i];
}
dirname = lxc_string_join(",", (const char **)parts, false);
if (!dirname)
goto out_error;
/* create subsystem directory */
abs_path = lxc_append_paths(path, dirname);
if (!abs_path)
goto out_error;
r = mkdir_p(abs_path, 0755);
if (r < 0 && errno != EEXIST) {
SYSERROR("could not create cgroup subsystem directory /sys/fs/cgroup/%s", dirname);
goto out_error;
}
abs_path2 = lxc_append_paths(abs_path, info->cgroup_path);
if (!abs_path2)
goto out_error;
if (type == LXC_AUTO_CGROUP_FULL_RO || type == LXC_AUTO_CGROUP_FULL_RW || type == LXC_AUTO_CGROUP_FULL_MIXED) {
/* bind-mount the cgroup entire filesystem there */
if (strcmp(mp->mount_prefix, "/") != 0) {
/* FIXME: maybe we should just try to remount the entire hierarchy
* with a regular mount command? may that works? */
ERROR("could not automatically mount cgroup-full to /sys/fs/cgroup/%s: host has no mount point for this cgroup filesystem that has access to the root cgroup", dirname);
goto out_error;
}
r = mount(mp->mount_point, abs_path, "none", MS_BIND, 0);
if (r < 0) {
SYSERROR("error bind-mounting %s to %s", mp->mount_point, abs_path);
goto out_error;
}
/* main cgroup path should be read-only */
if (type == LXC_AUTO_CGROUP_FULL_RO || type == LXC_AUTO_CGROUP_FULL_MIXED) {
r = mount(NULL, abs_path, NULL, MS_REMOUNT|MS_BIND|MS_RDONLY, NULL);
if (r < 0) {
SYSERROR("error re-mounting %s readonly", abs_path);
goto out_error;
}
}
/* own cgroup should be read-write */
if (type == LXC_AUTO_CGROUP_FULL_MIXED) {
r = mount(abs_path2, abs_path2, NULL, MS_BIND, NULL);
if (r < 0) {
SYSERROR("error bind-mounting %s onto itself", abs_path2);
goto out_error;
}
r = mount(NULL, abs_path2, NULL, MS_REMOUNT|MS_BIND, NULL);
if (r < 0) {
SYSERROR("error re-mounting %s readwrite", abs_path2);
goto out_error;
}
}
} else {
/* create path for container's cgroup */
r = mkdir_p(abs_path2, 0755);
if (r < 0 && errno != EEXIST) {
SYSERROR("could not create cgroup directory /sys/fs/cgroup/%s%s", dirname, info->cgroup_path);
goto out_error;
}
/* for read-only and mixed cases, we have to bind-mount the tmpfs directory
* that points to the hierarchy itself (i.e. /sys/fs/cgroup/cpu etc.) onto
* itself and then bind-mount it read-only, since we keep the tmpfs itself
* read-write (see comment below)
*/
if (type == LXC_AUTO_CGROUP_MIXED || type == LXC_AUTO_CGROUP_RO) {
r = mount(abs_path, abs_path, NULL, MS_BIND, NULL);
if (r < 0) {
SYSERROR("error bind-mounting %s onto itself", abs_path);
goto out_error;
}
r = mount(NULL, abs_path, NULL, MS_REMOUNT|MS_BIND|MS_RDONLY, NULL);
if (r < 0) {
SYSERROR("error re-mounting %s readonly", abs_path);
goto out_error;
}
}
free(abs_path);
abs_path = NULL;
/* bind-mount container's cgroup to that directory */
abs_path = cgroup_to_absolute_path(mp, info->cgroup_path, NULL);
if (!abs_path)
goto out_error;
r = mount(abs_path, abs_path2, "none", MS_BIND, 0);
if (r < 0) {
SYSERROR("error bind-mounting %s to %s", abs_path, abs_path2);
goto out_error;
}
if (type == LXC_AUTO_CGROUP_RO) {
r = mount(NULL, abs_path2, NULL, MS_REMOUNT|MS_BIND|MS_RDONLY, NULL);
if (r < 0) {
SYSERROR("error re-mounting %s readonly", abs_path2);
goto out_error;
}
}
}
free(abs_path);
free(abs_path2);
abs_path = NULL;
abs_path2 = NULL;
/* add symlinks for every single subsystem */
if (subsystem_count > 1) {
for (i = 0; i < subsystem_count; i++) {
abs_path = lxc_append_paths(path, parts[i]);
if (!abs_path)
goto out_error;
r = symlink(dirname, abs_path);
if (r < 0)
WARN("could not create symlink %s -> %s in /sys/fs/cgroup of container", parts[i], dirname);
free(abs_path);
abs_path = NULL;
}
}
free(dirname);
free(parts);
dirname = NULL;
parts = NULL;
}
/* We used to remount the entire tmpfs readonly if any :ro or
* :mixed mode was specified. However, Ubuntu's mountall has the
* unfortunate behavior to block bootup if /sys/fs/cgroup is
* mounted read-only and cannot be remounted read-write.
* (mountall reads /lib/init/fstab and tries to (re-)mount all of
* these if they are not already mounted with the right options;
* it contains an entry for /sys/fs/cgroup. In case it can't do
* that, it prompts for the user to either manually fix it or
* boot anyway. But without user input, booting of the container
* hangs.)
*
* Instead of remounting the entire tmpfs readonly, we only
* remount the paths readonly that are part of the cgroup
* hierarchy.
*/
free(path);
return true;
out_error:
saved_errno = errno;
free(path);
free(dirname);
free(parts);
free(abs_path);
free(abs_path2);
errno = saved_errno;
return false;
}
| 16,898 |
109,202 | 0 | Page* InspectorController::inspectedPage() const
{
return m_page;
}
| 16,899 |