unique_id
int64 13
189k
| target
int64 0
1
| code
stringlengths 20
241k
| __index_level_0__
int64 0
18.9k
|
---|---|---|---|
77,011 | 0 | ofpacts_decode(const void *actions, size_t actions_len,
enum ofp_version ofp_version,
const struct vl_mff_map *vl_mff_map,
uint64_t *ofpacts_tlv_bitmap, struct ofpbuf *ofpacts)
{
/* Decode a raw OpenFlow action list ('actions', 'actions_len' bytes)
 * into ofpact structures appended to 'ofpacts'.  Pulls one raw action
 * at a time; returns 0 on success or an ofperr for the first action
 * that fails to pull or decode, after logging it. */
struct ofpbuf openflow = ofpbuf_const_initializer(actions, actions_len);
while (openflow.size) {
const struct ofp_action_header *action = openflow.data;
enum ofp_raw_action_type raw;
enum ofperr error;
uint64_t arg;
error = ofpact_pull_raw(&openflow, ofp_version, &raw, &arg);
if (!error) {
error = ofpact_decode(action, raw, ofp_version, arg, vl_mff_map,
ofpacts_tlv_bitmap, ofpacts);
}
if (error) {
/* Log the offending action bytes before bailing out. */
log_bad_action(actions, actions_len, action, error);
return error;
}
}
return 0;
}
| 1,700 |
62,788 | 0 | static void MSLPushImage(MSLInfo *msl_info,Image *image)
{
/* Push one interpreter level onto the MSL state: grow the four parallel
 * arrays (image_info/draw_info/attributes/image) by one slot and clone
 * the parent level's state into the new slot.  'image' may be NULL, in
 * which case a fresh attributes image is acquired instead of cloned.
 * NOTE(review): image->filename is dereferenced in the trace call below
 * even though 'image' may be NULL per the check further down — confirm
 * callers never pass NULL while tracing is enabled. */
ssize_t
n;
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(msl_info != (MSLInfo *) NULL);
msl_info->n++;
n=msl_info->n;
/* Grow all four parallel arrays so index n (n+1 entries) is valid. */
msl_info->image_info=(ImageInfo **) ResizeQuantumMemory(msl_info->image_info,
(n+1),sizeof(*msl_info->image_info));
msl_info->draw_info=(DrawInfo **) ResizeQuantumMemory(msl_info->draw_info,
(n+1),sizeof(*msl_info->draw_info));
msl_info->attributes=(Image **) ResizeQuantumMemory(msl_info->attributes,
(n+1),sizeof(*msl_info->attributes));
msl_info->image=(Image **) ResizeQuantumMemory(msl_info->image,(n+1),
sizeof(*msl_info->image));
if ((msl_info->image_info == (ImageInfo **) NULL) ||
(msl_info->draw_info == (DrawInfo **) NULL) ||
(msl_info->attributes == (Image **) NULL) ||
(msl_info->image == (Image **) NULL))
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed")
/* Clone the parent level's state into the new top-of-stack slot. */
msl_info->image_info[n]=CloneImageInfo(msl_info->image_info[n-1]);
msl_info->draw_info[n]=CloneDrawInfo(msl_info->image_info[n-1],
msl_info->draw_info[n-1]);
if (image == (Image *) NULL)
msl_info->attributes[n]=AcquireImage(msl_info->image_info[n],
msl_info->exception);
else
msl_info->attributes[n]=CloneImage(image,0,0,MagickTrue,
msl_info->exception);
msl_info->image[n]=(Image *) image;
if ((msl_info->image_info[n] == (ImageInfo *) NULL) ||
(msl_info->attributes[n] == (Image *) NULL))
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed")
if (msl_info->number_groups != 0)
msl_info->group_info[msl_info->number_groups-1].numImages++;
}
| 1,701 |
152,988 | 0 | int PDFiumEngine::Form_GetPlatform(FPDF_FORMFILLINFO* param,
                                   void* platform,
                                   int length) {
  // Returns a numeric platform identifier for the PDF form-fill API:
  // 0 = Windows, 1 = Linux, 2 = anything else.  |param|, |platform| and
  // |length| are part of the FPDF_FORMFILLINFO callback signature and are
  // not used by this implementation.
  int platform_flag = -1;
#if defined(WIN32)
  platform_flag = 0;
#elif defined(__linux__)
  platform_flag = 1;
#else
  platform_flag = 2;
#endif
  // Removed: an unused local std::string that formatted the platform id
  // into a JavaScript alert() call — dead code with no effect on behavior.
  return platform_flag;
}
| 1,702 |
92,692 | 0 | static void switched_to_fair(struct rq *rq, struct task_struct *p)
{
/* Invoked when task 'p' switches its scheduling class to CFS:
 * re-attach its cfs_rq accounting, then, if the task is queued,
 * reschedule (if it is running) or check whether it should preempt
 * the current task. */
attach_task_cfs_rq(p);
if (task_on_rq_queued(p)) {
/*
* We were most likely switched from sched_rt, so
* kick off the schedule if running, otherwise just see
* if we can still preempt the current task.
*/
if (rq->curr == p)
resched_curr(rq);
else
check_preempt_curr(rq, p, 0);
}
}
| 1,703 |
143,925 | 0 | png_check_cHRM_fixed(png_structp png_ptr,
png_fixed_point white_x, png_fixed_point white_y, png_fixed_point red_x,
png_fixed_point red_y, png_fixed_point green_x, png_fixed_point green_y,
png_fixed_point blue_x, png_fixed_point blue_y)
{
/* Validate cHRM chromaticity values (fixed point, units of 1/100000).
 * Returns 1 when all checks pass, 0 otherwise; every failed check emits
 * a png_warning and checking continues so all problems are reported.
 * Checks: non-negative values, values <= PNG_UINT_31_MAX (21474.83),
 * x + y <= 1.0 for each point, and a non-degenerate RGB triangle.  The
 * triangle test compares 64-bit cross products (via png_64bit_product)
 * to avoid overflowing 32-bit fixed-point multiplication. */
int ret = 1;
unsigned long xy_hi,xy_lo,yx_hi,yx_lo;
png_debug(1, "in function png_check_cHRM_fixed");
if (png_ptr == NULL)
return 0;
if (white_x < 0 || white_y <= 0 ||
red_x < 0 || red_y < 0 ||
green_x < 0 || green_y < 0 ||
blue_x < 0 || blue_y < 0)
{
png_warning(png_ptr,
"Ignoring attempt to set negative chromaticity value");
ret = 0;
}
if (white_x > (png_fixed_point) PNG_UINT_31_MAX ||
white_y > (png_fixed_point) PNG_UINT_31_MAX ||
red_x > (png_fixed_point) PNG_UINT_31_MAX ||
red_y > (png_fixed_point) PNG_UINT_31_MAX ||
green_x > (png_fixed_point) PNG_UINT_31_MAX ||
green_y > (png_fixed_point) PNG_UINT_31_MAX ||
blue_x > (png_fixed_point) PNG_UINT_31_MAX ||
blue_y > (png_fixed_point) PNG_UINT_31_MAX )
{
png_warning(png_ptr,
"Ignoring attempt to set chromaticity value exceeding 21474.83");
ret = 0;
}
/* x + y must not exceed 1.0 (100000 in fixed point) for each point. */
if (white_x > 100000L - white_y)
{
png_warning(png_ptr, "Invalid cHRM white point");
ret = 0;
}
if (red_x > 100000L - red_y)
{
png_warning(png_ptr, "Invalid cHRM red point");
ret = 0;
}
if (green_x > 100000L - green_y)
{
png_warning(png_ptr, "Invalid cHRM green point");
ret = 0;
}
if (blue_x > 100000L - blue_y)
{
png_warning(png_ptr, "Invalid cHRM blue point");
ret = 0;
}
/* Zero-area triangle: the two cross-product terms are equal. */
png_64bit_product(green_x - red_x, blue_y - red_y, &xy_hi, &xy_lo);
png_64bit_product(green_y - red_y, blue_x - red_x, &yx_hi, &yx_lo);
if (xy_hi == yx_hi && xy_lo == yx_lo)
{
png_warning(png_ptr,
"Ignoring attempt to set cHRM RGB triangle with zero area");
ret = 0;
}
return ret;
}
| 1,704 |
122,305 | 0 | String HTMLInputElement::altText() const
{
    // Fallback chain for the control's alternative text:
    // alt attribute -> title attribute -> value attribute -> localized default.
    String text = fastGetAttribute(altAttr);
    if (text.isNull())
        text = getAttribute(titleAttr);
    if (text.isNull())
        text = getAttribute(valueAttr);
    if (!text.isEmpty())
        return text;
    return locale().queryString(blink::WebLocalizedString::InputElementAltText);
}
| 1,705 |
25,694 | 0 | static inline void advance(struct pt_regs *r)
{
	/* Retire the current instruction: the PC takes the value of the
	 * next-PC, and the next-PC steps one 4-byte instruction forward. */
	r->tpc = r->tnpc;
	r->tnpc = r->tnpc + 4;
	/* Compat (32-bit) tasks: keep both program counters within 32 bits. */
	if (test_thread_flag(TIF_32BIT)) {
		r->tpc &= 0xffffffff;
		r->tnpc &= 0xffffffff;
	}
}
| 1,706 |
139,886 | 0 | void MidiManagerUsb::ReceiveUsbMidiData(UsbMidiDevice* device,
                                        int endpoint_number,
                                        const uint8* data,
                                        size_t size,
                                        base::TimeTicks time) {
  // Hand raw USB-MIDI bytes to the input-stream parser; drop them when no
  // input stream has been created yet.
  if (!input_stream_)
    return;
  input_stream_->OnReceivedData(device, endpoint_number, data, size, time);
}
| 1,707 |
158,551 | 0 | bool WebLocalFrameImpl::IsLocalRoot() const {
/* Thin wrapper: delegates to the underlying core frame. */
return frame_->IsLocalRoot();
}
| 1,708 |
123,235 | 0 | void RenderWidgetHostViewAura::RenderViewGone(base::TerminationStatus status,
int error_code) {
/* The renderer process is gone: refresh the cursor (which may have been
 * set by the dead renderer) and destroy this view.  'status' and
 * 'error_code' are unused here. */
UpdateCursorIfOverSelf();
Destroy();
}
| 1,709 |
184,296 | 1 | DateTimeFieldElement::DateTimeFieldElement(Document* document, FieldOwner& fieldOwner)
: HTMLElement(spanTag, document)
, m_fieldOwner(&fieldOwner)
{
/* One editable field of a date/time control; presented to accessibility
 * tools as a spin button.  NOTE(review): m_fieldOwner is a raw pointer
 * to the owner — confirm the owner outlives this element. */
setAttribute(roleAttr, "spinbutton");
}
| 1,710 |
116,216 | 0 | QQuickWebViewAttached::QQuickWebViewAttached(QObject* object)
: QObject(object)
, m_view(0)
{
/* Attached-property helper; the associated view pointer starts null and
 * is assigned later. */
}
| 1,711 |
84,522 | 0 | next_nonnull_line(Line *line)
{
    /* Advance, starting at 'line', to the first line whose length is
     * non-zero; fail with -1 when no such line exists. */
    Line *cur = line;
    while (cur != NULL && cur->len == 0)
        cur = cur->next;
    if (cur == NULL || cur->len == 0)
        return -1;
    /* Make it the current line; reset the column only if we moved. */
    Currentbuf->currentLine = cur;
    if (cur != line)
        Currentbuf->pos = 0;
    return 0;
}
| 1,712 |
123,761 | 0 | int ReadFile(const FilePath& filename, char* data, int size) {
  // Read up to |size| bytes from |filename| into |data|.  Returns the
  // number of bytes read, or a negative value on error.
  base::ThreadRestrictions::AssertIOAllowed();
  int fd = HANDLE_EINTR(open(filename.value().c_str(), O_RDONLY));
  if (fd < 0)
    return -1;
  ssize_t bytes_read = HANDLE_EINTR(read(fd, data, size));
  // BUG FIX: the original wrote `if (int ret = HANDLE_EINTR(close(fd)) < 0)`,
  // which parses as `ret = (close(...) < 0)` — ret became 0 or 1, so a
  // failed close() returned 1, indistinguishable from "read one byte".
  // Bind the close() result first, then compare.
  int ret = HANDLE_EINTR(close(fd));
  if (ret < 0)
    return ret;
  return bytes_read;
}
| 1,713 |
185,814 | 1 | void MediaInterfaceProxy::CreateCdm(
media::mojom::ContentDecryptionModuleRequest request) {
/* Forward the CDM creation request to the (possibly lazily-connected)
 * media interface factory.  Must be called on the owning thread. */
DCHECK(thread_checker_.CalledOnValidThread());
GetMediaInterfaceFactory()->CreateCdm(std::move(request));
}
| 1,714 |
68,436 | 0 | static int __sctp_connect(struct sock *sk,
struct sockaddr *kaddrs,
int addrs_size,
sctp_assoc_t *assoc_id)
{
/* Core of connect()/sctp_connectx(): walk the packed address list in
 * 'kaddrs' ('addrs_size' bytes total), validate each address, create an
 * association on first use, add every address as a peer transport, then
 * start the association and wait for it to become established.  On
 * success optionally stores the association id in '*assoc_id'.
 * Returns 0 or a negative errno; any partially built association is
 * freed on the error path. */
struct net *net = sock_net(sk);
struct sctp_sock *sp;
struct sctp_endpoint *ep;
struct sctp_association *asoc = NULL;
struct sctp_association *asoc2;
struct sctp_transport *transport;
union sctp_addr to;
sctp_scope_t scope;
long timeo;
int err = 0;
int addrcnt = 0;
int walk_size = 0;
union sctp_addr *sa_addr = NULL;
void *addr_buf;
unsigned short port;
unsigned int f_flags = 0;
sp = sctp_sk(sk);
ep = sp->ep;
/* connect() cannot be done on a socket that is already in ESTABLISHED
* state - UDP-style peeled off socket or a TCP-style socket that
* is already connected.
* It cannot be done even on a TCP-style listening socket.
*/
if (sctp_sstate(sk, ESTABLISHED) || sctp_sstate(sk, CLOSING) ||
(sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))) {
err = -EISCONN;
goto out_free;
}
/* Walk through the addrs buffer and count the number of addresses. */
addr_buf = kaddrs;
while (walk_size < addrs_size) {
struct sctp_af *af;
if (walk_size + sizeof(sa_family_t) > addrs_size) {
err = -EINVAL;
goto out_free;
}
sa_addr = addr_buf;
af = sctp_get_af_specific(sa_addr->sa.sa_family);
/* If the address family is not supported or if this address
* causes the address buffer to overflow return EINVAL.
*/
if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
err = -EINVAL;
goto out_free;
}
port = ntohs(sa_addr->v4.sin_port);
/* Save current address so we can work with it */
memcpy(&to, sa_addr, af->sockaddr_len);
err = sctp_verify_addr(sk, &to, af->sockaddr_len);
if (err)
goto out_free;
/* Make sure the destination port is correctly set
* in all addresses.
*/
if (asoc && asoc->peer.port && asoc->peer.port != port) {
err = -EINVAL;
goto out_free;
}
/* Check if there already is a matching association on the
* endpoint (other than the one created here).
*/
asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport);
if (asoc2 && asoc2 != asoc) {
if (asoc2->state >= SCTP_STATE_ESTABLISHED)
err = -EISCONN;
else
err = -EALREADY;
goto out_free;
}
/* If we could not find a matching association on the endpoint,
* make sure that there is no peeled-off association matching
* the peer address even on another socket.
*/
if (sctp_endpoint_is_peeled_off(ep, &to)) {
err = -EADDRNOTAVAIL;
goto out_free;
}
if (!asoc) {
/* If a bind() or sctp_bindx() is not called prior to
* an sctp_connectx() call, the system picks an
* ephemeral port and will choose an address set
* equivalent to binding with a wildcard address.
*/
if (!ep->base.bind_addr.port) {
if (sctp_autobind(sk)) {
err = -EAGAIN;
goto out_free;
}
} else {
/*
* If an unprivileged user inherits a 1-many
* style socket with open associations on a
* privileged port, it MAY be permitted to
* accept new associations, but it SHOULD NOT
* be permitted to open new associations.
*/
if (ep->base.bind_addr.port < PROT_SOCK &&
!ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) {
err = -EACCES;
goto out_free;
}
}
scope = sctp_scope(&to);
asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
if (!asoc) {
err = -ENOMEM;
goto out_free;
}
err = sctp_assoc_set_bind_addr_from_ep(asoc, scope,
GFP_KERNEL);
if (err < 0) {
goto out_free;
}
}
/* Prime the peer's transport structures. */
transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL,
SCTP_UNKNOWN);
if (!transport) {
err = -ENOMEM;
goto out_free;
}
addrcnt++;
addr_buf += af->sockaddr_len;
walk_size += af->sockaddr_len;
}
/* In case the user of sctp_connectx() wants an association
* id back, assign one now.
*/
if (assoc_id) {
err = sctp_assoc_set_id(asoc, GFP_KERNEL);
if (err < 0)
goto out_free;
}
err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
if (err < 0) {
goto out_free;
}
/* Initialize sk's dport and daddr for getpeername() */
inet_sk(sk)->inet_dport = htons(asoc->peer.port);
sp->pf->to_sk_daddr(sa_addr, sk);
sk->sk_err = 0;
/* in-kernel sockets don't generally have a file allocated to them
* if all they do is call sock_create_kern().
*/
if (sk->sk_socket->file)
f_flags = sk->sk_socket->file->f_flags;
timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK);
if (assoc_id)
*assoc_id = asoc->assoc_id;
err = sctp_wait_for_connect(asoc, &timeo);
/* Note: the asoc may be freed after the return of
* sctp_wait_for_connect.
*/
/* Don't free association on exit. */
asoc = NULL;
out_free:
pr_debug("%s: took out_free path with asoc:%p kaddrs:%p err:%d\n",
__func__, asoc, kaddrs, err);
if (asoc) {
/* sctp_primitive_ASSOCIATE may have added this association
* To the hash table, try to unhash it, just in case, its a noop
* if it wasn't hashed so we're safe
*/
sctp_association_free(asoc);
}
return err;
}
| 1,715 |
98,346 | 0 | void FrameLoaderClient::postProgressStartedNotification()
{
/* Emit the GTK "load-started" signal for this frame's view and notify
 * listeners that the "progress" property changed. */
WebKitWebView* webView = getViewFromFrame(m_frame);
g_signal_emit_by_name(webView, "load-started", m_frame);
g_object_notify(G_OBJECT(webView), "progress");
}
| 1,716 |
47,635 | 0 | static inline int ap_test_config(unsigned int *field, unsigned int nr)
{
	/* Bits are packed 32 per word; numbers above 255 are never set. */
	if (nr <= 0xFFu)
		return ap_test_bit(field + (nr >> 5), nr & 0x1f);
	return 0;
}
| 1,717 |
77,399 | 0 | ofproto_type_run(const char *datapath_type)
{
/* Run periodic work for the ofproto class handling 'datapath_type'.
 * The type is normalized first; EAGAIN is treated as benign and not
 * logged.  Returns the class's type_run() result, or 0 when the class
 * provides no type_run hook. */
const struct ofproto_class *class;
int error;
datapath_type = ofproto_normalize_type(datapath_type);
class = ofproto_class_find__(datapath_type);
error = class->type_run ? class->type_run(datapath_type) : 0;
if (error && error != EAGAIN) {
VLOG_ERR_RL(&rl, "%s: type_run failed (%s)",
datapath_type, ovs_strerror(error));
}
return error;
}
| 1,718 |
81,072 | 0 | static void __always_inline vmx_set_intercept_for_msr(unsigned long *msr_bitmap,
						       u32 msr, int type, bool value)
{
	/* Route to the enable/disable helper based on the requested state. */
	if (!value)
		vmx_disable_intercept_for_msr(msr_bitmap, msr, type);
	else
		vmx_enable_intercept_for_msr(msr_bitmap, msr, type);
}
| 1,719 |
69,044 | 0 | static MagickBooleanType NegateCMYK(Image *image,ExceptionInfo *exception)
{
/* Negate every channel except alpha: temporarily mask alpha out of the
 * image's channel mask, negate, then restore the previous mask. */
ChannelType
channel_mask;
MagickBooleanType
status;
channel_mask=SetImageChannelMask(image,(ChannelType)(AllChannels &~
AlphaChannel));
status=NegateImage(image,MagickFalse,exception);
(void) SetImageChannelMask(image,channel_mask);
return(status);
}
| 1,720 |
9,047 | 0 | static void vmxnet3_reset_mac(VMXNET3State *s)
{
/* Restore the configured MAC address from the device's permanent MAC. */
memcpy(&s->conf.macaddr.a, &s->perm_mac.a, sizeof(s->perm_mac.a));
VMW_CFPRN("MAC address set to: " MAC_FMT, MAC_ARG(s->conf.macaddr.a));
}
| 1,721 |
108,562 | 0 | void ScrollbarThemeWin::paintButton(GraphicsContext* gc, ScrollbarThemeClient* scrollbar, const IntRect& rect, ScrollbarPart part)
{
/* Paint one scrollbar arrow button via the platform theme engine.  The
 * DFCS_* part id follows from the button position and the scrollbar's
 * orientation; the classic-theme state flags are OR'd into it. */
bool horz = scrollbar->orientation() == HorizontalScrollbar;
int partId;
if (part == BackButtonStartPart || part == ForwardButtonStartPart)
partId = horz ? DFCS_SCROLLLEFT : DFCS_SCROLLUP;
else
partId = horz ? DFCS_SCROLLRIGHT : DFCS_SCROLLDOWN;
WebKit::WebCanvas* canvas = gc->canvas();
WebKit::Platform::current()->themeEngine()->paintScrollbarArrow(canvas, getThemeArrowState(scrollbar, part), partId | getClassicThemeState(scrollbar, part), WebKit::WebRect(rect));
}
| 1,722 |
94,696 | 0 | static int do_ssl3_write(SSL *s, int type, const unsigned char *buf,
unsigned int len, int create_empty_fragment)
{
/* Build and send one SSL3/TLS record of 'type' containing 'len' bytes
 * of 'buf'.  Handles pending-write resumption, alert dispatch, the
 * empty-fragment CBC countermeasure (via a recursive call with
 * 'create_empty_fragment' set), optional compression, explicit IVs,
 * MAC computation (both MAC-then-encrypt and encrypt-then-MAC), and
 * finally hands the assembled record to ssl3_write_pending().
 * Returns bytes written (or the prefix length for the recursive
 * empty-fragment call), 0/negative on would-block, -1 on error. */
unsigned char *p,*plen;
int i,mac_size,clear=0;
int prefix_len=0;
int eivlen;
long align=0;
SSL3_RECORD *wr;
SSL3_BUFFER *wb=&(s->s3->wbuf);
SSL_SESSION *sess;
/* first check if there is a SSL3_BUFFER still being written
* out. This will happen with non blocking IO */
if (wb->left != 0)
return(ssl3_write_pending(s,type,buf,len));
/* If we have an alert to send, lets send it */
if (s->s3->alert_dispatch)
{
i=s->method->ssl_dispatch_alert(s);
if (i <= 0)
return(i);
/* if it went, fall through and send more stuff */
}
if (wb->buf == NULL)
if (!ssl3_setup_write_buffer(s))
return -1;
if (len == 0 && !create_empty_fragment)
return 0;
wr= &(s->s3->wrec);
sess=s->session;
if ( (sess == NULL) ||
(s->enc_write_ctx == NULL) ||
(EVP_MD_CTX_md(s->write_hash) == NULL))
{
#if 1
clear=s->enc_write_ctx?0:1; /* must be AEAD cipher */
#else
clear=1;
#endif
mac_size=0;
}
else
{
mac_size=EVP_MD_CTX_size(s->write_hash);
if (mac_size < 0)
goto err;
}
#if 0 && !defined(OPENSSL_NO_MULTIBLOCK) && EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK
if (type==SSL3_RT_APPLICATION_DATA && s->compress==NULL &&
!SSL_USE_ETM(s) && SSL_USE_EXPLICIT_IV(s) && /*!SSL_IS_DTLS(s) &&*/
EVP_CIPHER_flags(s->enc_write_ctx->cipher)&EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK)
do {
unsigned char aad[13];
EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM mb_param = {NULL,aad,sizeof(aad),0};
int packlen;
memcpy(aad,s->s3->write_sequence,8);
aad[8]=type;
aad[9]=(unsigned char)(s->version>>8);
aad[10]=(unsigned char)(s->version);
aad[11]=(unsigned char)(len>>8);
aad[12]=(unsigned char)len;
packlen = EVP_CIPHER_CTX_ctrl(s->enc_write_ctx,
EVP_CTRL_TLS1_1_MULTIBLOCK_AAD,
sizeof(mb_param),&mb_param);
if (packlen==0 || packlen > wb->len) break;
mb_param.out = wb->buf;
mb_param.inp = buf;
mb_param.len = len;
EVP_CIPHER_CTX_ctrl(s->enc_write_ctx,
EVP_CTRL_TLS1_1_MULTIBLOCK_ENCRYPT,
sizeof(mb_param),&mb_param);
s->s3->write_sequence[7] += mb_param.interleave;
if (s->s3->write_sequence[7] < mb_param.interleave)
{
int j=6;
while (j>=0 && (++s->s3->write_sequence[j--])==0) ;
}
wb->offset=0;
wb->left = packlen;
/* memorize arguments so that ssl3_write_pending can detect bad write retries later */
s->s3->wpend_tot=len;
s->s3->wpend_buf=buf;
s->s3->wpend_type=type;
s->s3->wpend_ret=len;
/* we now just need to write the buffer */
return ssl3_write_pending(s,type,buf,len);
} while (0);
#endif
/* 'create_empty_fragment' is true only when this function calls itself */
if (!clear && !create_empty_fragment && !s->s3->empty_fragment_done)
{
/* countermeasure against known-IV weakness in CBC ciphersuites
* (see http://www.openssl.org/~bodo/tls-cbc.txt) */
if (s->s3->need_empty_fragments && type == SSL3_RT_APPLICATION_DATA)
{
/* recursive function call with 'create_empty_fragment' set;
* this prepares and buffers the data for an empty fragment
* (these 'prefix_len' bytes are sent out later
* together with the actual payload) */
prefix_len = do_ssl3_write(s, type, buf, 0, 1);
if (prefix_len <= 0)
goto err;
if (prefix_len >
(SSL3_RT_HEADER_LENGTH + SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD))
{
/* insufficient space */
SSLerr(SSL_F_DO_SSL3_WRITE, ERR_R_INTERNAL_ERROR);
goto err;
}
}
s->s3->empty_fragment_done = 1;
}
if (create_empty_fragment)
{
#if defined(SSL3_ALIGN_PAYLOAD) && SSL3_ALIGN_PAYLOAD!=0
/* extra fragment would be couple of cipher blocks,
* which would be multiple of SSL3_ALIGN_PAYLOAD, so
* if we want to align the real payload, then we can
* just pretent we simply have two headers. */
align = (long)wb->buf + 2*SSL3_RT_HEADER_LENGTH;
align = (-align)&(SSL3_ALIGN_PAYLOAD-1);
#endif
p = wb->buf + align;
wb->offset = align;
}
else if (prefix_len)
{
p = wb->buf + wb->offset + prefix_len;
}
else
{
#if defined(SSL3_ALIGN_PAYLOAD) && SSL3_ALIGN_PAYLOAD!=0
align = (long)wb->buf + SSL3_RT_HEADER_LENGTH;
align = (-align)&(SSL3_ALIGN_PAYLOAD-1);
#endif
p = wb->buf + align;
wb->offset = align;
}
/* write the header */
*(p++)=type&0xff;
wr->type=type;
*(p++)=(s->version>>8);
/* Some servers hang if iniatial client hello is larger than 256
* bytes and record version number > TLS 1.0
*/
if (s->state == SSL3_ST_CW_CLNT_HELLO_B
&& !s->renegotiate
&& TLS1_get_version(s) > TLS1_VERSION)
*(p++) = 0x1;
else
*(p++)=s->version&0xff;
/* field where we are to write out packet length */
plen=p;
p+=2;
/* Explicit IV length, block ciphers appropriate version flag */
if (s->enc_write_ctx && SSL_USE_EXPLICIT_IV(s))
{
int mode = EVP_CIPHER_CTX_mode(s->enc_write_ctx);
if (mode == EVP_CIPH_CBC_MODE)
{
eivlen = EVP_CIPHER_CTX_iv_length(s->enc_write_ctx);
if (eivlen <= 1)
eivlen = 0;
}
/* Need explicit part of IV for GCM mode */
else if (mode == EVP_CIPH_GCM_MODE)
eivlen = EVP_GCM_TLS_EXPLICIT_IV_LEN;
else
eivlen = 0;
}
else
eivlen = 0;
/* lets setup the record stuff. */
wr->data=p + eivlen;
wr->length=(int)len;
wr->input=(unsigned char *)buf;
/* we now 'read' from wr->input, wr->length bytes into
* wr->data */
/* first we compress */
if (s->compress != NULL)
{
if (!ssl3_do_compress(s))
{
SSLerr(SSL_F_DO_SSL3_WRITE,SSL_R_COMPRESSION_FAILURE);
goto err;
}
}
else
{
memcpy(wr->data,wr->input,wr->length);
wr->input=wr->data;
}
/* we should still have the output to wr->data and the input
* from wr->input. Length should be wr->length.
* wr->data still points in the wb->buf */
if (!SSL_USE_ETM(s) && mac_size != 0)
{
if (s->method->ssl3_enc->mac(s,&(p[wr->length + eivlen]),1) < 0)
goto err;
wr->length+=mac_size;
}
wr->input=p;
wr->data=p;
if (eivlen)
{
/* if (RAND_pseudo_bytes(p, eivlen) <= 0)
goto err; */
wr->length += eivlen;
}
if(s->method->ssl3_enc->enc(s,1)<1) goto err;
if (SSL_USE_ETM(s) && mac_size != 0)
{
if (s->method->ssl3_enc->mac(s,p + wr->length,1) < 0)
goto err;
wr->length+=mac_size;
}
/* record length after mac and block padding */
s2n(wr->length,plen);
if (s->msg_callback)
s->msg_callback(1, 0, SSL3_RT_HEADER, plen - 5, 5, s, s->msg_callback_arg);
/* we should now have
* wr->data pointing to the encrypted data, which is
* wr->length long */
wr->type=type; /* not needed but helps for debugging */
wr->length+=SSL3_RT_HEADER_LENGTH;
if (create_empty_fragment)
{
/* we are in a recursive call;
* just return the length, don't write out anything here
*/
return wr->length;
}
/* now let's set up wb */
wb->left = prefix_len + wr->length;
/* memorize arguments so that ssl3_write_pending can detect bad write retries later */
s->s3->wpend_tot=len;
s->s3->wpend_buf=buf;
s->s3->wpend_type=type;
s->s3->wpend_ret=len;
/* we now just need to write the buffer */
return ssl3_write_pending(s,type,buf,len);
err:
return -1;
}
| 1,723 |
3,257 | 0 | dict_param_list_write(dict_param_list *plist, ref *pdict, const ref *pwanted,
gs_ref_memory_t *imem)
{
/* Initialize 'plist' for writing parameters into dictionary 'pdict'.
 * check_dict_write presumably validates writability and can return a
 * typecheck error from within the macro — confirm against its
 * definition.  Returns 0 on success. */
check_dict_write(*pdict);
plist->u.w.write = dict_param_write;
plist->enumerate = dict_param_enumerate;
ref_param_write_init((iparam_list *) plist, pwanted, imem);
plist->dict = *pdict;
return 0;
}
| 1,724 |
87,331 | 0 | int64_t ff_guess_coded_bitrate(AVCodecContext *avctx)
{
/* Estimate the coded bitrate from frame rate, frame dimensions and bits
 * per coded sample.  Falls back to the inverse time base when no frame
 * rate is set, and to the pixel format's bits-per-pixel when
 * bits_per_coded_sample is zero.  Returns 0 when no usable frame rate
 * can be determined. */
AVRational framerate = avctx->framerate;
int bits_per_coded_sample = avctx->bits_per_coded_sample;
int64_t bitrate;
if (!(framerate.num && framerate.den))
framerate = av_inv_q(avctx->time_base);
if (!(framerate.num && framerate.den))
return 0;
if (!bits_per_coded_sample) {
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
bits_per_coded_sample = av_get_bits_per_pixel(desc);
}
bitrate = (int64_t)bits_per_coded_sample * avctx->width * avctx->height *
framerate.num / framerate.den;
return bitrate;
}
| 1,725 |
28,735 | 0 | static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
{
/* Mark vector 'vec' as in-service in the local APIC's ISR; the count is
 * bumped only when the bit was not already set. */
if (!__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
++apic->isr_count;
BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
/*
* ISR (in service register) bit is set when injecting an interrupt.
* The highest vector is injected. Thus the latest bit set matches
* the highest bit in ISR.
*/
apic->highest_isr_cache = vec;
}
| 1,726 |
92,400 | 0 | int open_terminal_in_namespace(pid_t pid, const char *name, int mode) {
/* Open terminal 'name' inside the namespaces (pid/mnt/user/root) of
 * process 'pid': a forked child joins the namespaces, opens the tty
 * (O_NOCTTY|O_CLOEXEC added to 'mode'), and passes the fd back over a
 * socketpair.  Returns the received fd or a negative errno. */
_cleanup_close_ int pidnsfd = -1, mntnsfd = -1, usernsfd = -1, rootfd = -1;
_cleanup_close_pair_ int pair[2] = { -1, -1 };
pid_t child;
int r;
r = namespace_open(pid, &pidnsfd, &mntnsfd, NULL, &usernsfd, &rootfd);
if (r < 0)
return r;
if (socketpair(AF_UNIX, SOCK_DGRAM, 0, pair) < 0)
return -errno;
r = namespace_fork("(sd-terminalns)", "(sd-terminal)", NULL, 0, FORK_RESET_SIGNALS|FORK_DEATHSIG,
pidnsfd, mntnsfd, -1, usernsfd, rootfd, &child);
if (r < 0)
return r;
if (r == 0) {
/* Child: open the terminal in the target namespaces and send the
 * fd to the parent. */
int master;
pair[0] = safe_close(pair[0]);
master = open_terminal(name, mode|O_NOCTTY|O_CLOEXEC);
if (master < 0)
_exit(EXIT_FAILURE);
if (send_one_fd(pair[1], master, 0) < 0)
_exit(EXIT_FAILURE);
_exit(EXIT_SUCCESS);
}
pair[1] = safe_close(pair[1]);
r = wait_for_terminate_and_check("(sd-terminalns)", child, 0);
if (r < 0)
return r;
if (r != EXIT_SUCCESS)
return -EIO;
return receive_one_fd(pair[0], 0);
}
| 1,727 |
138,665 | 0 | void RenderFrameHostImpl::DisableBeforeUnloadHangMonitorForTesting() {
/* Test hook: drop the beforeunload timeout so slow handlers are not
 * treated as hung. */
beforeunload_timeout_.reset();
}
| 1,728 |
47,290 | 0 | static int md5_import(struct shash_desc *desc, const void *in)
{
	/* Restore a previously exported MD5 state by copying it verbatim
	 * into the descriptor's context. */
	struct md5_state *state = shash_desc_ctx(desc);

	memcpy(state, in, sizeof(*state));
	return 0;
}
| 1,729 |
45,564 | 0 | static void crypto_cbc_free(struct crypto_instance *inst)
{
/* Release the underlying cipher spawn first, then the instance itself. */
crypto_drop_spawn(crypto_instance_ctx(inst));
kfree(inst);
}
| 1,730 |
50,765 | 0 | static int parse_part_number (void **ret_buffer, size_t *ret_buffer_len,
		uint64_t *value)
{
	/* Parse one fixed-size "number" part (u16 type, u16 length, u64 value,
	 * all network byte order) from the packet buffer and advance
	 * *ret_buffer / *ret_buffer_len past it.  Returns 0 on success, -1 on
	 * a short or malformed part. */
	char *buffer = *ret_buffer;
	size_t buffer_len = *ret_buffer_len;
	uint16_t tmp16;
	uint64_t tmp64;
	size_t exp_size = 2 * sizeof (uint16_t) + sizeof (uint64_t);
	uint16_t pkg_length;
	if (buffer_len < exp_size)
	{
		WARNING ("network plugin: parse_part_number: "
				"Packet too short: "
				"Chunk of size %zu expected, "
				"but buffer has only %zu bytes left.",
				exp_size, buffer_len);
		return (-1);
	}
	memcpy ((void *) &tmp16, buffer, sizeof (tmp16));
	buffer += sizeof (tmp16);
	/* pkg_type = ntohs (tmp16); */
	memcpy ((void *) &tmp16, buffer, sizeof (tmp16));
	buffer += sizeof (tmp16);
	pkg_length = ntohs (tmp16);
	/* BUG FIX: pkg_length comes from the network and was previously
	 * subtracted from *ret_buffer_len without validation.  A crafted
	 * pkg_length larger than buffer_len made the size_t subtraction wrap
	 * around, yielding a huge remaining length and out-of-bounds reads in
	 * later parse steps.  A number part has a fixed layout, so its length
	 * field must be exactly exp_size. */
	if (pkg_length != exp_size)
	{
		WARNING ("network plugin: parse_part_number: "
				"Invalid part length %u "
				"(expected %zu).",
				(unsigned int) pkg_length, exp_size);
		return (-1);
	}
	memcpy ((void *) &tmp64, buffer, sizeof (tmp64));
	buffer += sizeof (tmp64);
	*value = ntohll (tmp64);
	*ret_buffer = buffer;
	*ret_buffer_len = buffer_len - pkg_length;
	return (0);
} /* int parse_part_number */
| 1,731 |
177,084 | 0 | status_t SoftMPEG2::setNumCores() {
/* Tell the MPEG-2 decoder how many worker cores to use (capped at
 * CODEC_MAX_NUM_CORES) via the IVDEXT_CMD_CTL_SET_NUM_CORES control.
 * Returns OK on success, UNKNOWN_ERROR otherwise. */
ivdext_ctl_set_num_cores_ip_t s_set_cores_ip;
ivdext_ctl_set_num_cores_op_t s_set_cores_op;
IV_API_CALL_STATUS_T status;
s_set_cores_ip.e_cmd = IVD_CMD_VIDEO_CTL;
s_set_cores_ip.e_sub_cmd = IVDEXT_CMD_CTL_SET_NUM_CORES;
s_set_cores_ip.u4_num_cores = MIN(mNumCores, CODEC_MAX_NUM_CORES);
s_set_cores_ip.u4_size = sizeof(ivdext_ctl_set_num_cores_ip_t);
s_set_cores_op.u4_size = sizeof(ivdext_ctl_set_num_cores_op_t);
status = ivdec_api_function(mCodecCtx, (void *)&s_set_cores_ip, (void *)&s_set_cores_op);
if (IV_SUCCESS != status) {
ALOGE("Error in setting number of cores: 0x%x",
s_set_cores_op.u4_error_code);
return UNKNOWN_ERROR;
}
return OK;
}
| 1,732 |
59,809 | 0 | static int hid_pre_reset(struct usb_interface *intf)
{
/* USB pre-reset callback: mark a reset as pending (under the device
 * spinlock) and stop I/O until the post-reset hook runs. */
struct hid_device *hid = usb_get_intfdata(intf);
struct usbhid_device *usbhid = hid->driver_data;
spin_lock_irq(&usbhid->lock);
set_bit(HID_RESET_PENDING, &usbhid->iofl);
spin_unlock_irq(&usbhid->lock);
hid_cease_io(usbhid);
return 0;
}
| 1,733 |
164,010 | 0 | ServiceWorkerPaymentInstrument::~ServiceWorkerPaymentInstrument() {
/* If a payment app was invoked (and was already installed), abort any
 * in-flight payment on destruction; the abort result is ignored. */
if (delegate_ && !needs_installation_) {
content::PaymentAppProvider::GetInstance()->AbortPayment(
browser_context_, stored_payment_app_info_->registration_id,
base::DoNothing());
}
}
| 1,734 |
12,695 | 0 | void ssl3_cbc_copy_mac(unsigned char *out,
const SSL3_RECORD *rec, unsigned md_size)
{
/* Copy the MAC from the end of 'rec' into 'out' in constant time, so
 * that the MAC's padding-dependent position does not leak via memory
 * access patterns or branches (Lucky-13 style countermeasure).  The
 * whole candidate region is scanned and the MAC bytes are accumulated
 * into a rotated buffer, which is then un-rotated into 'out'.  The two
 * #if branches differ only in the final rotation strategy. */
#if defined(CBC_MAC_ROTATE_IN_PLACE)
unsigned char rotated_mac_buf[64 + EVP_MAX_MD_SIZE];
unsigned char *rotated_mac;
#else
unsigned char rotated_mac[EVP_MAX_MD_SIZE];
#endif
/*
* mac_end is the index of |rec->data| just after the end of the MAC.
*/
unsigned mac_end = rec->length;
unsigned mac_start = mac_end - md_size;
/*
* scan_start contains the number of bytes that we can ignore because the
* MAC's position can only vary by 255 bytes.
*/
unsigned scan_start = 0;
unsigned i, j;
unsigned div_spoiler;
unsigned rotate_offset;
OPENSSL_assert(rec->orig_len >= md_size);
OPENSSL_assert(md_size <= EVP_MAX_MD_SIZE);
#if defined(CBC_MAC_ROTATE_IN_PLACE)
rotated_mac = rotated_mac_buf + ((0 - (size_t)rotated_mac_buf) & 63);
#endif
/* This information is public so it's safe to branch based on it. */
if (rec->orig_len > md_size + 255 + 1)
scan_start = rec->orig_len - (md_size + 255 + 1);
/*
* div_spoiler contains a multiple of md_size that is used to cause the
* modulo operation to be constant time. Without this, the time varies
* based on the amount of padding when running on Intel chips at least.
* The aim of right-shifting md_size is so that the compiler doesn't
* figure out that it can remove div_spoiler as that would require it to
* prove that md_size is always even, which I hope is beyond it.
*/
div_spoiler = md_size >> 1;
div_spoiler <<= (sizeof(div_spoiler) - 1) * 8;
rotate_offset = (div_spoiler + mac_start - scan_start) % md_size;
memset(rotated_mac, 0, md_size);
for (i = scan_start, j = 0; i < rec->orig_len; i++) {
unsigned char mac_started = constant_time_ge_8(i, mac_start);
unsigned char mac_ended = constant_time_ge_8(i, mac_end);
unsigned char b = rec->data[i];
rotated_mac[j++] |= b & mac_started & ~mac_ended;
j &= constant_time_lt(j, md_size);
}
/* Now rotate the MAC */
#if defined(CBC_MAC_ROTATE_IN_PLACE)
j = 0;
for (i = 0; i < md_size; i++) {
/* in case cache-line is 32 bytes, touch second line */
((volatile unsigned char *)rotated_mac)[rotate_offset ^ 32];
out[j++] = rotated_mac[rotate_offset++];
rotate_offset &= constant_time_lt(rotate_offset, md_size);
}
#else
memset(out, 0, md_size);
rotate_offset = md_size - rotate_offset;
rotate_offset &= constant_time_lt(rotate_offset, md_size);
for (i = 0; i < md_size; i++) {
for (j = 0; j < md_size; j++)
out[j] |= rotated_mac[i] & constant_time_eq_8(j, rotate_offset);
rotate_offset++;
rotate_offset &= constant_time_lt(rotate_offset, md_size);
}
#endif
}
| 1,735 |
119,847 | 0 | virtual ~MockScreenshotManager() {
/* Intentionally empty: the test mock needs no cleanup. */
}
| 1,736 |
131,069 | 0 | static void strictFunctionMethod(const v8::FunctionCallbackInfo<v8::Value>& info)
{
/* Generated V8 binding for TestObject.strictFunction(str, a, b).
 * Validates the argument count, converts each argument (string, float,
 * int32) via the V8TRYCATCH helpers (which return on conversion
 * failure), invokes the native implementation, and propagates any
 * ExceptionState error; otherwise returns the boolean result. */
ExceptionState exceptionState(ExceptionState::ExecutionContext, "strictFunction", "TestObject", info.Holder(), info.GetIsolate());
if (UNLIKELY(info.Length() < 3)) {
exceptionState.throwTypeError(ExceptionMessages::notEnoughArguments(3, info.Length()));
exceptionState.throwIfNeeded();
return;
}
TestObject* imp = V8TestObject::toNative(info.Holder());
V8TRYCATCH_FOR_V8STRINGRESOURCE_VOID(V8StringResource<>, str, info[0]);
V8TRYCATCH_VOID(float, a, static_cast<float>(info[1]->NumberValue()));
V8TRYCATCH_EXCEPTION_VOID(int, b, toInt32(info[2], exceptionState), exceptionState);
bool result = imp->strictFunction(str, a, b, exceptionState);
if (exceptionState.throwIfNeeded())
return;
v8SetReturnValueBool(info, result);
}
| 1,737 |
157,152 | 0 | ResourceMultiBuffer::ResourceMultiBuffer(UrlData* url_data, int block_shift)
/* Shares the URL index's LRU with the MultiBuffer base and keeps a raw
 * pointer to the owning UrlData. */
: MultiBuffer(block_shift, url_data->url_index_->lru_),
url_data_(url_data) {}
| 1,738 |
19,891 | 0 | static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
{
/* RPC completion callback for DELEGRETURN.  Renews the lease on
 * success; stale/expired stateids take the same path (the delegation is
 * gone either way).  Other errors go to the generic NFSv4 error
 * handler, which may restart the call (in which case rpc_status is not
 * recorded yet). */
struct nfs4_delegreturndata *data = calldata;
if (!nfs4_sequence_done(task, &data->res.seq_res))
return;
switch (task->tk_status) {
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_EXPIRED:
case 0:
renew_lease(data->res.server, data->timestamp);
break;
default:
if (nfs4_async_handle_error(task, data->res.server, NULL) ==
-EAGAIN) {
rpc_restart_call_prepare(task);
return;
}
}
data->rpc_status = task->tk_status;
}
| 1,739 |
40,057 | 0 | static int kvm_guest_time_update(struct kvm_vcpu *v)
{
/* Refresh the guest's pvclock: sample host TSC and kernel time (from
 * the masterclock when in use), apply TSC catch-up, rescale for the
 * current TSC frequency, and publish the updated hv_clock into guest
 * memory.  Returns 1 to request a retry when the CPU's TSC frequency is
 * not yet known, 0 otherwise. */
unsigned long flags, this_tsc_khz;
struct kvm_vcpu_arch *vcpu = &v->arch;
struct kvm_arch *ka = &v->kvm->arch;
s64 kernel_ns;
u64 tsc_timestamp, host_tsc;
struct pvclock_vcpu_time_info guest_hv_clock;
u8 pvclock_flags;
bool use_master_clock;
kernel_ns = 0;
host_tsc = 0;
/*
* If the host uses TSC clock, then passthrough TSC as stable
* to the guest.
*/
spin_lock(&ka->pvclock_gtod_sync_lock);
use_master_clock = ka->use_master_clock;
if (use_master_clock) {
host_tsc = ka->master_cycle_now;
kernel_ns = ka->master_kernel_ns;
}
spin_unlock(&ka->pvclock_gtod_sync_lock);
/* Keep irq disabled to prevent changes to the clock */
local_irq_save(flags);
this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
if (unlikely(this_tsc_khz == 0)) {
local_irq_restore(flags);
kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
return 1;
}
if (!use_master_clock) {
host_tsc = native_read_tsc();
kernel_ns = get_kernel_ns();
}
tsc_timestamp = kvm_x86_ops->read_l1_tsc(v, host_tsc);
/*
* We may have to catch up the TSC to match elapsed wall clock
* time for two reasons, even if kvmclock is used.
* 1) CPU could have been running below the maximum TSC rate
* 2) Broken TSC compensation resets the base at each VCPU
* entry to avoid unknown leaps of TSC even when running
* again on the same CPU. This may cause apparent elapsed
* time to disappear, and the guest to stand still or run
* very slowly.
*/
if (vcpu->tsc_catchup) {
u64 tsc = compute_guest_tsc(v, kernel_ns);
if (tsc > tsc_timestamp) {
adjust_tsc_offset_guest(v, tsc - tsc_timestamp);
tsc_timestamp = tsc;
}
}
local_irq_restore(flags);
if (!vcpu->pv_time_enabled)
return 0;
if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz,
&vcpu->hv_clock.tsc_shift,
&vcpu->hv_clock.tsc_to_system_mul);
vcpu->hw_tsc_khz = this_tsc_khz;
}
/* With all the info we got, fill in the values */
vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
vcpu->last_kernel_ns = kernel_ns;
vcpu->last_guest_tsc = tsc_timestamp;
/*
* The interface expects us to write an even number signaling that the
* update is finished. Since the guest won't see the intermediate
* state, we just increase by 2 at the end.
*/
vcpu->hv_clock.version += 2;
if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
&guest_hv_clock, sizeof(guest_hv_clock))))
return 0;
/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
if (vcpu->pvclock_set_guest_stopped_request) {
pvclock_flags |= PVCLOCK_GUEST_STOPPED;
vcpu->pvclock_set_guest_stopped_request = false;
}
/* If the host uses TSC clocksource, then it is stable */
if (use_master_clock)
pvclock_flags |= PVCLOCK_TSC_STABLE_BIT;
vcpu->hv_clock.flags = pvclock_flags;
kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
&vcpu->hv_clock,
sizeof(vcpu->hv_clock));
return 0;
}
| 1,740 |
56,690 | 0 | static int ext4_release_dquot(struct dquot *dquot)
{
/* Release a dquot inside a journal transaction sized for quota block
 * deletion.  When starting the handle fails the dquot is still
 * released, to avoid dqput() looping on it forever. */
int ret, err;
handle_t *handle;
handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA,
EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb));
if (IS_ERR(handle)) {
/* Release dquot anyway to avoid endless cycle in dqput() */
dquot_release(dquot);
return PTR_ERR(handle);
}
ret = dquot_release(dquot);
err = ext4_journal_stop(handle);
if (!ret)
ret = err;
return ret;
}
| 1,741 |
152,557 | 0 | blink::WebString RenderFrameImpl::UserAgentOverride() {
// Returns the renderer-preference UA override string, but only when the
// main frame is local and its (provisional or committed) document loader
// says the override is in effect; otherwise returns an empty WebString.
if (!render_view_->webview() || !render_view_->webview()->MainFrame() ||
render_view_->renderer_preferences_.user_agent_override.empty()) {
return blink::WebString();
}
// A remote main frame has no local document loader to consult.
if (render_view_->webview()->MainFrame()->IsWebRemoteFrame())
return blink::WebString();
WebLocalFrame* main_frame =
render_view_->webview()->MainFrame()->ToWebLocalFrame();
WebDocumentLoader* document_loader = nullptr;
// Prefer the in-flight (provisional) load over the committed one.
if (main_frame->GetProvisionalDocumentLoader())
document_loader = main_frame->GetProvisionalDocumentLoader();
else
document_loader = main_frame->GetDocumentLoader();
InternalDocumentStateData* internal_data =
document_loader
? InternalDocumentStateData::FromDocumentLoader(document_loader)
: nullptr;
if (internal_data && internal_data->is_overriding_user_agent())
return WebString::FromUTF8(
render_view_->renderer_preferences_.user_agent_override);
return blink::WebString();
}
| 1,742 |
50,290 | 0 | static void display_motd() {
    /* Print the message of the day to stdout.  If /etc/motd cannot be
     * opened, silently do nothing. */
    FILE *motd = fopen("/etc/motd", "r");
    int ch;

    if (motd == NULL)
        return;
    while ((ch = fgetc(motd)) != EOF)
        fputc(ch, stdout);
    fclose(motd);
}
| 1,743 |
170,384 | 0 | static bool underMetaDataPath(const Vector<uint32_t> &path) {
// True when the current atom path lies beneath the iTunes-style metadata
// container moov/udta/meta/ilst.  The >= 5 requirement means there is at
// least one tag atom below 'ilst' (elements 0-3 are the fixed prefix,
// element 4+ is the tag itself).
return path.size() >= 5
&& path[0] == FOURCC('m', 'o', 'o', 'v')
&& path[1] == FOURCC('u', 'd', 't', 'a')
&& path[2] == FOURCC('m', 'e', 't', 'a')
&& path[3] == FOURCC('i', 'l', 's', 't');
}
| 1,744 |
102,919 | 0 | void DefaultTabHandler::CreateHistoricalTab(TabContentsWrapper* contents) {
// Forwards to the owning Browser so the closed tab is recorded for
// session restore ("reopen closed tab").
delegate_->AsBrowser()->CreateHistoricalTab(contents);
}
| 1,745 |
78,328 | 0 | struct sc_card_driver * sc_get_coolkey_driver(void)
{
/* Public entry point: expose the CoolKey card driver singleton. */
return sc_get_driver();
}
| 1,746 |
103,562 | 0 | void ContainerNode::detach()
{
// Detach children first (depth-first), clear the dirty-child style flag,
// then run the base Node detach for this node itself.
detachChildren();
clearChildNeedsStyleRecalc();
Node::detach();
}
| 1,747 |
22,929 | 0 | void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
{
/*
 * Copy the stateid to use for I/O into *dst.  The open stateid is read
 * under a seqlock retry loop; if the lock owner holds an initialized
 * lock state, that lock stateid takes precedence.
 */
struct nfs4_lock_state *lsp;
int seq;
do {
seq = read_seqbegin(&state->seqlock);
memcpy(dst, &state->stateid, sizeof(*dst));
} while (read_seqretry(&state->seqlock, seq));
if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
return;
spin_lock(&state->state_lock);
lsp = __nfs4_find_lock_state(state, fl_owner);
if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
spin_unlock(&state->state_lock);
/* __nfs4_find_lock_state took a reference; drop it (NULL-safe). */
nfs4_put_lock_state(lsp);
}
| 1,748 |
115,409 | 0 | void InjectedBundlePage::didReceiveTitleForFrame(WKBundlePageRef page, WKStringRef title, WKBundleFrameRef frame, WKTypeRef*, const void *clientInfo)
{
// Static C-API trampoline: recover the InjectedBundlePage instance from
// clientInfo and forward to the member implementation.
static_cast<InjectedBundlePage*>(const_cast<void*>(clientInfo))->didReceiveTitleForFrame(title, frame);
}
| 1,749 |
42,501 | 0 | new_dev_store(struct mddev *mddev, const char *buf, size_t len)
{
/* buf must be %d:%d\n? giving major and minor numbers */
/* The new device is added to the array.
* If the array has a persistent superblock, we read the
* superblock to initialise info and check validity.
* Otherwise, only checking done is that in bind_rdev_to_array,
* which mainly checks size.
*/
char *e;
int major = simple_strtoul(buf, &e, 10);
int minor;
dev_t dev;
struct md_rdev *rdev;
int err;
if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
return -EINVAL;
minor = simple_strtoul(e+1, &e, 10);
if (*e && *e != '\n')
return -EINVAL;
/* Round-trip through MKDEV/MAJOR/MINOR rejects out-of-range numbers. */
dev = MKDEV(major, minor);
if (major != MAJOR(dev) ||
minor != MINOR(dev))
return -EOVERFLOW;
/* Let any pending rdev removal work finish before taking the lock. */
flush_workqueue(md_misc_wq);
err = mddev_lock(mddev);
if (err)
return err;
if (mddev->persistent) {
rdev = md_import_device(dev, mddev->major_version,
mddev->minor_version);
if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
/* Validate the new superblock against an existing member. */
struct md_rdev *rdev0
= list_entry(mddev->disks.next,
struct md_rdev, same_set);
err = super_types[mddev->major_version]
.load_super(rdev, rdev0, mddev->minor_version);
if (err < 0)
goto out;
}
} else if (mddev->external)
rdev = md_import_device(dev, -2, -1);
else
rdev = md_import_device(dev, -1, -1);
if (IS_ERR(rdev)) {
mddev_unlock(mddev);
return PTR_ERR(rdev);
}
err = bind_rdev_to_array(rdev, mddev);
out:
if (err)
export_rdev(rdev);
mddev_unlock(mddev);
/* sysfs store convention: return bytes consumed on success. */
return err ? err : len;
}
| 1,750 |
93,729 | 0 | static cJSON *create_reference(cJSON *item)
{
    /* Build a non-owning "reference" clone of item: the clone shares the
     * original's child/value pointers and carries cJSON_IsReference so
     * cJSON_Delete will not free the shared data through it.  The name
     * and sibling links are cleared because they belong to the clone's
     * new position, not the original's. */
    cJSON *ref = cJSON_New_Item();
    if (!ref)
        return 0;
    memcpy(ref, item, sizeof(cJSON));
    ref->string = 0;
    ref->type |= cJSON_IsReference;
    ref->next = 0;
    ref->prev = 0;
    return ref;
}
| 1,751 |
9,097 | 0 | static int vrend_decode_create_rasterizer(struct vrend_decode_ctx *ctx, uint32_t handle, uint16_t length)
{
/*
 * Decode a guest-supplied rasterizer-state object from the command
 * buffer and register it under `handle`.  Returns 0 on success or a
 * positive errno (EINVAL on malformed length, ENOMEM on allocation
 * failure).
 */
struct pipe_rasterizer_state *rs_state;
uint32_t tmp;
if (length != VIRGL_OBJ_RS_SIZE)
return EINVAL;
rs_state = CALLOC_STRUCT(pipe_rasterizer_state);
if (!rs_state)
return ENOMEM;
tmp = get_buf_entry(ctx, VIRGL_OBJ_RS_S0);
/* Helpers to unpack single-bit / masked fields from word S0. */
#define ebit(name, bit) rs_state->name = (tmp >> bit) & 0x1
#define emask(name, bit, mask) rs_state->name = (tmp >> bit) & mask
ebit(flatshade, 0);
ebit(depth_clip, 1);
ebit(clip_halfz, 2);
ebit(rasterizer_discard, 3);
ebit(flatshade_first, 4);
ebit(light_twoside, 5);
ebit(sprite_coord_mode, 6);
ebit(point_quad_rasterization, 7);
emask(cull_face, 8, 0x3);
emask(fill_front, 10, 0x3);
emask(fill_back, 12, 0x3);
ebit(scissor, 14);
ebit(front_ccw, 15);
ebit(clamp_vertex_color, 16);
ebit(clamp_fragment_color, 17);
ebit(offset_line, 18);
ebit(offset_point, 19);
ebit(offset_tri, 20);
ebit(poly_smooth, 21);
ebit(poly_stipple_enable, 22);
ebit(point_smooth, 23);
ebit(point_size_per_vertex, 24);
ebit(multisample, 25);
ebit(line_smooth, 26);
ebit(line_stipple_enable, 27);
ebit(line_last_pixel, 28);
ebit(half_pixel_center, 29);
ebit(bottom_edge_rule, 30);
/* uif() reinterprets the 32-bit word as a float. */
rs_state->point_size = uif(get_buf_entry(ctx, VIRGL_OBJ_RS_POINT_SIZE));
rs_state->sprite_coord_enable = get_buf_entry(ctx, VIRGL_OBJ_RS_SPRITE_COORD_ENABLE);
tmp = get_buf_entry(ctx, VIRGL_OBJ_RS_S3);
emask(line_stipple_pattern, 0, 0xffff);
emask(line_stipple_factor, 16, 0xff);
emask(clip_plane_enable, 24, 0xff);
rs_state->line_width = uif(get_buf_entry(ctx, VIRGL_OBJ_RS_LINE_WIDTH));
rs_state->offset_units = uif(get_buf_entry(ctx, VIRGL_OBJ_RS_OFFSET_UNITS));
rs_state->offset_scale = uif(get_buf_entry(ctx, VIRGL_OBJ_RS_OFFSET_SCALE));
rs_state->offset_clamp = uif(get_buf_entry(ctx, VIRGL_OBJ_RS_OFFSET_CLAMP));
tmp = vrend_renderer_object_insert(ctx->grctx, rs_state, sizeof(struct pipe_rasterizer_state), handle,
VIRGL_OBJECT_RASTERIZER);
if (tmp == 0) {
FREE(rs_state);
return ENOMEM;
}
return 0;
}
| 1,752 |
157,026 | 0 | void MultibufferDataSource::CreateResourceLoader_Locked(
int64_t first_byte_position,
int64_t last_byte_position) {
// Replaces the current reader with one covering [first, last] byte range.
// Caller must hold lock_ and be on the render task runner.
DCHECK(render_task_runner_->BelongsToCurrentThread());
lock_.AssertAcquired();
reader_.reset(new MultiBufferReader(
url_data()->multibuffer(), first_byte_position, last_byte_position,
base::Bind(&MultibufferDataSource::ProgressCallback, weak_ptr_)));
UpdateBufferSizes();
}
| 1,753 |
36,079 | 0 | static int __init init_inodecache(void)
{
/* Create the slab cache for isofs in-memory inodes; init_once runs
 * exactly once per slab object.  Returns -ENOMEM on failure. */
isofs_inode_cachep = kmem_cache_create("isofs_inode_cache",
sizeof(struct iso_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
init_once);
if (isofs_inode_cachep == NULL)
return -ENOMEM;
return 0;
}
| 1,754 |
87,739 | 0 | static int hns_roce_mmap(struct ib_ucontext *context,
struct vm_area_struct *vma)
{
/*
 * Map device resources into userspace.  Offset (vm_pgoff) selects what
 * is mapped: 0 -> the context's UAR page (uncached), 1 -> the TPTR
 * region when the device provides one.  Anything else is rejected.
 */
struct hns_roce_dev *hr_dev = to_hr_dev(context->device);
/* Only whole-page mappings are allowed. */
if (((vma->vm_end - vma->vm_start) % PAGE_SIZE) != 0)
return -EINVAL;
if (vma->vm_pgoff == 0) {
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
if (io_remap_pfn_range(vma, vma->vm_start,
to_hr_ucontext(context)->uar.pfn,
PAGE_SIZE, vma->vm_page_prot))
return -EAGAIN;
} else if (vma->vm_pgoff == 1 && hr_dev->tptr_dma_addr &&
hr_dev->tptr_size) {
/* vm_pgoff: 1 -- TPTR */
/* NOTE(review): maps tptr_size bytes without checking it against
 * the vma length — presumably callers size the vma correctly;
 * verify. */
if (io_remap_pfn_range(vma, vma->vm_start,
hr_dev->tptr_dma_addr >> PAGE_SHIFT,
hr_dev->tptr_size,
vma->vm_page_prot))
return -EAGAIN;
} else
return -EINVAL;
return 0;
}
| 1,755 |
128,905 | 0 | bool contains(const v8::Handle<GCObject>& handle)
{
// Membership test keyed on the dereferenced handle value.
return m_map.contains(*handle);
}
| 1,756 |
67,856 | 0 | IPV6DefragSturgesNovakLastTest(void)
{
/* Unit test: run the Sturges/Novak overlapping-fragment scenario with
 * the "last fragment wins" reassembly policy and compare against the
 * expected reassembled payload below. */
/* Expected data. */
u_char expected[] = {
"AAAAAAAA"
"JJJJJJJJ"
"JJJJJJJJ"
"JJJJJJJJ"
"JJJJJJJJ"
"BBBBBBBB"
"KKKKKKKK"
"KKKKKKKK"
"KKKKKKKK"
"LLLLLLLL"
"LLLLLLLL"
"LLLLLLLL"
"MMMMMMMM"
"MMMMMMMM"
"MMMMMMMM"
"FFFFFFFF"
"NNNNNNNN"
"FFFFFFFF"
"GGGGGGGG"
"OOOOOOOO"
"PPPPPPPP"
"HHHHHHHH"
"QQQQQQQQ"
"QQQQQQQQ"
};
return IPV6DefragDoSturgesNovakTest(DEFRAG_POLICY_LAST, expected,
sizeof(expected));
}
| 1,757 |
79,056 | 0 | void __init native_pv_lock_init(void)
{
/* On bare metal (no hypervisor) disable the virt spinlock fast path. */
if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
static_branch_disable(&virt_spin_lock_key);
}
| 1,758 |
79,007 | 0 | VarCreate(ExprDef *name, ExprDef *value)
{
    /* Allocate a STMT_VAR statement node assigning `value` to `name`.
     * Ownership of both expression trees passes to the returned node.
     * Returns NULL on allocation failure. */
    VarDef *var = malloc(sizeof(*var));

    if (var == NULL)
        return NULL;

    var->common.type = STMT_VAR;
    var->common.next = NULL;
    var->name = name;
    var->value = value;
    return var;
}
| 1,759 |
117,305 | 0 | xmlXPtrEvalRangePredicate(xmlXPathParserContextPtr ctxt) {
/*
 * Evaluate an XPointer range predicate "[ expr ]": the location set on
 * top of the stack is filtered by re-evaluating the predicate
 * expression once per location, and the surviving locations are pushed
 * back as a new location set.
 */
const xmlChar *cur;
xmlXPathObjectPtr res;
xmlXPathObjectPtr obj, tmp;
xmlLocationSetPtr newset = NULL;
xmlLocationSetPtr oldset;
int i;
if (ctxt == NULL) return;
SKIP_BLANKS;
if (CUR != '[') {
XP_ERROR(XPATH_INVALID_PREDICATE_ERROR);
}
NEXT;
SKIP_BLANKS;
/*
* Extract the old set, and then evaluate the result of the
* expression for all the element in the set. use it to grow
* up a new set.
*/
CHECK_TYPE(XPATH_LOCATIONSET);
obj = valuePop(ctxt);
oldset = obj->user;
ctxt->context->node = NULL;
if ((oldset == NULL) || (oldset->locNr == 0)) {
/* Empty input set: evaluate once for side effects, keep it empty. */
ctxt->context->contextSize = 0;
ctxt->context->proximityPosition = 0;
xmlXPathEvalExpr(ctxt);
res = valuePop(ctxt);
if (res != NULL)
xmlXPathFreeObject(res);
valuePush(ctxt, obj);
CHECK_ERROR;
} else {
/*
* Save the expression pointer since we will have to evaluate
* it multiple times. Initialize the new set.
*/
cur = ctxt->cur;
newset = xmlXPtrLocationSetCreate(NULL);
for (i = 0; i < oldset->locNr; i++) {
/* Rewind the parser to the start of the predicate expression. */
ctxt->cur = cur;
/*
* Run the evaluation with a node list made of a single item
* in the nodeset.
*/
ctxt->context->node = oldset->locTab[i]->user;
tmp = xmlXPathNewNodeSet(ctxt->context->node);
valuePush(ctxt, tmp);
ctxt->context->contextSize = oldset->locNr;
ctxt->context->proximityPosition = i + 1;
xmlXPathEvalExpr(ctxt);
CHECK_ERROR;
/*
* The result of the evaluation need to be tested to
* decided whether the filter succeeded or not
*/
res = valuePop(ctxt);
if (xmlXPathEvaluatePredicateResult(ctxt, res)) {
xmlXPtrLocationSetAdd(newset,
xmlXPathObjectCopy(oldset->locTab[i]));
}
/*
* Cleanup
*/
if (res != NULL)
xmlXPathFreeObject(res);
if (ctxt->value == tmp) {
res = valuePop(ctxt);
xmlXPathFreeObject(res);
}
ctxt->context->node = NULL;
}
/*
* The result is used as the new evaluation set.
*/
xmlXPathFreeObject(obj);
ctxt->context->node = NULL;
ctxt->context->contextSize = -1;
ctxt->context->proximityPosition = -1;
valuePush(ctxt, xmlXPtrWrapLocationSet(newset));
}
if (CUR != ']') {
XP_ERROR(XPATH_INVALID_PREDICATE_ERROR);
}
NEXT;
SKIP_BLANKS;
}
| 1,760 |
156,696 | 0 | void Reset() { user_interaction_received_ = false; }  // Clear the recorded user-interaction flag so the observer can be reused.
| 1,761 |
111,798 | 0 | void SyncBackendHost::HandleActionableErrorEventOnFrontendLoop(
const browser_sync::SyncProtocolError& sync_error) {
// Relays a sync protocol error to the frontend; must run on the frontend
// loop, and is a no-op if the frontend has already been detached.
if (!frontend_)
return;
DCHECK_EQ(MessageLoop::current(), frontend_loop_);
frontend_->OnActionableError(sync_error);
}
| 1,762 |
9,414 | 0 | static void mdc2_body(MDC2_CTX *c, const unsigned char *in, size_t len)
{
/*
 * MDC-2 compression core: processes `in` in 8-byte blocks, updating the
 * two 8-byte chaining halves c->h and c->hh via DES encryptions.
 * NOTE(review): assumes len is a multiple of 8 — presumably guaranteed
 * by the caller's buffering; verify at the call site.
 */
register DES_LONG tin0, tin1;
register DES_LONG ttin0, ttin1;
DES_LONG d[2], dd[2];
DES_key_schedule k;
unsigned char *p;
size_t i;
for (i = 0; i < len; i += 8) {
c2l(in, tin0);
d[0] = dd[0] = tin0;
c2l(in, tin1);
d[1] = dd[1] = tin1;
/* Force the MDC-2 key-constant bits into each chaining half before
 * using it as a DES key. */
c->h[0] = (c->h[0] & 0x9f) | 0x40;
c->hh[0] = (c->hh[0] & 0x9f) | 0x20;
DES_set_odd_parity(&c->h);
DES_set_key_unchecked(&c->h, &k);
DES_encrypt1(d, &k, 1);
DES_set_odd_parity(&c->hh);
DES_set_key_unchecked(&c->hh, &k);
DES_encrypt1(dd, &k, 1);
/* Feed-forward XOR, then cross the halves when storing back. */
ttin0 = tin0 ^ dd[0];
ttin1 = tin1 ^ dd[1];
tin0 ^= d[0];
tin1 ^= d[1];
p = c->h;
l2c(tin0, p);
l2c(ttin1, p);
p = c->hh;
l2c(ttin0, p);
l2c(tin1, p);
}
}
| 1,763 |
4,739 | 0 | user_get_system_account (User *user)
{
/* Thin accessor: whether this accountsservice user is a system account. */
return accounts_user_get_system_account (ACCOUNTS_USER (user));
}
| 1,764 |
42,869 | 0 | static void search_timeout(GtkEntry *entry)
{
/* Debounce the search: (re)arm a 500 ms timer on every keystroke so the
 * search only starts once the user pauses typing. */
/* this little hack makes the search start searching after 500 milisec after
* user stops writing into entry box
* if this part is removed, then the search will be started on every
* change of the search entry
*/
if (g_timeout != 0)
g_source_remove(g_timeout);
g_timeout = g_timeout_add(500, &highlight_search_on_timeout, (gpointer)entry);
}
| 1,765 |
91,914 | 0 | static struct usb_function_instance *f_midi_alloc_inst(void)
{
/* Allocate and default-initialize an f_midi configfs function instance.
 * Returns ERR_PTR(-ENOMEM) on allocation failure. */
struct f_midi_opts *opts;
opts = kzalloc(sizeof(*opts), GFP_KERNEL);
if (!opts)
return ERR_PTR(-ENOMEM);
mutex_init(&opts->lock);
opts->func_inst.free_func_inst = f_midi_free_inst;
/* ALSA card defaults plus one input and one output port. */
opts->index = SNDRV_DEFAULT_IDX1;
opts->id = SNDRV_DEFAULT_STR1;
opts->buflen = 512;
opts->qlen = 32;
opts->in_ports = 1;
opts->out_ports = 1;
config_group_init_type_name(&opts->func_inst.group, "",
&midi_func_type);
return &opts->func_inst;
}
| 1,766 |
184,199 | 1 | void TranslateInfoBarBase::Layout() {
// Lay out the base infobar chrome, then position the icon at the left
// edge, vertically centered via InfoBar::OffsetY.
// Layout the close button.
InfoBar::Layout();
// Layout the icon on left of bar.
gfx::Size icon_ps = icon_->GetPreferredSize();
icon_->SetBounds(InfoBar::kHorizontalPadding, InfoBar::OffsetY(this, icon_ps),
icon_ps.width(), icon_ps.height());
}
| 1,767 |
126,138 | 0 | void BrowserLauncherItemController::Close() {
// Close via the Widget that owns window_, if one still exists.
views::Widget* widget = views::Widget::GetWidgetForNativeView(window_);
if (widget)
widget->Close();
}
| 1,768 |
68,079 | 0 | static int already_entry(RList *entries, ut64 vaddr) {
/* Return 1 if an entry with this virtual address is already in the
 * list, 0 otherwise (linear scan). */
RBinAddr *e;
RListIter *iter;
r_list_foreach (entries, iter, e) {
if (e->vaddr == vaddr) {
return 1;
}
}
return 0;
}
| 1,769 |
22,659 | 0 | int __sched wait_for_completion_interruptible(struct completion *x)
{
/* Sleep interruptibly until the completion fires.  Returns 0 on
 * completion or -ERESTARTSYS if interrupted by a signal. */
long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
if (t == -ERESTARTSYS)
return t;
return 0;
}
| 1,770 |
158,951 | 0 | bool PDFiumEngineExports::GetPDFPageSizeByIndex(const void* pdf_buffer,
int pdf_buffer_size,
int page_number,
double* width,
double* height) {
// Load the in-memory PDF, query one page's size in points, and close the
// document again.  Returns false if the document or page is invalid.
FPDF_DOCUMENT doc =
FPDF_LoadMemDocument(pdf_buffer, pdf_buffer_size, nullptr);
if (!doc)
return false;
bool success = FPDF_GetPageSizeByIndex(doc, page_number, width, height) != 0;
FPDF_CloseDocument(doc);
return success;
}
| 1,771 |
121,098 | 0 | void BaseMultipleFieldsDateAndTimeInputType::readonlyAttributeChanged()
{
// Drop any mouse capture held by the sub-widgets and let the edit field
// update its enabled/readonly presentation.
m_spinButtonElement->releaseCapture();
m_clearButton->releaseCapture();
if (m_dateTimeEditElement)
m_dateTimeEditElement->readOnlyStateChanged();
}
| 1,772 |
151,380 | 0 | ResourceRequestBlockedReason BaseFetchContext::CanRequestInternal(
Resource::Type type,
const ResourceRequest& resource_request,
const KURL& url,
const ResourceLoaderOptions& options,
SecurityViolationReportingPolicy reporting_policy,
FetchParameters::OriginRestriction origin_restriction,
ResourceRequest::RedirectStatus redirect_status) const {
// Central gatekeeper for resource loads: runs the request through every
// blocking policy (origin restriction, CSP, mixed content, subresource
// filter, ...) and returns the first applicable blocked reason, or
// kNone if the load may proceed.
if (IsDetached()) {
// Detached contexts only allow keepalive requests on redirects.
if (!resource_request.GetKeepalive() ||
redirect_status == ResourceRequest::RedirectStatus::kNoRedirect) {
return ResourceRequestBlockedReason::kOther;
}
}
if (ShouldBlockRequestByInspector(resource_request.Url()))
return ResourceRequestBlockedReason::kInspector;
SecurityOrigin* security_origin = options.security_origin.get();
if (!security_origin)
security_origin = GetSecurityOrigin();
// "Display" check: e.g. blocks web pages from loading local files.
if (origin_restriction != FetchParameters::kNoOriginRestriction &&
security_origin && !security_origin->CanDisplay(url)) {
if (reporting_policy == SecurityViolationReportingPolicy::kReport) {
AddErrorConsoleMessage(
"Not allowed to load local resource: " + url.GetString(), kJSSource);
}
RESOURCE_LOADING_DVLOG(1) << "ResourceFetcher::requestResource URL was not "
"allowed by SecurityOrigin::CanDisplay";
return ResourceRequestBlockedReason::kOther;
}
// Per-type same-origin rules.
switch (type) {
case Resource::kMainResource:
case Resource::kImage:
case Resource::kCSSStyleSheet:
case Resource::kScript:
case Resource::kFont:
case Resource::kRaw:
case Resource::kLinkPrefetch:
case Resource::kTextTrack:
case Resource::kImportResource:
case Resource::kMedia:
case Resource::kManifest:
case Resource::kMock:
if (origin_restriction == FetchParameters::kRestrictToSameOrigin &&
!security_origin->CanRequest(url)) {
PrintAccessDeniedMessage(url);
return ResourceRequestBlockedReason::kOrigin;
}
break;
case Resource::kXSLStyleSheet:
DCHECK(RuntimeEnabledFeatures::XSLTEnabled());
// Intentional fallthrough: XSLT and SVG documents are always
// same-origin-only.
case Resource::kSVGDocument:
if (!security_origin->CanRequest(url)) {
PrintAccessDeniedMessage(url);
return ResourceRequestBlockedReason::kOrigin;
}
break;
}
WebURLRequest::RequestContext request_context =
resource_request.GetRequestContext();
if (CheckCSPForRequestInternal(
request_context, url, options, reporting_policy, redirect_status,
ContentSecurityPolicy::CheckHeaderType::kCheckEnforce) ==
ResourceRequestBlockedReason::kCSP) {
return ResourceRequestBlockedReason::kCSP;
}
if (type == Resource::kScript || type == Resource::kImportResource) {
if (!AllowScriptFromSource(url)) {
return ResourceRequestBlockedReason::kCSP;
}
}
// SVG-as-image documents may only load data: subresources.
if (type != Resource::kMainResource && IsSVGImageChromeClient() &&
!url.ProtocolIsData())
return ResourceRequestBlockedReason::kOrigin;
WebURLRequest::FrameType frame_type = resource_request.GetFrameType();
if (frame_type != WebURLRequest::kFrameTypeTopLevel) {
bool is_subresource = frame_type == WebURLRequest::kFrameTypeNone;
const SecurityOrigin* embedding_origin =
is_subresource ? GetSecurityOrigin() : GetParentSecurityOrigin();
DCHECK(embedding_origin);
// Legacy schemes (e.g. ftp:) may not be embedded by modern schemes.
if (SchemeRegistry::ShouldTreatURLSchemeAsLegacy(url.Protocol()) &&
!SchemeRegistry::ShouldTreatURLSchemeAsLegacy(
embedding_origin->Protocol())) {
CountDeprecation(WebFeature::kLegacyProtocolEmbeddedAsSubresource);
return ResourceRequestBlockedReason::kOrigin;
}
if (ShouldBlockFetchAsCredentialedSubresource(resource_request, url))
return ResourceRequestBlockedReason::kOrigin;
}
if (ShouldBlockFetchByMixedContentCheck(request_context, frame_type,
resource_request.GetRedirectStatus(),
url, reporting_policy))
return ResourceRequestBlockedReason::kMixedContent;
// Dangling-markup URLs (unterminated tags with newlines) are suspect.
if (url.PotentiallyDanglingMarkup() && url.ProtocolIsInHTTPFamily()) {
CountDeprecation(WebFeature::kCanRequestURLHTTPContainingNewline);
if (RuntimeEnabledFeatures::RestrictCanRequestURLCharacterSetEnabled())
return ResourceRequestBlockedReason::kOther;
}
if (GetSubresourceFilter() && type != Resource::kMainResource &&
type != Resource::kImportResource) {
if (!GetSubresourceFilter()->AllowLoad(url, request_context,
reporting_policy)) {
return ResourceRequestBlockedReason::kSubresourceFilter;
}
}
return ResourceRequestBlockedReason::kNone;
}
| 1,773 |
90,842 | 0 | Bool gf_sys_get_rti(u32 refresh_time_ms, GF_SystemRTInfo *rti, u32 flags)
{
/* Query OS runtime info; if the OS backend could not report per-process
 * memory, estimate it from physical memory consumed since startup. */
Bool res = gf_sys_get_rti_os(refresh_time_ms, rti, flags);
if (res) {
if (!rti->process_memory) rti->process_memory = memory_at_gpac_startup - rti->physical_memory_avail;
if (!rti->gpac_memory) rti->gpac_memory = memory_at_gpac_startup - rti->physical_memory_avail;
}
return res;
}
| 1,774 |
17,478 | 0 | ProcXvStopVideo(ClientPtr client)
{
/* Xv extension request handler: validate the port and drawable, then
 * stop video on that port. */
int ret;
DrawablePtr pDraw;
XvPortPtr pPort;
REQUEST(xvStopVideoReq);
REQUEST_SIZE_MATCH(xvStopVideoReq);
VALIDATE_XV_PORT(stuff->port, pPort, DixReadAccess);
ret = dixLookupDrawable(&pDraw, stuff->drawable, client, 0, DixWriteAccess);
if (ret != Success)
return ret;
return XvdiStopVideo(client, pPort, pDraw);
}
| 1,775 |
160,007 | 0 | int BackendImpl::CheckAllEntries() {
// Walk every hash-table bucket and its collision chain, validating each
// cache entry.  Returns the number of dirty entries found, or a negative
// error code if an entry is invalid or the totals don't match the header.
int num_dirty = 0;
int num_entries = 0;
DCHECK(mask_ < std::numeric_limits<uint32_t>::max());
for (unsigned int i = 0; i <= mask_; i++) {
Addr address(data_->table[i]);
if (!address.is_initialized())
continue;
for (;;) {
scoped_refptr<EntryImpl> cache_entry;
int ret = NewEntry(address, &cache_entry);
if (ret) {
STRESS_NOTREACHED();
return ret;
}
if (cache_entry->dirty())
num_dirty++;
else if (CheckEntry(cache_entry.get()))
num_entries++;
else
return ERR_INVALID_ENTRY;
// Every entry on this chain must hash into bucket i.
DCHECK_EQ(i, cache_entry->entry()->Data()->hash & mask_);
address.set_value(cache_entry->GetNextAddress());
if (!address.is_initialized())
break;
}
}
Trace("CheckAllEntries End");
if (num_entries + num_dirty != data_->header.num_entries) {
LOG(ERROR) << "Number of entries " << num_entries << " " << num_dirty <<
" " << data_->header.num_entries;
DCHECK_LT(num_entries, data_->header.num_entries);
return ERR_NUM_ENTRIES_MISMATCH;
}
return num_dirty;
}
| 1,776 |
133,760 | 0 | int GetNetSSLVersion(SSL* ssl) {
// Map OpenSSL's on-wire protocol version to net's SSL_CONNECTION_VERSION
// enum.  The raw hex values below are TLS1_1_VERSION (0x0302) and
// TLS1_2_VERSION (0x0303) — presumably spelled out because older
// headers lack those macros; confirm before replacing them.
switch (SSL_version(ssl)) {
case SSL2_VERSION:
return SSL_CONNECTION_VERSION_SSL2;
case SSL3_VERSION:
return SSL_CONNECTION_VERSION_SSL3;
case TLS1_VERSION:
return SSL_CONNECTION_VERSION_TLS1;
case 0x0302:
return SSL_CONNECTION_VERSION_TLS1_1;
case 0x0303:
return SSL_CONNECTION_VERSION_TLS1_2;
default:
return SSL_CONNECTION_VERSION_UNKNOWN;
}
}
| 1,777 |
63,263 | 0 | void _WM_do_sysex_roland_reset(struct _mdi *mdi, struct _event_data *data) {
/* Roland GS reset sysex: treated identically to a GM reset. */
#ifdef DEBUG_MIDI
uint8_t ch = data->channel;
MIDI_EVENT_DEBUG(__FUNCTION__, ch, data->data.value);
#else
UNUSED(data);
#endif
_WM_do_sysex_gm_reset(mdi,data);
}
| 1,778 |
77,376 | 0 | ofproto_set_controllers(struct ofproto *p,
const struct ofproto_controller *controllers,
size_t n_controllers, uint32_t allowed_versions)
{
/* Delegate controller (re)configuration to the connection manager. */
connmgr_set_controllers(p->connmgr, controllers, n_controllers,
allowed_versions);
}
| 1,779 |
13,303 | 0 | pdf14_ctx_new(gs_int_rect *rect, int n_chan, bool additive, gx_device *dev)
{
/*
 * Allocate a PDF 1.4 transparency compositing context with a zeroed
 * base buffer covering `rect`.  Returns NULL on allocation failure
 * (the partially built context is cleaned up).
 */
pdf14_ctx *result;
pdf14_buf *buf;
gs_memory_t *memory = dev->memory;
bool has_tags = dev->graphics_type_tag & GS_DEVICE_ENCODES_TAGS;
pdf14_device *pdev = (pdf14_device *)dev;
result = gs_alloc_struct(memory, pdf14_ctx, &st_pdf14_ctx, "pdf14_ctx_new");
if (result == NULL)
return result;
/* Note: buffer creation expects alpha to be in number of channels */
buf = pdf14_buf_new(rect, has_tags, false, false, false, n_chan + 1,
pdev->devn_params.page_spot_colors, memory);
if (buf == NULL) {
gs_free_object(memory, result, "pdf14_ctx_new");
return NULL;
}
if_debug4m('v', memory,
"[v]base buf: %d x %d, %d color channels, %d planes\n",
buf->rect.q.x, buf->rect.q.y, buf->n_chan, buf->n_planes);
if (buf->data != NULL) {
/* When tags are present the tag plane is left uninitialized. */
if (buf->has_tags) {
memset(buf->data, 0, buf->planestride * (buf->n_planes-1));
} else {
memset(buf->data, 0, buf->planestride * buf->n_planes);
}
}
buf->saved = NULL;
result->stack = buf;
result->mask_stack = pdf14_mask_element_new(memory);
result->mask_stack->rc_mask = pdf14_rcmask_new(memory);
result->n_chan = n_chan;
result->memory = memory;
result->rect = *rect;
result->additive = additive;
result->smask_depth = 0;
result->smask_blend = false;
return result;
}
| 1,780 |
131,185 | 0 | static void activityLoggingGetterForAllWorldsLongAttributeAttributeGetterCallback(v8::Local<v8::String>, const v8::PropertyCallbackInfo<v8::Value>& info)
{
// Generated V8 binding callback: log the getter access via the context's
// activity logger (if any), then run the real attribute getter.
TRACE_EVENT_SET_SAMPLING_STATE("Blink", "DOMGetter");
V8PerContextData* contextData = V8PerContextData::from(info.GetIsolate()->GetCurrentContext());
if (contextData && contextData->activityLogger())
contextData->activityLogger()->log("TestObjectPython.activityLoggingGetterForAllWorldsLongAttribute", 0, 0, "Getter");
TestObjectPythonV8Internal::activityLoggingGetterForAllWorldsLongAttributeAttributeGetter(info);
TRACE_EVENT_SET_SAMPLING_STATE("V8", "V8Execution");
}
| 1,781 |
25,332 | 0 | static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
unsigned long val)
{
/* PTRACE_POKEUSR: write one word-aligned register slot in struct user.
 * Offsets past pt_regs are silently ignored (return 0). */
if (off & 3 || off >= sizeof(struct user))
return -EIO;
if (off >= sizeof(struct pt_regs))
return 0;
return put_user_reg(tsk, off >> 2, val);
}
| 1,782 |
21,130 | 0 | static void mem_cgroup_start_move(struct mem_cgroup *memcg)
{
/* Mark the start of a charge-move: bump the per-cpu MEM_CGROUP_ON_MOVE
 * counters (plus the offline-cpu base) and wait an RCU grace period so
 * all readers observe the flag before moving begins. */
int cpu;
get_online_cpus();
spin_lock(&memcg->pcp_counter_lock);
for_each_online_cpu(cpu)
per_cpu(memcg->stat->count[MEM_CGROUP_ON_MOVE], cpu) += 1;
memcg->nocpu_base.count[MEM_CGROUP_ON_MOVE] += 1;
spin_unlock(&memcg->pcp_counter_lock);
put_online_cpus();
synchronize_rcu();
}
| 1,783 |
87,736 | 0 | static struct net_device *hns_roce_get_netdev(struct ib_device *ib_dev,
u8 port_num)
{
/* Return the netdev backing the given 1-based RoCE port, with a
 * reference held (caller must dev_put), or NULL if out of range. */
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
struct net_device *ndev;
if (port_num < 1 || port_num > hr_dev->caps.num_ports)
return NULL;
rcu_read_lock();
ndev = hr_dev->iboe.netdevs[port_num - 1];
if (ndev)
dev_hold(ndev);
rcu_read_unlock();
return ndev;
}
| 1,784 |
24,494 | 0 | cifs_reclassify_socket4(struct socket *sock)
{
/* Give this IPv4 socket's locks CIFS-specific lockdep classes so CIFS
 * locking patterns are tracked separately from generic inet sockets. */
struct sock *sk = sock->sk;
BUG_ON(sock_owned_by_user(sk));
sock_lock_init_class_and_name(sk, "slock-AF_INET-CIFS",
&cifs_slock_key[0], "sk_lock-AF_INET-CIFS", &cifs_key[0]);
}
| 1,785 |
122,413 | 0 | void HTMLSelectElement::setOption(unsigned index, HTMLOptionElement* option, ExceptionState& exceptionState)
{
// Implements select.options[index] = option: grow or shrink the option
// list so `option` can be inserted at `index`, clamped to the maximum
// number of items.
if (index > maxSelectItems - 1)
index = maxSelectItems - 1;
int diff = index - length();
RefPtr<HTMLElement> before = 0;
// diff > 0: pad with placeholder options; diff < 0: replace the
// existing option at `index` (insert before its successor).
if (diff > 0) {
setLength(index, exceptionState);
} else if (diff < 0) {
before = toHTMLElement(options()->item(index+1));
remove(index);
}
if (!exceptionState.hadException()) {
add(option, before.get(), exceptionState);
if (diff >= 0 && option->selected())
optionSelectionStateChanged(option, true);
}
}
| 1,786 |
161,322 | 0 | std::unique_ptr<NavigationThrottle> NetworkHandler::CreateThrottleForNavigation(
NavigationHandle* navigation_handle) {
// Only throttle navigations while DevTools request interception is on.
if (!interception_handle_)
return nullptr;
std::unique_ptr<NavigationThrottle> throttle(new NetworkNavigationThrottle(
weak_factory_.GetWeakPtr(), navigation_handle));
return throttle;
}
| 1,787 |
13,279 | 0 | pdf14_buf_free(pdf14_buf *buf, gs_memory_t *memory)
{
/* Free a transparency buffer and everything it owns: the mask stack's
 * refcounted mask, auxiliary arrays, the chained parent-color records
 * (dropping their ICC profile refs), the backdrop, and buf itself. */
pdf14_parent_color_t *old_parent_color_info = buf->parent_color_info_procs;
if (buf->mask_stack && buf->mask_stack->rc_mask)
rc_decrement(buf->mask_stack->rc_mask, "pdf14_buf_free");
gs_free_object(memory, buf->mask_stack, "pdf14_buf_free");
gs_free_object(memory, buf->transfer_fn, "pdf14_buf_free");
gs_free_object(memory, buf->matte, "pdf14_buf_free");
gs_free_object(memory, buf->data, "pdf14_buf_free");
while (old_parent_color_info) {
if (old_parent_color_info->icc_profile != NULL) {
rc_decrement(old_parent_color_info->icc_profile, "pdf14_buf_free");
}
buf->parent_color_info_procs = old_parent_color_info->previous;
gs_free_object(memory, old_parent_color_info, "pdf14_buf_free");
old_parent_color_info = buf->parent_color_info_procs;
}
gs_free_object(memory, buf->backdrop, "pdf14_buf_free");
gs_free_object(memory, buf, "pdf14_buf_free");
}
| 1,788 |
166,789 | 0 | void NormalPageArena::VerifyObjectStartBitmap() {
// Debug-only consistency check of each page's object-start bitmap; the
// allocation point is flushed first so the payload is fully walkable.
#if DCHECK_IS_ON()
SetAllocationPoint(nullptr, 0);
for (NormalPage* page = static_cast<NormalPage*>(first_page_); page;
page = static_cast<NormalPage*>(page->Next()))
page->VerifyObjectStartBitmapIsConsistentWithPayload();
#endif // DCHECK_IS_ON()
}
| 1,789 |
16,759 | 0 | int qcow2_backing_read1(BlockDriverState *bs, QEMUIOVector *qiov,
int64_t sector_num, int nb_sectors)
{
/* Clamp a read against the backing file's end: returns how many of the
 * requested sectors actually exist in `bs` and zero-fills the tail of
 * the iovec for the sectors that lie beyond EOF. */
int n1;
if ((sector_num + nb_sectors) <= bs->total_sectors)
return nb_sectors;
if (sector_num >= bs->total_sectors)
n1 = 0;
else
n1 = bs->total_sectors - sector_num;
qemu_iovec_memset(qiov, 512 * n1, 0, 512 * (nb_sectors - n1));
return n1;
}
| 1,790 |
106,830 | 0 | LayoutUnit RenderBox::clientWidth() const
{
// DOM clientWidth: border-box width minus horizontal borders and the
// vertical scrollbar (padding stays included).
return width() - borderLeft() - borderRight() - verticalScrollbarWidth();
}
| 1,791 |
25,465 | 0 | int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu)
{
/* Install the single supported FSL embedded PMU backend; only one may
 * be registered (-EBUSY otherwise). */
if (ppmu)
return -EBUSY; /* something's already registered */
ppmu = pmu;
pr_info("%s performance monitor hardware support registered\n",
pmu->name);
perf_pmu_register(&fsl_emb_pmu, "cpu", PERF_TYPE_RAW);
return 0;
}
| 1,792 |
176,100 | 0 | SkIcoCodec::SkIcoCodec(int width, int height, const SkEncodedInfo& info,
SkTArray<std::unique_ptr<SkCodec>, true>* codecs,
sk_sp<SkColorSpace> colorSpace)
// Takes ownership of the per-image embedded codecs (one per ICO entry);
// the "current" codec pointers start null until a decode selects one.
: INHERITED(width, height, info, SkColorSpaceXform::ColorFormat(), nullptr,
std::move(colorSpace))
, fEmbeddedCodecs(codecs)
, fCurrScanlineCodec(nullptr)
, fCurrIncrementalCodec(nullptr)
{}
| 1,793 |
128,956 | 0 | v8::Isolate* isolate() { return m_scriptState->isolate(); }  // Convenience accessor for the script state's V8 isolate.
| 1,794 |
109,867 | 0 | bool Document::useSecureKeyboardEntryWhenActive() const
{
// Simple getter for the secure-keyboard-entry preference flag.
return m_useSecureKeyboardEntryWhenActive;
}
| 1,795 |
13,525 | 0 | static void Free_ChainPosClassRule( HB_ChainPosClassRule* cpcr )
{
/* Release the rule's four owned arrays; the struct itself belongs to
 * the caller. */
FREE( cpcr->PosLookupRecord );
FREE( cpcr->Lookahead );
FREE( cpcr->Input );
FREE( cpcr->Backtrack );
}
| 1,796 |
134,566 | 0 | WebContentsViewAura::WebContentsViewAura(
WebContentsImpl* web_contents,
WebContentsViewDelegate* delegate)
// Member-initializer-only constructor: drag/overscroll state starts in
// its neutral "none" configuration; touch editing support is created
// eagerly.
: web_contents_(web_contents),
delegate_(delegate),
current_drag_op_(blink::WebDragOperationNone),
drag_dest_delegate_(NULL),
current_rvh_for_drag_(NULL),
overscroll_change_brightness_(false),
current_overscroll_gesture_(OVERSCROLL_NONE),
completed_overscroll_gesture_(OVERSCROLL_NONE),
touch_editable_(TouchEditableImplAura::Create()) {
}
| 1,797 |
179,853 | 1 | cleanup_pathname(struct archive_write_disk *a)
{
/*
 * Canonicalize a->name in place before extraction: collapse '//',
 * drop '.' components, and reject empty paths.  '..' components are
 * rejected only when ARCHIVE_EXTRACT_SECURE_NODOTDOT is set — they are
 * deliberately never removed (see the note below), so without that
 * flag archive entries can still escape the target directory.
 */
char *dest, *src;
char separator = '\0';
dest = src = a->name;
if (*src == '\0') {
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Invalid empty pathname");
return (ARCHIVE_FAILED);
}
#if defined(__CYGWIN__)
cleanup_pathname_win(a);
#endif
/* Skip leading '/'. */
if (*src == '/')
separator = *src++;
/* Scan the pathname one element at a time. */
for (;;) {
/* src points to first char after '/' */
if (src[0] == '\0') {
break;
} else if (src[0] == '/') {
/* Found '//', ignore second one. */
src++;
continue;
} else if (src[0] == '.') {
if (src[1] == '\0') {
/* Ignore trailing '.' */
break;
} else if (src[1] == '/') {
/* Skip './'. */
src += 2;
continue;
} else if (src[1] == '.') {
if (src[2] == '/' || src[2] == '\0') {
/* Conditionally warn about '..' */
if (a->flags & ARCHIVE_EXTRACT_SECURE_NODOTDOT) {
archive_set_error(&a->archive,
ARCHIVE_ERRNO_MISC,
"Path contains '..'");
return (ARCHIVE_FAILED);
}
}
/*
* Note: Under no circumstances do we
* remove '..' elements. In
* particular, restoring
* '/foo/../bar/' should create the
* 'foo' dir as a side-effect.
*/
}
}
/* Copy current element, including leading '/'. */
if (separator)
*dest++ = '/';
while (*src != '\0' && *src != '/') {
*dest++ = *src++;
}
if (*src == '\0')
break;
/* Skip '/' separator. */
separator = *src++;
}
/*
* We've just copied zero or more path elements, not including the
* final '/'.
*/
if (dest == a->name) {
/*
* Nothing got copied. The path must have been something
* like '.' or '/' or './' or '/././././/./'.
*/
if (separator)
*dest++ = '/';
else
*dest++ = '.';
}
/* Terminate the result. */
*dest = '\0';
return (ARCHIVE_OK);
}
| 1,798 |
160,194 | 0 | bool MemBackendImpl::SetMaxSize(int max_bytes) {
// Set the in-memory cache's size cap.  Negative is rejected; zero means
// "keep the current/default size" and succeeds without changing it.
static_assert(sizeof(max_bytes) == sizeof(max_size_),
"unsupported int model");
if (max_bytes < 0)
return false;
if (!max_bytes)
return true;
max_size_ = max_bytes;
return true;
}
| 1,799 |
Subsets and Splits