unique_id (int64, 13–189k) | target (int64, 0–1) | code (string, lengths 20–241k) | __index_level_0__ (int64, 0–18.9k) |
---|---|---|---|
115,257 | 0 | void OmniboxViewWin::OnPaint(HDC bogus_hdc) {
CRect paint_clip_rect;
if (!GetUpdateRect(&paint_clip_rect, true))
return;
CPaintDC paint_dc(m_hWnd);
CDC memory_dc(CreateCompatibleDC(paint_dc));
CRect rect;
GetClientRect(&rect);
CBitmap memory_bitmap(CreateCompatibleBitmap(paint_dc, rect.Width(),
rect.Height()));
HBITMAP old_bitmap = memory_dc.SelectBitmap(memory_bitmap);
HWND old_edit_hwnd = edit_hwnd;
edit_hwnd = m_hWnd;
paint_struct = paint_dc.m_ps;
paint_struct.hdc = memory_dc;
DefWindowProc(WM_PAINT, reinterpret_cast<WPARAM>(bogus_hdc), 0);
EraseTopOfSelection(&memory_dc, rect, paint_clip_rect);
if (insecure_scheme_component_.is_nonempty())
DrawSlashForInsecureScheme(memory_dc, rect, paint_clip_rect);
if (drop_highlight_position_ != -1)
DrawDropHighlight(memory_dc, rect, paint_clip_rect);
BitBlt(paint_dc, rect.left, rect.top, rect.Width(), rect.Height(), memory_dc,
rect.left, rect.top, SRCCOPY);
memory_dc.SelectBitmap(old_bitmap);
edit_hwnd = old_edit_hwnd;
}
| 6,300 |
170,374 | 0 | status_t MPEG4Source::parseTrackFragmentHeader(off64_t offset, off64_t size) {
if (size < 8) {
return -EINVAL;
}
uint32_t flags;
if (!mDataSource->getUInt32(offset, &flags)) { // actually version + flags
return ERROR_MALFORMED;
}
if (flags & 0xff000000) {
return -EINVAL;
}
if (!mDataSource->getUInt32(offset + 4, (uint32_t*)&mLastParsedTrackId)) {
return ERROR_MALFORMED;
}
if (mLastParsedTrackId != mTrackId) {
return OK;
}
mTrackFragmentHeaderInfo.mFlags = flags;
mTrackFragmentHeaderInfo.mTrackID = mLastParsedTrackId;
offset += 8;
size -= 8;
ALOGV("fragment header: %08x %08x", flags, mTrackFragmentHeaderInfo.mTrackID);
if (flags & TrackFragmentHeaderInfo::kBaseDataOffsetPresent) {
if (size < 8) {
return -EINVAL;
}
if (!mDataSource->getUInt64(offset, &mTrackFragmentHeaderInfo.mBaseDataOffset)) {
return ERROR_MALFORMED;
}
offset += 8;
size -= 8;
}
if (flags & TrackFragmentHeaderInfo::kSampleDescriptionIndexPresent) {
if (size < 4) {
return -EINVAL;
}
if (!mDataSource->getUInt32(offset, &mTrackFragmentHeaderInfo.mSampleDescriptionIndex)) {
return ERROR_MALFORMED;
}
offset += 4;
size -= 4;
}
if (flags & TrackFragmentHeaderInfo::kDefaultSampleDurationPresent) {
if (size < 4) {
return -EINVAL;
}
if (!mDataSource->getUInt32(offset, &mTrackFragmentHeaderInfo.mDefaultSampleDuration)) {
return ERROR_MALFORMED;
}
offset += 4;
size -= 4;
}
if (flags & TrackFragmentHeaderInfo::kDefaultSampleSizePresent) {
if (size < 4) {
return -EINVAL;
}
if (!mDataSource->getUInt32(offset, &mTrackFragmentHeaderInfo.mDefaultSampleSize)) {
return ERROR_MALFORMED;
}
offset += 4;
size -= 4;
}
if (flags & TrackFragmentHeaderInfo::kDefaultSampleFlagsPresent) {
if (size < 4) {
return -EINVAL;
}
if (!mDataSource->getUInt32(offset, &mTrackFragmentHeaderInfo.mDefaultSampleFlags)) {
return ERROR_MALFORMED;
}
offset += 4;
size -= 4;
}
if (!(flags & TrackFragmentHeaderInfo::kBaseDataOffsetPresent)) {
mTrackFragmentHeaderInfo.mBaseDataOffset = mCurrentMoofOffset;
}
mTrackFragmentHeaderInfo.mDataOffset = 0;
return OK;
}
| 6,301 |
34,042 | 0 | static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
struct sk_buff *skb,
struct gnttab_copy **gopp)
{
struct gnttab_copy *gop = *gopp;
u16 pending_idx = *((u16 *)skb->data);
struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
struct xenvif *vif = pending_tx_info[pending_idx].vif;
struct xen_netif_tx_request *txp;
struct skb_shared_info *shinfo = skb_shinfo(skb);
int nr_frags = shinfo->nr_frags;
int i, err, start;
/* Check status of header. */
err = gop->status;
if (unlikely(err)) {
pending_ring_idx_t index;
index = pending_index(netbk->pending_prod++);
txp = &pending_tx_info[pending_idx].req;
make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
netbk->pending_ring[index] = pending_idx;
xenvif_put(vif);
}
/* Skip first skb fragment if it is on same page as header fragment. */
start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
for (i = start; i < nr_frags; i++) {
int j, newerr;
pending_ring_idx_t index;
pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
/* Check error status: if okay then remember grant handle. */
newerr = (++gop)->status;
if (likely(!newerr)) {
/* Had a previous error? Invalidate this fragment. */
if (unlikely(err))
xen_netbk_idx_release(netbk, pending_idx);
continue;
}
/* Error on this fragment: respond to client with an error. */
txp = &netbk->pending_tx_info[pending_idx].req;
make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
index = pending_index(netbk->pending_prod++);
netbk->pending_ring[index] = pending_idx;
xenvif_put(vif);
/* Not the first error? Preceding frags already invalidated. */
if (err)
continue;
/* First error: invalidate header and preceding fragments. */
pending_idx = *((u16 *)skb->data);
xen_netbk_idx_release(netbk, pending_idx);
for (j = start; j < i; j++) {
pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
xen_netbk_idx_release(netbk, pending_idx);
}
/* Remember the error: invalidate all subsequent fragments. */
err = newerr;
}
*gopp = gop + 1;
return err;
}
| 6,302 |
105,472 | 0 | static gboolean webkit_web_view_motion_event(GtkWidget* widget, GdkEventMotion* event)
{
WebKitWebView* webView = WEBKIT_WEB_VIEW(widget);
Frame* frame = core(webView)->mainFrame();
if (!frame->view())
return FALSE;
return frame->eventHandler()->mouseMoved(PlatformMouseEvent(event));
}
| 6,303 |
183,800 | 1 | void RTCSessionDescriptionRequestImpl::requestFailed(const String& error)
{
if (m_errorCallback)
m_errorCallback->handleEvent(error);
clear();
}
| 6,304 |
46,259 | 0 | struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
void *holder)
{
struct block_device *bdev;
int err;
bdev = lookup_bdev(path);
if (IS_ERR(bdev))
return bdev;
err = blkdev_get(bdev, mode, holder);
if (err)
return ERR_PTR(err);
if ((mode & FMODE_WRITE) && bdev_read_only(bdev)) {
blkdev_put(bdev, mode);
return ERR_PTR(-EACCES);
}
return bdev;
}
| 6,305 |
148,440 | 0 | bool WebContentsImpl::IsWidgetForMainFrame(
RenderWidgetHostImpl* render_widget_host) {
return render_widget_host == GetMainFrame()->GetRenderWidgetHost();
}
| 6,306 |
184,681 | 1 | void ResourceDispatcherHostImpl::OnSSLCertificateError(
net::URLRequest* request,
const net::SSLInfo& ssl_info,
bool is_hsts_host) {
DCHECK(request);
ResourceRequestInfoImpl* info = ResourceRequestInfoImpl::ForRequest(request);
DCHECK(info);
GlobalRequestID request_id(info->GetChildID(), info->GetRequestID());
int render_process_id;
int render_view_id;
if (!info->GetAssociatedRenderView(&render_process_id, &render_view_id))
NOTREACHED();
SSLManager::OnSSLCertificateError(ssl_delegate_weak_factory_.GetWeakPtr(),
request_id, info->GetResourceType(), request->url(), render_process_id,
render_view_id, ssl_info, is_hsts_host);
}
| 6,307 |
126,202 | 0 | void Browser::MarkHomePageAsChanged(PrefService* pref_service) {
pref_service->SetBoolean(prefs::kHomePageChanged, true);
}
| 6,308 |
937 | 0 | void CairoOutputDev::updateStrokeColor(GfxState *state) {
state->getStrokeRGB(&stroke_color);
cairo_pattern_destroy(stroke_pattern);
stroke_pattern = cairo_pattern_create_rgba(stroke_color.r / 65535.0,
stroke_color.g / 65535.0,
stroke_color.b / 65535.0,
stroke_opacity);
LOG(printf ("stroke color: %d %d %d\n",
stroke_color.r, stroke_color.g, stroke_color.b));
}
| 6,309 |
126,970 | 0 | AudioInputRendererHost::~AudioInputRendererHost() {
DCHECK(audio_entries_.empty());
}
| 6,310 |
57,987 | 0 | static struct nft_rule *nf_tables_rule_lookup(const struct nft_chain *chain,
const struct nlattr *nla)
{
if (nla == NULL)
return ERR_PTR(-EINVAL);
return __nf_tables_rule_lookup(chain, be64_to_cpu(nla_get_be64(nla)));
}
| 6,311 |
165,993 | 0 | LocalRTCStatsRequest::LocalRTCStatsRequest(blink::WebRTCStatsRequest impl)
: impl_(impl) {
}
| 6,312 |
9,387 | 0 | int ssl3_client_hello(SSL *s)
{
unsigned char *buf;
unsigned char *p, *d;
int i;
unsigned long l;
#ifndef OPENSSL_NO_COMP
int j;
SSL_COMP *comp;
#endif
buf = (unsigned char *)s->init_buf->data;
if (s->state == SSL3_ST_CW_CLNT_HELLO_A) {
SSL_SESSION *sess = s->session;
if ((sess == NULL) || (sess->ssl_version != s->version) ||
#ifdef OPENSSL_NO_TLSEXT
!sess->session_id_length ||
#else
/*
* In the case of EAP-FAST, we can have a pre-shared
* "ticket" without a session ID.
*/
(!sess->session_id_length && !sess->tlsext_tick) ||
#endif
(sess->not_resumable)) {
if (!ssl_get_new_session(s, 0))
goto err;
}
/* else use the pre-loaded session */
p = s->s3->client_random;
if (ssl_fill_hello_random(s, 0, p, SSL3_RANDOM_SIZE) <= 0)
goto err;
/* Do the message type and length last */
d = p = &(buf[4]);
/*-
* version indicates the negotiated version: for example from
* an SSLv2/v3 compatible client hello). The client_version
* field is the maximum version we permit and it is also
* used in RSA encrypted premaster secrets. Some servers can
* choke if we initially report a higher version then
* renegotiate to a lower one in the premaster secret. This
* didn't happen with TLS 1.0 as most servers supported it
* but it can with TLS 1.1 or later if the server only supports
* 1.0.
*
* Possible scenario with previous logic:
* 1. Client hello indicates TLS 1.2
* 2. Server hello says TLS 1.0
* 3. RSA encrypted premaster secret uses 1.2.
* 4. Handshake proceeds using TLS 1.0.
* 5. Server sends hello request to renegotiate.
* 6. Client hello indicates TLS v1.0 as we now
* know that is the maximum the server supports.
* 7. Server chokes on RSA encrypted premaster secret
* containing version 1.0.
*
* For interoperability it should be OK to always use the
* maximum version we support in client hello and then rely
* on the checking of version to ensure the server isn't
* being inconsistent: for example initially negotiating with
* TLS 1.0 and renegotiating with TLS 1.2. We do this by using
* client_version in client hello and not resetting it to
* the negotiated version.
*/
#if 0
*(p++) = s->version >> 8;
*(p++) = s->version & 0xff;
s->client_version = s->version;
#else
*(p++) = s->client_version >> 8;
*(p++) = s->client_version & 0xff;
#endif
/* Random stuff */
memcpy(p, s->s3->client_random, SSL3_RANDOM_SIZE);
p += SSL3_RANDOM_SIZE;
/* Session ID */
if (s->new_session)
i = 0;
else
i = s->session->session_id_length;
*(p++) = i;
if (i != 0) {
if (i > (int)sizeof(s->session->session_id)) {
SSLerr(SSL_F_SSL3_CLIENT_HELLO, ERR_R_INTERNAL_ERROR);
goto err;
}
memcpy(p, s->session->session_id, i);
p += i;
}
/* Ciphers supported */
i = ssl_cipher_list_to_bytes(s, SSL_get_ciphers(s), &(p[2]), 0);
if (i == 0) {
SSLerr(SSL_F_SSL3_CLIENT_HELLO, SSL_R_NO_CIPHERS_AVAILABLE);
goto err;
}
#ifdef OPENSSL_MAX_TLS1_2_CIPHER_LENGTH
/*
* Some servers hang if the client hello is > 256 bytes. As a hack
* workaround, chop the number of supported ciphers to keep it well
* below this if we use TLS v1.2
*/
if (TLS1_get_version(s) >= TLS1_2_VERSION
&& i > OPENSSL_MAX_TLS1_2_CIPHER_LENGTH)
i = OPENSSL_MAX_TLS1_2_CIPHER_LENGTH & ~1;
#endif
s2n(i, p);
p += i;
/* COMPRESSION */
#ifdef OPENSSL_NO_COMP
*(p++) = 1;
#else
if ((s->options & SSL_OP_NO_COMPRESSION)
|| !s->ctx->comp_methods)
j = 0;
else
j = sk_SSL_COMP_num(s->ctx->comp_methods);
*(p++) = 1 + j;
for (i = 0; i < j; i++) {
comp = sk_SSL_COMP_value(s->ctx->comp_methods, i);
*(p++) = comp->id;
}
#endif
*(p++) = 0; /* Add the NULL method */
#ifndef OPENSSL_NO_TLSEXT
/* TLS extensions */
if (ssl_prepare_clienthello_tlsext(s) <= 0) {
SSLerr(SSL_F_SSL3_CLIENT_HELLO, SSL_R_CLIENTHELLO_TLSEXT);
goto err;
}
if ((p =
ssl_add_clienthello_tlsext(s, p,
buf + SSL3_RT_MAX_PLAIN_LENGTH)) ==
NULL) {
SSLerr(SSL_F_SSL3_CLIENT_HELLO, ERR_R_INTERNAL_ERROR);
goto err;
}
#endif
l = (p - d);
d = buf;
*(d++) = SSL3_MT_CLIENT_HELLO;
l2n3(l, d);
s->state = SSL3_ST_CW_CLNT_HELLO_B;
/* number of bytes to write */
s->init_num = p - buf;
s->init_off = 0;
}
/* SSL3_ST_CW_CLNT_HELLO_B */
return (ssl3_do_write(s, SSL3_RT_HANDSHAKE));
err:
s->state = SSL_ST_ERR;
return (-1);
}
| 6,313 |
77,349 | 0 | ofproto_port_dump_next(struct ofproto_port_dump *dump,
struct ofproto_port *port)
{
const struct ofproto *ofproto = dump->ofproto;
if (dump->error) {
return false;
}
dump->error = ofproto->ofproto_class->port_dump_next(ofproto, dump->state,
port);
if (dump->error) {
ofproto->ofproto_class->port_dump_done(ofproto, dump->state);
return false;
}
return true;
}
| 6,314 |
143,061 | 0 | DefaultAudioDestinationNode* DefaultAudioDestinationNode::Create(
BaseAudioContext* context,
const WebAudioLatencyHint& latency_hint) {
return MakeGarbageCollected<DefaultAudioDestinationNode>(*context,
latency_hint);
}
| 6,315 |
633 | 0 | static bool ldap_encode_control(void *mem_ctx, struct asn1_data *data,
const struct ldap_control_handler *handlers,
struct ldb_control *ctrl)
{
DATA_BLOB value;
int i;
if (!handlers) {
return false;
}
for (i = 0; handlers[i].oid != NULL; i++) {
if (!ctrl->oid) {
/* not encoding this control, the OID has been
* set to NULL indicating it isn't really
* here */
return true;
}
if (strcmp(handlers[i].oid, ctrl->oid) == 0) {
if (!handlers[i].encode) {
if (ctrl->critical) {
return false;
} else {
/* not encoding this control */
return true;
}
}
if (!handlers[i].encode(mem_ctx, ctrl->data, &value)) {
return false;
}
break;
}
}
if (handlers[i].oid == NULL) {
return false;
}
if (!asn1_push_tag(data, ASN1_SEQUENCE(0))) {
return false;
}
if (!asn1_write_OctetString(data, ctrl->oid, strlen(ctrl->oid))) {
return false;
}
if (ctrl->critical) {
if (!asn1_write_BOOLEAN(data, ctrl->critical)) {
return false;
}
}
if (!ctrl->data) {
goto pop_tag;
}
if (!asn1_write_OctetString(data, value.data, value.length)) {
return false;
}
pop_tag:
if (!asn1_pop_tag(data)) {
return false;
}
return true;
}
| 6,316 |
28,492 | 0 | static void qeth_clear_dbf_list(void)
{
struct qeth_dbf_entry *entry, *tmp;
mutex_lock(&qeth_dbf_list_mutex);
list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) {
list_del(&entry->dbf_list);
debug_unregister(entry->dbf_info);
kfree(entry);
}
mutex_unlock(&qeth_dbf_list_mutex);
}
| 6,317 |
141,445 | 0 | GraphicsLayer* PaintLayerScrollableArea::LayerForScrolling() const {
return Layer()->HasCompositedLayerMapping()
? Layer()->GetCompositedLayerMapping()->ScrollingContentsLayer()
: nullptr;
}
| 6,318 |
153,051 | 0 | void PDFiumEngine::SelectionChangeInvalidator::GetVisibleSelectionsScreenRects(
std::vector<pp::Rect>* rects) {
pp::Rect visible_rect = engine_->GetVisibleRect();
for (auto& range : engine_->selection_) {
int page_index = range.page_index();
if (!engine_->IsPageVisible(page_index))
continue; // This selection is on a page that's not currently visible.
std::vector<pp::Rect> selection_rects =
range.GetScreenRects(
visible_rect.point(),
engine_->current_zoom_,
engine_->current_rotation_);
rects->insert(rects->end(), selection_rects.begin(), selection_rects.end());
}
}
| 6,319 |
162,264 | 0 | gpu::CommandBufferNamespace CommandBufferProxyImpl::GetNamespaceID() const {
return gpu::CommandBufferNamespace::GPU_IO;
}
| 6,320 |
50,573 | 0 | static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
return (blk << inode->i_blkbits);
}
| 6,321 |
21,321 | 0 | void mpol_fix_fork_child_flag(struct task_struct *p)
{
if (p->mempolicy)
p->flags |= PF_MEMPOLICY;
else
p->flags &= ~PF_MEMPOLICY;
}
| 6,322 |
67,930 | 0 | long jas_stream_seek(jas_stream_t *stream, long offset, int origin)
{
long newpos;
JAS_DBGLOG(100, ("jas_stream_seek(%p, %ld, %d)\n", stream, offset,
origin));
/* The buffer cannot be in use for both reading and writing. */
assert(!((stream->bufmode_ & JAS_STREAM_RDBUF) && (stream->bufmode_ &
JAS_STREAM_WRBUF)));
/* Reset the EOF indicator (since we may not be at the EOF anymore). */
stream->flags_ &= ~JAS_STREAM_EOF;
if (stream->bufmode_ & JAS_STREAM_RDBUF) {
if (origin == SEEK_CUR) {
offset -= stream->cnt_;
}
} else if (stream->bufmode_ & JAS_STREAM_WRBUF) {
if (jas_stream_flush(stream)) {
return -1;
}
}
stream->cnt_ = 0;
stream->ptr_ = stream->bufstart_;
stream->bufmode_ &= ~(JAS_STREAM_RDBUF | JAS_STREAM_WRBUF);
if ((newpos = (*stream->ops_->seek_)(stream->obj_, offset, origin))
< 0) {
return -1;
}
return newpos;
}
| 6,323 |
70,681 | 0 | request_swap_ns(struct request *req, struct nameserver *ns) {
if (ns && req->ns != ns) {
EVUTIL_ASSERT(req->ns->requests_inflight > 0);
req->ns->requests_inflight--;
ns->requests_inflight++;
req->ns = ns;
}
}
| 6,324 |
146,494 | 0 | void WebGLRenderingContextBase::framebufferRenderbuffer(
GLenum target,
GLenum attachment,
GLenum renderbuffertarget,
WebGLRenderbuffer* buffer) {
if (isContextLost() || !ValidateFramebufferFuncParameters(
"framebufferRenderbuffer", target, attachment))
return;
if (renderbuffertarget != GL_RENDERBUFFER) {
SynthesizeGLError(GL_INVALID_ENUM, "framebufferRenderbuffer",
"invalid target");
return;
}
if (buffer && (!buffer->HasEverBeenBound() ||
!buffer->Validate(ContextGroup(), this))) {
SynthesizeGLError(GL_INVALID_OPERATION, "framebufferRenderbuffer",
"buffer never bound or buffer not from this context");
return;
}
WebGLFramebuffer* framebuffer_binding = GetFramebufferBinding(target);
if (!framebuffer_binding || !framebuffer_binding->Object()) {
SynthesizeGLError(GL_INVALID_OPERATION, "framebufferRenderbuffer",
"no framebuffer bound");
return;
}
framebuffer_binding->SetAttachmentForBoundFramebuffer(target, attachment,
buffer);
ApplyStencilTest();
}
| 6,325 |
9,496 | 0 | int ntpd_main(int argc UNUSED_PARAM, char **argv)
{
#undef G
struct globals G;
struct pollfd *pfd;
peer_t **idx2peer;
unsigned cnt;
memset(&G, 0, sizeof(G));
SET_PTR_TO_GLOBALS(&G);
ntp_init(argv);
/* If ENABLE_FEATURE_NTPD_SERVER, + 1 for listen_fd: */
cnt = G.peer_cnt + ENABLE_FEATURE_NTPD_SERVER;
idx2peer = xzalloc(sizeof(idx2peer[0]) * cnt);
pfd = xzalloc(sizeof(pfd[0]) * cnt);
/* Countdown: we never sync before we've sent INITIAL_SAMPLES+1
* packets to each peer.
* NB: if some peer is not responding, we may end up sending
* fewer packets to it and more to other peers.
* NB2: sync usually happens using INITIAL_SAMPLES packets,
* since last reply does not come back instantaneously.
*/
cnt = G.peer_cnt * (INITIAL_SAMPLES + 1);
write_pidfile(CONFIG_PID_FILE_PATH "/ntpd.pid");
while (!bb_got_signal) {
llist_t *item;
unsigned i, j;
int nfds, timeout;
double nextaction;
/* Nothing between here and poll() blocks for any significant time */
nextaction = G.cur_time + 3600;
i = 0;
#if ENABLE_FEATURE_NTPD_SERVER
if (G_listen_fd != -1) {
pfd[0].fd = G_listen_fd;
pfd[0].events = POLLIN;
i++;
}
#endif
/* Pass over peer list, send requests, time out on receives */
for (item = G.ntp_peers; item != NULL; item = item->link) {
peer_t *p = (peer_t *) item->data;
if (p->next_action_time <= G.cur_time) {
if (p->p_fd == -1) {
/* Time to send new req */
if (--cnt == 0) {
VERB4 bb_error_msg("disabling burst mode");
G.polladj_count = 0;
G.poll_exp = MINPOLL;
}
send_query_to_peer(p);
} else {
/* Timed out waiting for reply */
close(p->p_fd);
p->p_fd = -1;
/* If poll interval is small, increase it */
if (G.poll_exp < BIGPOLL)
adjust_poll(MINPOLL);
timeout = poll_interval(NOREPLY_INTERVAL);
bb_error_msg("timed out waiting for %s, reach 0x%02x, next query in %us",
p->p_dotted, p->reachable_bits, timeout);
/* What if we don't see it because it changed its IP? */
if (p->reachable_bits == 0)
resolve_peer_hostname(p, /*loop_on_fail=*/ 0);
set_next(p, timeout);
}
}
if (p->next_action_time < nextaction)
nextaction = p->next_action_time;
if (p->p_fd >= 0) {
/* Wait for reply from this peer */
pfd[i].fd = p->p_fd;
pfd[i].events = POLLIN;
idx2peer[i] = p;
i++;
}
}
timeout = nextaction - G.cur_time;
if (timeout < 0)
timeout = 0;
timeout++; /* (nextaction - G.cur_time) rounds down, compensating */
/* Here we may block */
VERB2 {
if (i > (ENABLE_FEATURE_NTPD_SERVER && G_listen_fd != -1)) {
/* We wait for at least one reply.
* Poll for it, without wasting time for message.
* Since replies often come under 1 second, this also
* reduces clutter in logs.
*/
nfds = poll(pfd, i, 1000);
if (nfds != 0)
goto did_poll;
if (--timeout <= 0)
goto did_poll;
}
bb_error_msg("poll:%us sockets:%u interval:%us", timeout, i, 1 << G.poll_exp);
}
nfds = poll(pfd, i, timeout * 1000);
did_poll:
gettime1900d(); /* sets G.cur_time */
if (nfds <= 0) {
if (!bb_got_signal /* poll wasn't interrupted by a signal */
&& G.cur_time - G.last_script_run > 11*60
) {
/* Useful for updating battery-backed RTC and such */
run_script("periodic", G.last_update_offset);
gettime1900d(); /* sets G.cur_time */
}
goto check_unsync;
}
/* Process any received packets */
j = 0;
#if ENABLE_FEATURE_NTPD_SERVER
if (G.listen_fd != -1) {
if (pfd[0].revents /* & (POLLIN|POLLERR)*/) {
nfds--;
recv_and_process_client_pkt(/*G.listen_fd*/);
gettime1900d(); /* sets G.cur_time */
}
j = 1;
}
#endif
for (; nfds != 0 && j < i; j++) {
if (pfd[j].revents /* & (POLLIN|POLLERR)*/) {
/*
* At init, alarm was set to 10 sec.
* Now we did get a reply.
* Increase timeout to 50 seconds to finish syncing.
*/
if (option_mask32 & OPT_qq) {
option_mask32 &= ~OPT_qq;
alarm(50);
}
nfds--;
recv_and_process_peer_pkt(idx2peer[j]);
gettime1900d(); /* sets G.cur_time */
}
}
check_unsync:
if (G.ntp_peers && G.stratum != MAXSTRAT) {
for (item = G.ntp_peers; item != NULL; item = item->link) {
peer_t *p = (peer_t *) item->data;
if (p->reachable_bits)
goto have_reachable_peer;
}
/* No peer responded for last 8 packets, panic */
clamp_pollexp_and_set_MAXSTRAT();
run_script("unsync", 0.0);
have_reachable_peer: ;
}
} /* while (!bb_got_signal) */
remove_pidfile(CONFIG_PID_FILE_PATH "/ntpd.pid");
kill_myself_with_sig(bb_got_signal);
}
| 6,326 |
62,531 | 0 | static OPJ_BOOL bmp_read_rle8_data(FILE* IN, OPJ_UINT8* pData,
OPJ_UINT32 stride, OPJ_UINT32 width, OPJ_UINT32 height)
{
OPJ_UINT32 x, y;
OPJ_UINT8 *pix;
const OPJ_UINT8 *beyond;
beyond = pData + stride * height;
pix = pData;
x = y = 0U;
while (y < height) {
int c = getc(IN);
if (c) {
int j;
OPJ_UINT8 c1 = (OPJ_UINT8)getc(IN);
for (j = 0; (j < c) && (x < width) &&
((OPJ_SIZE_T)pix < (OPJ_SIZE_T)beyond); j++, x++, pix++) {
*pix = c1;
}
} else {
c = getc(IN);
if (c == 0x00) { /* EOL */
x = 0;
++y;
pix = pData + y * stride + x;
} else if (c == 0x01) { /* EOP */
break;
} else if (c == 0x02) { /* MOVE by dxdy */
c = getc(IN);
x += (OPJ_UINT32)c;
c = getc(IN);
y += (OPJ_UINT32)c;
pix = pData + y * stride + x;
} else { /* 03 .. 255 */
int j;
for (j = 0; (j < c) && (x < width) &&
((OPJ_SIZE_T)pix < (OPJ_SIZE_T)beyond); j++, x++, pix++) {
OPJ_UINT8 c1 = (OPJ_UINT8)getc(IN);
*pix = c1;
}
if ((OPJ_UINT32)c & 1U) { /* skip padding byte */
getc(IN);
}
}
}
}/* while() */
return OPJ_TRUE;
}
| 6,327 |
157,692 | 0 | explicit MockScreenshotManager(content::NavigationControllerImpl* owner)
: content::NavigationEntryScreenshotManager(owner),
encoding_screenshot_in_progress_(false) {
}
| 6,328 |
55,335 | 0 | static int atl2_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct atl2_adapter *adapter = netdev_priv(netdev);
u32 err;
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
err = pci_enable_device(pdev);
if (err) {
printk(KERN_ERR
"atl2: Cannot enable PCI device from suspend\n");
return err;
}
pci_set_master(pdev);
ATL2_READ_REG(&adapter->hw, REG_WOL_CTRL); /* clear WOL status */
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
ATL2_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
if (netif_running(netdev)) {
err = atl2_request_irq(adapter);
if (err)
return err;
}
atl2_reset_hw(&adapter->hw);
if (netif_running(netdev))
atl2_up(adapter);
netif_device_attach(netdev);
return 0;
}
| 6,329 |
22,903 | 0 | int nfs4_proc_setclientid_confirm(struct nfs_client *clp, struct rpc_cred *cred)
{
long timeout = 0;
int err;
do {
err = _nfs4_proc_setclientid_confirm(clp, cred);
switch (err) {
case 0:
return err;
case -NFS4ERR_RESOURCE:
/* The IBM lawyers misread another document! */
case -NFS4ERR_DELAY:
err = nfs4_delay(clp->cl_rpcclient, &timeout);
}
} while (err == 0);
return err;
}
| 6,330 |
162,516 | 0 | bool ClassicPendingScript::WasCanceled() const {
if (!is_external_)
return false;
return GetResource()->WasCanceled();
}
| 6,331 |
138,860 | 0 | TestRenderViewHost* RenderViewHostImplTestHarness::active_test_rvh() {
return static_cast<TestRenderViewHost*>(active_rvh());
}
| 6,332 |
74,283 | 0 | void prep_exit()
{
}
| 6,333 |
93,826 | 0 | virDomainGetVcpuPinInfo(virDomainPtr domain, int ncpumaps,
unsigned char *cpumaps, int maplen, unsigned int flags)
{
virConnectPtr conn;
VIR_DOMAIN_DEBUG(domain, "ncpumaps=%d, cpumaps=%p, maplen=%d, flags=%x",
ncpumaps, cpumaps, maplen, flags);
virResetLastError();
virCheckDomainReturn(domain, -1);
conn = domain->conn;
virCheckNonNullArgGoto(cpumaps, error);
virCheckPositiveArgGoto(ncpumaps, error);
virCheckPositiveArgGoto(maplen, error);
if (INT_MULTIPLY_OVERFLOW(ncpumaps, maplen)) {
virReportError(VIR_ERR_OVERFLOW, _("input too large: %d * %d"),
ncpumaps, maplen);
goto error;
}
VIR_EXCLUSIVE_FLAGS_GOTO(VIR_DOMAIN_AFFECT_LIVE,
VIR_DOMAIN_AFFECT_CONFIG,
error);
if (conn->driver->domainGetVcpuPinInfo) {
int ret;
ret = conn->driver->domainGetVcpuPinInfo(domain, ncpumaps,
cpumaps, maplen, flags);
if (ret < 0)
goto error;
return ret;
}
virReportUnsupportedError();
error:
virDispatchError(domain->conn);
return -1;
}
| 6,334 |
140,940 | 0 | void Document::ActiveChainNodeDetached(Element& element) {
if (active_element_ && element == active_element_) {
active_element_ =
SkipDisplayNoneAncestorsOrReturnNullIfFlatTreeIsDirty(element);
}
}
| 6,335 |
22,001 | 0 | raptor_rdfxml_characters_handler(void *user_data,
raptor_xml_element* xml_element,
const unsigned char *s, int len)
{
raptor_parser* rdf_parser = (raptor_parser*)user_data;
raptor_rdfxml_cdata_grammar(rdf_parser, s, len, 0);
}
| 6,336 |
17,427 | 0 | AddFragment(struct xorg_list *frags, int bytes)
{
FragmentList *f = malloc(sizeof(FragmentList) + bytes);
if (!f) {
return NULL;
} else {
f->bytes = bytes;
xorg_list_add(&f->l, frags->prev);
return (char*) f + sizeof(*f);
}
}
| 6,337 |
70,856 | 0 | static int http_get_line(HTTPContext *s, char *line, int line_size)
{
int ch;
char *q;
q = line;
for (;;) {
ch = http_getc(s);
if (ch < 0)
return ch;
if (ch == '\n') {
/* process line */
if (q > line && q[-1] == '\r')
q--;
*q = '\0';
return 0;
} else {
if ((q - line) < line_size - 1)
*q++ = ch;
}
}
}
| 6,338 |
55,070 | 0 | static void file_change_m(const char *p, struct branch *b)
{
static struct strbuf uq = STRBUF_INIT;
const char *endp;
struct object_entry *oe;
unsigned char sha1[20];
uint16_t mode, inline_data = 0;
p = get_mode(p, &mode);
if (!p)
die("Corrupt mode: %s", command_buf.buf);
switch (mode) {
case 0644:
case 0755:
mode |= S_IFREG;
case S_IFREG | 0644:
case S_IFREG | 0755:
case S_IFLNK:
case S_IFDIR:
case S_IFGITLINK:
/* ok */
break;
default:
die("Corrupt mode: %s", command_buf.buf);
}
if (*p == ':') {
oe = find_mark(parse_mark_ref_space(&p));
hashcpy(sha1, oe->idx.sha1);
} else if (skip_prefix(p, "inline ", &p)) {
inline_data = 1;
oe = NULL; /* not used with inline_data, but makes gcc happy */
} else {
if (get_sha1_hex(p, sha1))
die("Invalid dataref: %s", command_buf.buf);
oe = find_object(sha1);
p += 40;
if (*p++ != ' ')
die("Missing space after SHA1: %s", command_buf.buf);
}
strbuf_reset(&uq);
if (!unquote_c_style(&uq, p, &endp)) {
if (*endp)
die("Garbage after path in: %s", command_buf.buf);
p = uq.buf;
}
/* Git does not track empty, non-toplevel directories. */
if (S_ISDIR(mode) && !hashcmp(sha1, EMPTY_TREE_SHA1_BIN) && *p) {
tree_content_remove(&b->branch_tree, p, NULL, 0);
return;
}
if (S_ISGITLINK(mode)) {
if (inline_data)
die("Git links cannot be specified 'inline': %s",
command_buf.buf);
else if (oe) {
if (oe->type != OBJ_COMMIT)
die("Not a commit (actually a %s): %s",
typename(oe->type), command_buf.buf);
}
/*
* Accept the sha1 without checking; it is expected to be in
* another repository.
*/
} else if (inline_data) {
if (S_ISDIR(mode))
die("Directories cannot be specified 'inline': %s",
command_buf.buf);
if (p != uq.buf) {
strbuf_addstr(&uq, p);
p = uq.buf;
}
read_next_command();
parse_and_store_blob(&last_blob, sha1, 0);
} else {
enum object_type expected = S_ISDIR(mode) ?
OBJ_TREE: OBJ_BLOB;
enum object_type type = oe ? oe->type :
sha1_object_info(sha1, NULL);
if (type < 0)
die("%s not found: %s",
S_ISDIR(mode) ? "Tree" : "Blob",
command_buf.buf);
if (type != expected)
die("Not a %s (actually a %s): %s",
typename(expected), typename(type),
command_buf.buf);
}
if (!*p) {
tree_content_replace(&b->branch_tree, sha1, mode, NULL);
return;
}
tree_content_set(&b->branch_tree, p, sha1, mode, NULL);
}
| 6,339 |
73,403 | 0 | MagickPrivate MagickBooleanType SyncImageProfiles(Image *image)
{
MagickBooleanType
status;
StringInfo
*profile;
status=MagickTrue;
profile=(StringInfo *) GetImageProfile(image,"8BIM");
if (profile != (StringInfo *) NULL)
if (Sync8BimProfile(image,profile) == MagickFalse)
status=MagickFalse;
profile=(StringInfo *) GetImageProfile(image,"EXIF");
if (profile != (StringInfo *) NULL)
if (SyncExifProfile(image,profile) == MagickFalse)
status=MagickFalse;
return(status);
}
| 6,340 |
83,249 | 0 | void __noreturn do_exit(long code)
{
struct task_struct *tsk = current;
int group_dead;
TASKS_RCU(int tasks_rcu_i);
profile_task_exit(tsk);
kcov_task_exit(tsk);
WARN_ON(blk_needs_flush_plug(tsk));
if (unlikely(in_interrupt()))
panic("Aiee, killing interrupt handler!");
if (unlikely(!tsk->pid))
panic("Attempted to kill the idle task!");
/*
* If do_exit is called because this processes oopsed, it's possible
* that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
* continuing. Amongst other possible reasons, this is to prevent
* mm_release()->clear_child_tid() from writing to a user-controlled
* kernel address.
*/
set_fs(USER_DS);
ptrace_event(PTRACE_EVENT_EXIT, code);
validate_creds_for_do_exit(tsk);
/*
* We're taking recursive faults here in do_exit. Safest is to just
* leave this task alone and wait for reboot.
*/
if (unlikely(tsk->flags & PF_EXITING)) {
pr_alert("Fixing recursive fault but reboot is needed!\n");
/*
* We can do this unlocked here. The futex code uses
* this flag just to verify whether the pi state
* cleanup has been done or not. In the worst case it
* loops once more. We pretend that the cleanup was
* done as there is no way to return. Either the
* OWNER_DIED bit is set by now or we push the blocked
* task into the wait-forever nirvana as well.
*/
tsk->flags |= PF_EXITPIDONE;
set_current_state(TASK_UNINTERRUPTIBLE);
schedule();
}
exit_signals(tsk); /* sets PF_EXITING */
/*
* Ensure that all new tsk->pi_lock acquisitions must observe
* PF_EXITING. Serializes against futex.c:attach_to_pi_owner().
*/
smp_mb();
/*
* Ensure that we must observe the pi_state in exit_mm() ->
* mm_release() -> exit_pi_state_list().
*/
raw_spin_unlock_wait(&tsk->pi_lock);
if (unlikely(in_atomic())) {
pr_info("note: %s[%d] exited with preempt_count %d\n",
current->comm, task_pid_nr(current),
preempt_count());
preempt_count_set(PREEMPT_ENABLED);
}
/* sync mm's RSS info before statistics gathering */
if (tsk->mm)
sync_mm_rss(tsk->mm);
acct_update_integrals(tsk);
group_dead = atomic_dec_and_test(&tsk->signal->live);
if (group_dead) {
#ifdef CONFIG_POSIX_TIMERS
hrtimer_cancel(&tsk->signal->real_timer);
exit_itimers(tsk->signal);
#endif
if (tsk->mm)
setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
}
acct_collect(code, group_dead);
if (group_dead)
tty_audit_exit();
audit_free(tsk);
tsk->exit_code = code;
taskstats_exit(tsk, group_dead);
exit_mm();
if (group_dead)
acct_process();
trace_sched_process_exit(tsk);
exit_sem(tsk);
exit_shm(tsk);
exit_files(tsk);
exit_fs(tsk);
if (group_dead)
disassociate_ctty(1);
exit_task_namespaces(tsk);
exit_task_work(tsk);
exit_thread(tsk);
/*
* Flush inherited counters to the parent - before the parent
* gets woken up by child-exit notifications.
*
* because of cgroup mode, must be called before cgroup_exit()
*/
perf_event_exit_task(tsk);
sched_autogroup_exit_task(tsk);
cgroup_exit(tsk);
/*
* FIXME: do that only when needed, using sched_exit tracepoint
*/
flush_ptrace_hw_breakpoint(tsk);
TASKS_RCU(preempt_disable());
TASKS_RCU(tasks_rcu_i = __srcu_read_lock(&tasks_rcu_exit_srcu));
TASKS_RCU(preempt_enable());
exit_notify(tsk, group_dead);
proc_exit_connector(tsk);
mpol_put_task_policy(tsk);
#ifdef CONFIG_FUTEX
if (unlikely(current->pi_state_cache))
kfree(current->pi_state_cache);
#endif
/*
* Make sure we are holding no locks:
*/
debug_check_no_locks_held();
/*
* We can do this unlocked here. The futex code uses this flag
* just to verify whether the pi state cleanup has been done
* or not. In the worst case it loops once more.
*/
tsk->flags |= PF_EXITPIDONE;
if (tsk->io_context)
exit_io_context(tsk);
if (tsk->splice_pipe)
free_pipe_info(tsk->splice_pipe);
if (tsk->task_frag.page)
put_page(tsk->task_frag.page);
validate_creds_for_do_exit(tsk);
check_stack_usage();
preempt_disable();
if (tsk->nr_dirtied)
__this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
exit_rcu();
TASKS_RCU(__srcu_read_unlock(&tasks_rcu_exit_srcu, tasks_rcu_i));
do_task_dead();
}
| 6,341 |
21,872 | 0 | int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
const struct drm_framebuffer_funcs *funcs)
{
int ret;
ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB);
if (ret) {
return ret;
}
fb->dev = dev;
fb->funcs = funcs;
dev->mode_config.num_fb++;
list_add(&fb->head, &dev->mode_config.fb_list);
return 0;
}
| 6,342 |
138,109 | 0 | void AXNodeObject::setSequentialFocusNavigationStartingPoint() {
if (!getNode())
return;
getNode()->document().clearFocusedElement();
getNode()->document().setSequentialFocusNavigationStartingPoint(getNode());
}
| 6,343 |
53,404 | 0 | iperf_new_stream(struct iperf_test *test, int s)
{
int i;
struct iperf_stream *sp;
char template[] = "/tmp/iperf3.XXXXXX";
h_errno = 0;
sp = (struct iperf_stream *) malloc(sizeof(struct iperf_stream));
if (!sp) {
i_errno = IECREATESTREAM;
return NULL;
}
memset(sp, 0, sizeof(struct iperf_stream));
sp->test = test;
sp->settings = test->settings;
sp->result = (struct iperf_stream_result *) malloc(sizeof(struct iperf_stream_result));
if (!sp->result) {
free(sp);
i_errno = IECREATESTREAM;
return NULL;
}
memset(sp->result, 0, sizeof(struct iperf_stream_result));
TAILQ_INIT(&sp->result->interval_results);
/* Create and randomize the buffer */
sp->buffer_fd = mkstemp(template);
if (sp->buffer_fd == -1) {
i_errno = IECREATESTREAM;
free(sp->result);
free(sp);
return NULL;
}
if (unlink(template) < 0) {
i_errno = IECREATESTREAM;
free(sp->result);
free(sp);
return NULL;
}
if (ftruncate(sp->buffer_fd, test->settings->blksize) < 0) {
i_errno = IECREATESTREAM;
free(sp->result);
free(sp);
return NULL;
}
sp->buffer = (char *) mmap(NULL, test->settings->blksize, PROT_READ|PROT_WRITE, MAP_PRIVATE, sp->buffer_fd, 0);
if (sp->buffer == MAP_FAILED) {
i_errno = IECREATESTREAM;
free(sp->result);
free(sp);
return NULL;
}
srandom(time(NULL));
for (i = 0; i < test->settings->blksize; ++i)
sp->buffer[i] = random();
/* Set socket */
sp->socket = s;
sp->snd = test->protocol->send;
sp->rcv = test->protocol->recv;
if (test->diskfile_name != (char*) 0) {
sp->diskfile_fd = open(test->diskfile_name, test->sender ? O_RDONLY : (O_WRONLY|O_CREAT|O_TRUNC), S_IRUSR|S_IWUSR);
if (sp->diskfile_fd == -1) {
i_errno = IEFILE;
munmap(sp->buffer, sp->test->settings->blksize);
free(sp->result);
free(sp);
return NULL;
}
sp->snd2 = sp->snd;
sp->snd = diskfile_send;
sp->rcv2 = sp->rcv;
sp->rcv = diskfile_recv;
} else
sp->diskfile_fd = -1;
/* Initialize stream */
if (iperf_init_stream(sp, test) < 0) {
close(sp->buffer_fd);
munmap(sp->buffer, sp->test->settings->blksize);
free(sp->result);
free(sp);
return NULL;
}
iperf_add_stream(test, sp);
return sp;
}
| 6,344 |
46,920 | 0 | static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct cast6_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
return glue_xts_crypt_128bit(&cast6_dec_xts, desc, dst, src, nbytes,
XTS_TWEAK_CAST(__cast6_encrypt),
&ctx->tweak_ctx, &ctx->crypt_ctx);
}
| 6,345 |
73,393 | 0 | static void GetProfilesFromResourceBlock(Image *image,
const StringInfo *resource_block,ExceptionInfo *exception)
{
const unsigned char
*datum;
register const unsigned char
*p;
size_t
length;
ssize_t
count;
StringInfo
*profile;
unsigned char
length_byte;
unsigned int
value;
unsigned short
id;
datum=GetStringInfoDatum(resource_block);
length=GetStringInfoLength(resource_block);
for (p=datum; p < (datum+length-16); )
{
if (LocaleNCompare((char *) p,"8BIM",4) != 0)
break;
p+=4;
p=ReadResourceShort(p,&id);
p=ReadResourceByte(p,&length_byte);
p+=length_byte;
if (((length_byte+1) & 0x01) != 0)
p++;
if (p > (datum+length-4))
break;
p=ReadResourceLong(p,&value);
count=(ssize_t) value;
if ((p > (datum+length-count)) || (count > (ssize_t) length) ||
(count < 0))
break;
switch (id)
{
case 0x03ed:
{
unsigned int
resolution;
unsigned short
units;
/*
Resolution.
*/
p=ReadResourceLong(p,&resolution);
image->resolution.x=((double) resolution)/65536.0;
p=ReadResourceShort(p,&units)+2;
p=ReadResourceLong(p,&resolution)+4;
image->resolution.y=((double) resolution)/65536.0;
/*
Values are always stored as pixels per inch.
*/
if ((ResolutionType) units != PixelsPerCentimeterResolution)
image->units=PixelsPerInchResolution;
else
{
image->units=PixelsPerCentimeterResolution;
image->resolution.x/=2.54;
image->resolution.y/=2.54;
}
break;
}
case 0x0404:
{
/*
IPTC Profile
*/
profile=AcquireStringInfo(count);
SetStringInfoDatum(profile,p);
(void) SetImageProfileInternal(image,"iptc",profile,MagickTrue,
exception);
profile=DestroyStringInfo(profile);
p+=count;
break;
}
case 0x040c:
{
/*
Thumbnail.
*/
p+=count;
break;
}
case 0x040f:
{
/*
ICC Profile.
*/
profile=AcquireStringInfo(count);
SetStringInfoDatum(profile,p);
(void) SetImageProfileInternal(image,"icc",profile,MagickTrue,
exception);
profile=DestroyStringInfo(profile);
p+=count;
break;
}
case 0x0422:
{
/*
EXIF Profile.
*/
profile=AcquireStringInfo(count);
SetStringInfoDatum(profile,p);
(void) SetImageProfileInternal(image,"exif",profile,MagickTrue,
exception);
profile=DestroyStringInfo(profile);
p+=count;
break;
}
case 0x0424:
{
/*
XMP Profile.
*/
profile=AcquireStringInfo(count);
SetStringInfoDatum(profile,p);
(void) SetImageProfileInternal(image,"xmp",profile,MagickTrue,
exception);
profile=DestroyStringInfo(profile);
p+=count;
break;
}
default:
{
p+=count;
break;
}
}
if ((count & 0x01) != 0)
p++;
}
}
| 6,346 |
110,386 | 0 | bool LoadEntryPointsFromLibrary(const base::NativeLibrary& library,
PluginModule::EntryPoints* entry_points) {
entry_points->get_interface =
reinterpret_cast<PluginModule::GetInterfaceFunc>(
base::GetFunctionPointerFromNativeLibrary(library,
"PPP_GetInterface"));
if (!entry_points->get_interface) {
LOG(WARNING) << "No PPP_GetInterface in plugin library";
return false;
}
entry_points->initialize_module =
reinterpret_cast<PluginModule::PPP_InitializeModuleFunc>(
base::GetFunctionPointerFromNativeLibrary(library,
"PPP_InitializeModule"));
if (!entry_points->initialize_module) {
LOG(WARNING) << "No PPP_InitializeModule in plugin library";
return false;
}
entry_points->shutdown_module =
reinterpret_cast<PluginModule::PPP_ShutdownModuleFunc>(
base::GetFunctionPointerFromNativeLibrary(library,
"PPP_ShutdownModule"));
return true;
}
| 6,347 |
162,949 | 0 | int GetConsoleErrorCount() const {
blink::mojom::ManifestManagerAssociatedPtr ptr;
shell()
->web_contents()
->GetMainFrame()
->GetRemoteAssociatedInterfaces()
->GetInterface(&ptr);
ptr.FlushForTesting();
return console_error_count_;
}
| 6,348 |
56,517 | 0 | static int ext4_ext_split(handle_t *handle, struct inode *inode,
unsigned int flags,
struct ext4_ext_path *path,
struct ext4_extent *newext, int at)
{
struct buffer_head *bh = NULL;
int depth = ext_depth(inode);
struct ext4_extent_header *neh;
struct ext4_extent_idx *fidx;
int i = at, k, m, a;
ext4_fsblk_t newblock, oldblock;
__le32 border;
ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
int err = 0;
/* make decision: where to split? */
/* FIXME: now decision is simplest: at current extent */
/* if current leaf will be split, then we should use
* border from split point */
if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
return -EFSCORRUPTED;
}
if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
border = path[depth].p_ext[1].ee_block;
ext_debug("leaf will be split."
" next leaf starts at %d\n",
le32_to_cpu(border));
} else {
border = newext->ee_block;
ext_debug("leaf will be added."
" next leaf starts at %d\n",
le32_to_cpu(border));
}
/*
* If an error occurs, then we break processing
* and mark the filesystem read-only. The index won't
* be inserted and the tree will be in a consistent
* state. The next mount will repair buffers too.
*/
/*
* Get an array to track all allocated blocks.
* We need it to free those blocks when handling
* errors.
*/
ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
if (!ablocks)
return -ENOMEM;
/* allocate all needed blocks */
ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
for (a = 0; a < depth - at; a++) {
newblock = ext4_ext_new_meta_block(handle, inode, path,
newext, &err, flags);
if (newblock == 0)
goto cleanup;
ablocks[a] = newblock;
}
/* initialize new leaf */
newblock = ablocks[--a];
if (unlikely(newblock == 0)) {
EXT4_ERROR_INODE(inode, "newblock == 0!");
err = -EFSCORRUPTED;
goto cleanup;
}
bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
if (unlikely(!bh)) {
err = -ENOMEM;
goto cleanup;
}
lock_buffer(bh);
err = ext4_journal_get_create_access(handle, bh);
if (err)
goto cleanup;
neh = ext_block_hdr(bh);
neh->eh_entries = 0;
neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
neh->eh_magic = EXT4_EXT_MAGIC;
neh->eh_depth = 0;
/* move remainder of path[depth] to the new leaf */
if (unlikely(path[depth].p_hdr->eh_entries !=
path[depth].p_hdr->eh_max)) {
EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
path[depth].p_hdr->eh_entries,
path[depth].p_hdr->eh_max);
err = -EFSCORRUPTED;
goto cleanup;
}
/* start copy from next extent */
m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
ext4_ext_show_move(inode, path, newblock, depth);
if (m) {
struct ext4_extent *ex;
ex = EXT_FIRST_EXTENT(neh);
memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
le16_add_cpu(&neh->eh_entries, m);
}
ext4_extent_block_csum_set(inode, neh);
set_buffer_uptodate(bh);
unlock_buffer(bh);
err = ext4_handle_dirty_metadata(handle, inode, bh);
if (err)
goto cleanup;
brelse(bh);
bh = NULL;
/* correct old leaf */
if (m) {
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
goto cleanup;
le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
err = ext4_ext_dirty(handle, inode, path + depth);
if (err)
goto cleanup;
}
/* create intermediate indexes */
k = depth - at - 1;
if (unlikely(k < 0)) {
EXT4_ERROR_INODE(inode, "k %d < 0!", k);
err = -EFSCORRUPTED;
goto cleanup;
}
if (k)
ext_debug("create %d intermediate indices\n", k);
/* insert new index into current index block */
/* current depth stored in i var */
i = depth - 1;
while (k--) {
oldblock = newblock;
newblock = ablocks[--a];
bh = sb_getblk(inode->i_sb, newblock);
if (unlikely(!bh)) {
err = -ENOMEM;
goto cleanup;
}
lock_buffer(bh);
err = ext4_journal_get_create_access(handle, bh);
if (err)
goto cleanup;
neh = ext_block_hdr(bh);
neh->eh_entries = cpu_to_le16(1);
neh->eh_magic = EXT4_EXT_MAGIC;
neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
neh->eh_depth = cpu_to_le16(depth - i);
fidx = EXT_FIRST_INDEX(neh);
fidx->ei_block = border;
ext4_idx_store_pblock(fidx, oldblock);
ext_debug("int.index at %d (block %llu): %u -> %llu\n",
i, newblock, le32_to_cpu(border), oldblock);
/* move remainder of path[i] to the new index block */
if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
EXT_LAST_INDEX(path[i].p_hdr))) {
EXT4_ERROR_INODE(inode,
"EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
le32_to_cpu(path[i].p_ext->ee_block));
err = -EFSCORRUPTED;
goto cleanup;
}
/* start copy indexes */
m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
EXT_MAX_INDEX(path[i].p_hdr));
ext4_ext_show_move(inode, path, newblock, i);
if (m) {
memmove(++fidx, path[i].p_idx,
sizeof(struct ext4_extent_idx) * m);
le16_add_cpu(&neh->eh_entries, m);
}
ext4_extent_block_csum_set(inode, neh);
set_buffer_uptodate(bh);
unlock_buffer(bh);
err = ext4_handle_dirty_metadata(handle, inode, bh);
if (err)
goto cleanup;
brelse(bh);
bh = NULL;
/* correct old index */
if (m) {
err = ext4_ext_get_access(handle, inode, path + i);
if (err)
goto cleanup;
le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
err = ext4_ext_dirty(handle, inode, path + i);
if (err)
goto cleanup;
}
i--;
}
/* insert new index */
err = ext4_ext_insert_index(handle, inode, path + at,
le32_to_cpu(border), newblock);
cleanup:
if (bh) {
if (buffer_locked(bh))
unlock_buffer(bh);
brelse(bh);
}
if (err) {
/* free all allocated blocks in error case */
for (i = 0; i < depth; i++) {
if (!ablocks[i])
continue;
ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
EXT4_FREE_BLOCKS_METADATA);
}
}
kfree(ablocks);
return err;
}
| 6,349 |
94,647 | 0 | X509_SIG *d2i_PKCS8_fp(FILE *fp, X509_SIG **p8)
{
return ASN1_d2i_fp_of(X509_SIG,X509_SIG_new,d2i_X509_SIG,fp,p8);
}
| 6,350 |
103,994 | 0 | void GLES2DecoderImpl::DoBindFramebuffer(GLenum target, GLuint client_id) {
FramebufferManager::FramebufferInfo* info = NULL;
GLuint service_id = 0;
if (client_id != 0) {
info = GetFramebufferInfo(client_id);
if (!info) {
if (!group_->bind_generates_resource()) {
SetGLError(GL_INVALID_VALUE,
"glBindFramebuffer: id not generated by glGenFramebuffers");
return;
}
glGenFramebuffersEXT(1, &service_id);
CreateFramebufferInfo(client_id, service_id);
info = GetFramebufferInfo(client_id);
IdAllocatorInterface* id_allocator =
group_->GetIdAllocator(id_namespaces::kFramebuffers);
id_allocator->MarkAsUsed(client_id);
} else {
service_id = info->service_id();
}
info->MarkAsValid();
} else {
service_id = surface_->GetBackingFrameBufferObject();
}
if (target == GL_FRAMEBUFFER || target == GL_DRAW_FRAMEBUFFER_EXT) {
bound_draw_framebuffer_ = info;
}
if (target == GL_FRAMEBUFFER || target == GL_READ_FRAMEBUFFER_EXT) {
bound_read_framebuffer_ = info;
}
state_dirty_ = true;
if (info == NULL && offscreen_target_frame_buffer_.get()) {
service_id = offscreen_target_frame_buffer_->id();
}
glBindFramebufferEXT(target, service_id);
}
| 6,351 |
12,949 | 0 | ssh_packet_backup_state(struct ssh *ssh,
struct ssh *backup_state)
{
struct ssh *tmp;
close(ssh->state->connection_in);
ssh->state->connection_in = -1;
close(ssh->state->connection_out);
ssh->state->connection_out = -1;
if (backup_state)
tmp = backup_state;
else
tmp = ssh_alloc_session_state();
backup_state = ssh;
ssh = tmp;
}
| 6,352 |
101,579 | 0 | void Browser::BrowserRenderWidgetShowing() {
RenderWidgetShowing();
}
| 6,353 |
87,896 | 0 | PAM_EXTERN int pam_sm_chauthtok(pam_handle_t * pamh, int flags, int argc,
const char **argv)
{
int r;
PKCS11_CTX *ctx;
unsigned int nslots;
PKCS11_KEY *authkey;
PKCS11_SLOT *slots, *authslot;
const char *user, *pin_regex;
r = module_refresh(pamh, flags, argc, argv,
&user, &ctx, &slots, &nslots, &pin_regex);
if (PAM_SUCCESS != r) {
goto err;
}
if (flags & PAM_CHANGE_EXPIRED_AUTHTOK) {
/* Yes, we explicitly don't want to check CRLs, OCSP or expiration of
* certificates (use pam_pkcs11 for this). */
r = PAM_SUCCESS;
goto err;
}
if (1 != key_find(pamh, flags, user, ctx, slots, nslots,
&authslot, &authkey)) {
r = PAM_AUTHINFO_UNAVAIL;
goto err;
}
if (flags & PAM_PRELIM_CHECK) {
r = PAM_TRY_AGAIN;
goto err;
}
if (flags & PAM_UPDATE_AUTHTOK) {
if (1 != key_change_login(pamh, flags, authslot, pin_regex)) {
if (authslot->token->userPinLocked) {
r = PAM_MAXTRIES;
} else {
r = PAM_AUTH_ERR;
}
goto err;
}
}
r = PAM_SUCCESS;
err:
#ifdef TEST
module_data_cleanup(pamh, global_module_data, r);
#endif
return r;
}
| 6,354 |
86,556 | 0 | void jpc_ns_invlift_row(jpc_fix_t *a, int numcols, int parity)
{
register jpc_fix_t *lptr;
register jpc_fix_t *hptr;
register int n;
int llen;
llen = (numcols + 1 - parity) >> 1;
if (numcols > 1) {
/* Apply the scaling step. */
#if defined(WT_DOSCALE)
lptr = &a[0];
n = llen;
while (n-- > 0) {
lptr[0] = jpc_fix_mul(lptr[0], jpc_dbltofix(1.0 / LGAIN));
++lptr;
}
hptr = &a[llen];
n = numcols - llen;
while (n-- > 0) {
hptr[0] = jpc_fix_mul(hptr[0], jpc_dbltofix(1.0 / HGAIN));
++hptr;
}
#endif
/* Apply the first lifting step. */
lptr = &a[0];
hptr = &a[llen];
if (!parity) {
jpc_fix_minuseq(lptr[0], jpc_fix_mul(jpc_dbltofix(2.0 * DELTA),
hptr[0]));
++lptr;
}
n = llen - (!parity) - (parity != (numcols & 1));
while (n-- > 0) {
jpc_fix_minuseq(lptr[0], jpc_fix_mul(jpc_dbltofix(DELTA),
jpc_fix_add(hptr[0], hptr[1])));
++lptr;
++hptr;
}
if (parity != (numcols & 1)) {
jpc_fix_minuseq(lptr[0], jpc_fix_mul(jpc_dbltofix(2.0 * DELTA),
hptr[0]));
}
/* Apply the second lifting step. */
lptr = &a[0];
hptr = &a[llen];
if (parity) {
jpc_fix_minuseq(hptr[0], jpc_fix_mul(jpc_dbltofix(2.0 * GAMMA),
lptr[0]));
++hptr;
}
n = numcols - llen - parity - (parity == (numcols & 1));
while (n-- > 0) {
jpc_fix_minuseq(hptr[0], jpc_fix_mul(jpc_dbltofix(GAMMA),
jpc_fix_add(lptr[0], lptr[1])));
++hptr;
++lptr;
}
if (parity == (numcols & 1)) {
jpc_fix_minuseq(hptr[0], jpc_fix_mul(jpc_dbltofix(2.0 * GAMMA),
lptr[0]));
}
/* Apply the third lifting step. */
lptr = &a[0];
hptr = &a[llen];
if (!parity) {
jpc_fix_minuseq(lptr[0], jpc_fix_mul(jpc_dbltofix(2.0 * BETA),
hptr[0]));
++lptr;
}
n = llen - (!parity) - (parity != (numcols & 1));
while (n-- > 0) {
jpc_fix_minuseq(lptr[0], jpc_fix_mul(jpc_dbltofix(BETA),
jpc_fix_add(hptr[0], hptr[1])));
++lptr;
++hptr;
}
if (parity != (numcols & 1)) {
jpc_fix_minuseq(lptr[0], jpc_fix_mul(jpc_dbltofix(2.0 * BETA),
hptr[0]));
}
/* Apply the fourth lifting step. */
lptr = &a[0];
hptr = &a[llen];
if (parity) {
jpc_fix_minuseq(hptr[0], jpc_fix_mul(jpc_dbltofix(2.0 * ALPHA),
lptr[0]));
++hptr;
}
n = numcols - llen - parity - (parity == (numcols & 1));
while (n-- > 0) {
jpc_fix_minuseq(hptr[0], jpc_fix_mul(jpc_dbltofix(ALPHA),
jpc_fix_add(lptr[0], lptr[1])));
++hptr;
++lptr;
}
if (parity == (numcols & 1)) {
jpc_fix_minuseq(hptr[0], jpc_fix_mul(jpc_dbltofix(2.0 * ALPHA),
lptr[0]));
}
} else {
#if defined(WT_LENONE)
if (parity) {
lptr = &a[0];
lptr[0] = jpc_fix_asr(lptr[0], 1);
}
#endif
}
}
| 6,355 |
43,839 | 0 | _load_ucode_intel_bsp(struct mc_saved_data *mc_saved_data,
unsigned long *mc_saved_in_initrd,
unsigned long initrd_start_early,
unsigned long initrd_end_early,
struct ucode_cpu_info *uci)
{
enum ucode_state ret;
collect_cpu_info_early(uci);
scan_microcode(initrd_start_early, initrd_end_early, mc_saved_data,
mc_saved_in_initrd, uci);
ret = load_microcode(mc_saved_data, mc_saved_in_initrd,
initrd_start_early, uci);
if (ret == UCODE_OK)
apply_microcode_early(uci, true);
}
| 6,356 |
34,191 | 0 | int __net_init ip_vs_control_net_init_sysctl(struct net *net)
{
int idx;
struct netns_ipvs *ipvs = net_ipvs(net);
struct ctl_table *tbl;
atomic_set(&ipvs->dropentry, 0);
spin_lock_init(&ipvs->dropentry_lock);
spin_lock_init(&ipvs->droppacket_lock);
spin_lock_init(&ipvs->securetcp_lock);
if (!net_eq(net, &init_net)) {
tbl = kmemdup(vs_vars, sizeof(vs_vars), GFP_KERNEL);
if (tbl == NULL)
return -ENOMEM;
} else
tbl = vs_vars;
/* Initialize sysctl defaults */
idx = 0;
ipvs->sysctl_amemthresh = 1024;
tbl[idx++].data = &ipvs->sysctl_amemthresh;
ipvs->sysctl_am_droprate = 10;
tbl[idx++].data = &ipvs->sysctl_am_droprate;
tbl[idx++].data = &ipvs->sysctl_drop_entry;
tbl[idx++].data = &ipvs->sysctl_drop_packet;
#ifdef CONFIG_IP_VS_NFCT
tbl[idx++].data = &ipvs->sysctl_conntrack;
#endif
tbl[idx++].data = &ipvs->sysctl_secure_tcp;
ipvs->sysctl_snat_reroute = 1;
tbl[idx++].data = &ipvs->sysctl_snat_reroute;
ipvs->sysctl_sync_ver = 1;
tbl[idx++].data = &ipvs->sysctl_sync_ver;
ipvs->sysctl_sync_ports = 1;
tbl[idx++].data = &ipvs->sysctl_sync_ports;
ipvs->sysctl_sync_qlen_max = nr_free_buffer_pages() / 32;
tbl[idx++].data = &ipvs->sysctl_sync_qlen_max;
ipvs->sysctl_sync_sock_size = 0;
tbl[idx++].data = &ipvs->sysctl_sync_sock_size;
tbl[idx++].data = &ipvs->sysctl_cache_bypass;
tbl[idx++].data = &ipvs->sysctl_expire_nodest_conn;
tbl[idx++].data = &ipvs->sysctl_expire_quiescent_template;
ipvs->sysctl_sync_threshold[0] = DEFAULT_SYNC_THRESHOLD;
ipvs->sysctl_sync_threshold[1] = DEFAULT_SYNC_PERIOD;
tbl[idx].data = &ipvs->sysctl_sync_threshold;
tbl[idx++].maxlen = sizeof(ipvs->sysctl_sync_threshold);
ipvs->sysctl_sync_refresh_period = DEFAULT_SYNC_REFRESH_PERIOD;
tbl[idx++].data = &ipvs->sysctl_sync_refresh_period;
ipvs->sysctl_sync_retries = clamp_t(int, DEFAULT_SYNC_RETRIES, 0, 3);
tbl[idx++].data = &ipvs->sysctl_sync_retries;
tbl[idx++].data = &ipvs->sysctl_nat_icmp_send;
ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl);
if (ipvs->sysctl_hdr == NULL) {
if (!net_eq(net, &init_net))
kfree(tbl);
return -ENOMEM;
}
ip_vs_start_estimator(net, &ipvs->tot_stats);
ipvs->sysctl_tbl = tbl;
/* Schedule defense work */
INIT_DELAYED_WORK(&ipvs->defense_work, defense_work_handler);
schedule_delayed_work(&ipvs->defense_work, DEFENSE_TIMER_PERIOD);
return 0;
}
| 6,357 |
170,944 | 0 | static vpx_codec_err_t vp8_get_last_ref_frame(vpx_codec_alg_priv_t *ctx,
va_list args)
{
int *ref_info = va_arg(args, int *);
if (ref_info && !ctx->yv12_frame_buffers.use_frame_threads)
{
VP8D_COMP *pbi = (VP8D_COMP *)ctx->yv12_frame_buffers.pbi[0];
VP8_COMMON *oci = &pbi->common;
*ref_info =
(vp8dx_references_buffer( oci, ALTREF_FRAME )?VP8_ALTR_FRAME:0) |
(vp8dx_references_buffer( oci, GOLDEN_FRAME )?VP8_GOLD_FRAME:0) |
(vp8dx_references_buffer( oci, LAST_FRAME )?VP8_LAST_FRAME:0);
return VPX_CODEC_OK;
}
else
return VPX_CODEC_INVALID_PARAM;
}
| 6,358 |
10,669 | 0 | Ins_SHP( TT_ExecContext exc )
{
TT_GlyphZoneRec zp;
FT_UShort refp;
FT_F26Dot6 dx, dy;
FT_UShort point;
if ( exc->top < exc->GS.loop )
{
if ( exc->pedantic_hinting )
exc->error = FT_THROW( Invalid_Reference );
goto Fail;
}
if ( Compute_Point_Displacement( exc, &dx, &dy, &zp, &refp ) )
return;
while ( exc->GS.loop > 0 )
{
exc->args--;
point = (FT_UShort)exc->stack[exc->args];
if ( BOUNDS( point, exc->zp2.n_points ) )
{
if ( exc->pedantic_hinting )
{
exc->error = FT_THROW( Invalid_Reference );
return;
}
}
else
#ifdef TT_SUPPORT_SUBPIXEL_HINTING_INFINALITY
/* doesn't follow the ClearType spec but produces a better result */
if ( SUBPIXEL_HINTING_INFINALITY && exc->ignore_x_mode )
Move_Zp2_Point( exc, point, 0, dy, TRUE );
else
#endif /* TT_SUPPORT_SUBPIXEL_HINTING_INFINALITY */
Move_Zp2_Point( exc, point, dx, dy, TRUE );
exc->GS.loop--;
}
Fail:
exc->GS.loop = 1;
exc->new_top = exc->args;
}
| 6,359 |
152,967 | 0 | void EnumFonts(struct _FPDF_SYSFONTINFO* sysfontinfo, void* mapper) {
FPDF_AddInstalledFont(mapper, "Arial", FXFONT_DEFAULT_CHARSET);
const FPDF_CharsetFontMap* font_map = FPDF_GetDefaultTTFMap();
for (; font_map->charset != -1; ++font_map) {
FPDF_AddInstalledFont(mapper, font_map->fontname, font_map->charset);
}
}
| 6,360 |
120,486 | 0 | PassRefPtr<Node> Element::cloneNode(bool deep)
{
return deep ? cloneElementWithChildren() : cloneElementWithoutChildren();
}
| 6,361 |
87,748 | 0 | static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
{
int ret;
struct hns_roce_ib_iboe *iboe = NULL;
struct ib_device *ib_dev = NULL;
struct device *dev = hr_dev->dev;
iboe = &hr_dev->iboe;
spin_lock_init(&iboe->lock);
ib_dev = &hr_dev->ib_dev;
strlcpy(ib_dev->name, "hns_%d", IB_DEVICE_NAME_MAX);
ib_dev->owner = THIS_MODULE;
ib_dev->node_type = RDMA_NODE_IB_CA;
ib_dev->dev.parent = dev;
ib_dev->phys_port_cnt = hr_dev->caps.num_ports;
ib_dev->local_dma_lkey = hr_dev->caps.reserved_lkey;
ib_dev->num_comp_vectors = hr_dev->caps.num_comp_vectors;
ib_dev->uverbs_abi_ver = 1;
ib_dev->uverbs_cmd_mask =
(1ULL << IB_USER_VERBS_CMD_GET_CONTEXT) |
(1ULL << IB_USER_VERBS_CMD_QUERY_DEVICE) |
(1ULL << IB_USER_VERBS_CMD_QUERY_PORT) |
(1ULL << IB_USER_VERBS_CMD_ALLOC_PD) |
(1ULL << IB_USER_VERBS_CMD_DEALLOC_PD) |
(1ULL << IB_USER_VERBS_CMD_REG_MR) |
(1ULL << IB_USER_VERBS_CMD_DEREG_MR) |
(1ULL << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
(1ULL << IB_USER_VERBS_CMD_CREATE_CQ) |
(1ULL << IB_USER_VERBS_CMD_DESTROY_CQ) |
(1ULL << IB_USER_VERBS_CMD_CREATE_QP) |
(1ULL << IB_USER_VERBS_CMD_MODIFY_QP) |
(1ULL << IB_USER_VERBS_CMD_QUERY_QP) |
(1ULL << IB_USER_VERBS_CMD_DESTROY_QP);
/* HCA||device||port */
ib_dev->modify_device = hns_roce_modify_device;
ib_dev->query_device = hns_roce_query_device;
ib_dev->query_port = hns_roce_query_port;
ib_dev->modify_port = hns_roce_modify_port;
ib_dev->get_link_layer = hns_roce_get_link_layer;
ib_dev->get_netdev = hns_roce_get_netdev;
ib_dev->query_gid = hns_roce_query_gid;
ib_dev->add_gid = hns_roce_add_gid;
ib_dev->del_gid = hns_roce_del_gid;
ib_dev->query_pkey = hns_roce_query_pkey;
ib_dev->alloc_ucontext = hns_roce_alloc_ucontext;
ib_dev->dealloc_ucontext = hns_roce_dealloc_ucontext;
ib_dev->mmap = hns_roce_mmap;
/* PD */
ib_dev->alloc_pd = hns_roce_alloc_pd;
ib_dev->dealloc_pd = hns_roce_dealloc_pd;
/* AH */
ib_dev->create_ah = hns_roce_create_ah;
ib_dev->query_ah = hns_roce_query_ah;
ib_dev->destroy_ah = hns_roce_destroy_ah;
/* QP */
ib_dev->create_qp = hns_roce_create_qp;
ib_dev->modify_qp = hns_roce_modify_qp;
ib_dev->query_qp = hr_dev->hw->query_qp;
ib_dev->destroy_qp = hr_dev->hw->destroy_qp;
ib_dev->post_send = hr_dev->hw->post_send;
ib_dev->post_recv = hr_dev->hw->post_recv;
/* CQ */
ib_dev->create_cq = hns_roce_ib_create_cq;
ib_dev->modify_cq = hr_dev->hw->modify_cq;
ib_dev->destroy_cq = hns_roce_ib_destroy_cq;
ib_dev->req_notify_cq = hr_dev->hw->req_notify_cq;
ib_dev->poll_cq = hr_dev->hw->poll_cq;
/* MR */
ib_dev->get_dma_mr = hns_roce_get_dma_mr;
ib_dev->reg_user_mr = hns_roce_reg_user_mr;
ib_dev->dereg_mr = hns_roce_dereg_mr;
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_REREG_MR) {
ib_dev->rereg_user_mr = hns_roce_rereg_user_mr;
ib_dev->uverbs_cmd_mask |= (1ULL << IB_USER_VERBS_CMD_REREG_MR);
}
/* OTHERS */
ib_dev->get_port_immutable = hns_roce_port_immutable;
ret = ib_register_device(ib_dev, NULL);
if (ret) {
dev_err(dev, "ib_register_device failed!\n");
return ret;
}
ret = hns_roce_setup_mtu_mac(hr_dev);
if (ret) {
dev_err(dev, "setup_mtu_mac failed!\n");
goto error_failed_setup_mtu_mac;
}
iboe->nb.notifier_call = hns_roce_netdev_event;
ret = register_netdevice_notifier(&iboe->nb);
if (ret) {
dev_err(dev, "register_netdevice_notifier failed!\n");
goto error_failed_setup_mtu_mac;
}
return 0;
error_failed_setup_mtu_mac:
ib_unregister_device(ib_dev);
return ret;
}
| 6,362 |
105,136 | 0 | Node* Range::pastLastNode() const
{
if (!m_start.container() || !m_end.container())
return 0;
if (m_end.container()->offsetInCharacters())
return m_end.container()->traverseNextSibling();
if (Node* child = m_end.container()->childNode(m_end.offset()))
return child;
return m_end.container()->traverseNextSibling();
}
| 6,363 |
21,132 | 0 | static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
bool charge)
{
int val = (charge) ? 1 : -1;
this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
}
| 6,364 |
151,343 | 0 | void InspectorTraceEvents::DidReceiveResourceResponse(
unsigned long identifier,
DocumentLoader* loader,
const ResourceResponse& response,
Resource*) {
LocalFrame* frame = loader ? loader->GetFrame() : nullptr;
TRACE_EVENT_INSTANT1(
"devtools.timeline", "ResourceReceiveResponse", TRACE_EVENT_SCOPE_THREAD,
"data", InspectorReceiveResponseEvent::Data(identifier, frame, response));
probe::AsyncTask async_task(frame ? frame->GetDocument() : nullptr,
AsyncId(identifier), "response");
}
| 6,365 |
167,165 | 0 | void HTMLMediaElement::PlaybackStateChanged() {
BLINK_MEDIA_LOG << "playbackStateChanged(" << (void*)this << ")";
if (!GetWebMediaPlayer())
return;
if (GetWebMediaPlayer()->Paused())
PauseInternal();
else
PlayInternal();
}
| 6,366 |
76,430 | 0 | static int update_stack_depth(struct bpf_verifier_env *env,
const struct bpf_func_state *func,
int off)
{
u16 stack = env->subprog_info[func->subprogno].stack_depth;
if (stack >= -off)
return 0;
/* update known max for given subprogram */
env->subprog_info[func->subprogno].stack_depth = -off;
return 0;
}
| 6,367 |
39,725 | 0 | static int vfs_rename_other(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
struct inode *target;
int error;
error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry);
if (error)
return error;
dget(new_dentry);
target = new_dentry->d_inode;
if (target)
mutex_lock(&target->i_mutex);
	if (d_mountpoint(old_dentry) || d_mountpoint(new_dentry))
error = -EBUSY;
else
error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
if (!error) {
if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE))
d_move(old_dentry, new_dentry);
}
if (target)
mutex_unlock(&target->i_mutex);
dput(new_dentry);
return error;
}
| 6,368 |
10,053 | 0 | Render_All( int num_indices,
int first_index )
{
int start_x, start_y, step_x, step_y, x, y;
int i;
FT_Size size;
error = FTDemo_Get_Size( handle, &size );
if ( error )
{
/* probably a non-existent bitmap font size */
return error;
}
INIT_SIZE( size, start_x, start_y, step_x, step_y, x, y );
i = first_index;
while ( i < num_indices )
{
int gindex;
if ( handle->encoding == FT_ENCODING_NONE )
gindex = i;
else
gindex = FTDemo_Get_Index( handle, i );
error = FTDemo_Draw_Index( handle, display, gindex, &x, &y );
if ( error )
status.Fail++;
else if ( X_TOO_LONG( x, size, display ) )
{
x = start_x;
y += step_y;
if ( Y_TOO_LONG( y, size, display ) )
break;
}
i++;
}
return FT_Err_Ok;
}
| 6,369 |
32,921 | 0 | static struct dentry *ext3_fh_to_dentry(struct super_block *sb, struct fid *fid,
int fh_len, int fh_type)
{
return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
ext3_nfs_get_inode);
}
| 6,370 |
7,505 | 0 | simple_method_return (void)
{
DBusMessage *message;
message = dbus_message_new (DBUS_MESSAGE_TYPE_METHOD_RETURN);
if (message == NULL)
_dbus_assert_not_reached ("oom");
set_reply_serial (message);
return message;
}
| 6,371 |
36,323 | 0 | getname(const char __user * filename)
{
return getname_flags(filename, 0, NULL);
}
| 6,372 |
140,112 | 0 | void HTMLMediaElement::setVolume(double vol, ExceptionState& exceptionState) {
BLINK_MEDIA_LOG << "setVolume(" << (void*)this << ", " << vol << ")";
if (m_volume == vol)
return;
if (vol < 0.0f || vol > 1.0f) {
exceptionState.throwDOMException(
IndexSizeError,
ExceptionMessages::indexOutsideRange(
"volume", vol, 0.0, ExceptionMessages::InclusiveBound, 1.0,
ExceptionMessages::InclusiveBound));
return;
}
m_volume = vol;
if (webMediaPlayer())
webMediaPlayer()->setVolume(effectiveMediaVolume());
scheduleEvent(EventTypeNames::volumechange);
}
| 6,373 |
160,515 | 0 | void WebContentsImpl::OnAudioStateChanged(bool is_audible) {
SendPageMessage(new PageMsg_AudioStateChanged(MSG_ROUTING_NONE, is_audible));
NotifyNavigationStateChanged(INVALIDATE_TYPE_TAB);
was_ever_audible_ = was_ever_audible_ || is_audible;
if (delegate_)
delegate_->OnAudioStateChanged(this, is_audible);
}
| 6,374 |
46,777 | 0 | static bool __init sparc64_has_des_opcode(void)
{
unsigned long cfr;
if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO))
return false;
__asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
if (!(cfr & CFR_DES))
return false;
return true;
}
| 6,375 |
34,463 | 0 | int btrfs_clean_old_snapshots(struct btrfs_root *root)
{
LIST_HEAD(list);
struct btrfs_fs_info *fs_info = root->fs_info;
spin_lock(&fs_info->trans_lock);
list_splice_init(&fs_info->dead_roots, &list);
spin_unlock(&fs_info->trans_lock);
while (!list_empty(&list)) {
int ret;
root = list_entry(list.next, struct btrfs_root, root_list);
list_del(&root->root_list);
btrfs_kill_all_delayed_nodes(root);
if (btrfs_header_backref_rev(root->node) <
BTRFS_MIXED_BACKREF_REV)
ret = btrfs_drop_snapshot(root, NULL, 0, 0);
else
			ret = btrfs_drop_snapshot(root, NULL, 1, 0);
BUG_ON(ret < 0);
}
return 0;
}
| 6,376 |
63,176 | 0 | void MSG_WriteShort( msg_t *sb, int c ) {
#ifdef PARANOID
if (c < ((short)0x8000) || c > (short)0x7fff)
Com_Error (ERR_FATAL, "MSG_WriteShort: range error");
#endif
MSG_WriteBits( sb, c, 16 );
}
| 6,377 |
101,199 | 0 | void SetEntrySpecifics(MutableEntry* meta_entry, SyncEntity* sync_entry) {
sync_entry->mutable_specifics()->CopyFrom(meta_entry->Get(SPECIFICS));
sync_entry->set_folder(meta_entry->Get(syncable::IS_DIR));
DCHECK(meta_entry->GetModelType() == sync_entry->GetModelType());
}
| 6,378 |
103,929 | 0 | RenderView::~RenderView() {
history_page_ids_.clear();
if (decrement_shared_popup_at_destruction_)
shared_popup_counter_->data--;
while (!file_chooser_completions_.empty()) {
if (file_chooser_completions_.front()->completion) {
file_chooser_completions_.front()->completion->didChooseFile(
WebVector<WebString>());
}
file_chooser_completions_.pop_front();
}
#if defined(OS_MACOSX)
while (!fake_plugin_window_handles_.empty()) {
DCHECK(*fake_plugin_window_handles_.begin());
DestroyFakePluginWindowHandle(*fake_plugin_window_handles_.begin());
}
#endif
#ifndef NDEBUG
ViewMap* views = g_view_map.Pointer();
for (ViewMap::iterator it = views->begin(); it != views->end(); ++it)
DCHECK_NE(this, it->second) << "Failed to call Close?";
#endif
FOR_EACH_OBSERVER(RenderViewObserver, observers_, set_render_view(NULL));
FOR_EACH_OBSERVER(RenderViewObserver, observers_, OnDestruct());
}
| 6,379 |
139,549 | 0 | static TriState StateOrderedList(LocalFrame& frame, Event*) {
return SelectionListState(frame.Selection(), olTag);
}
| 6,380 |
38,156 | 0 | find_option(name)
const char *name;
{
option_t *opt;
struct option_list *list;
int i, dowild;
for (dowild = 0; dowild <= 1; ++dowild) {
for (opt = general_options; opt->name != NULL; ++opt)
if (match_option(name, opt, dowild))
return opt;
for (opt = auth_options; opt->name != NULL; ++opt)
if (match_option(name, opt, dowild))
return opt;
for (list = extra_options; list != NULL; list = list->next)
for (opt = list->options; opt->name != NULL; ++opt)
if (match_option(name, opt, dowild))
return opt;
for (opt = the_channel->options; opt->name != NULL; ++opt)
if (match_option(name, opt, dowild))
return opt;
for (i = 0; protocols[i] != NULL; ++i)
if ((opt = protocols[i]->options) != NULL)
for (; opt->name != NULL; ++opt)
if (match_option(name, opt, dowild))
return opt;
}
return NULL;
}
| 6,381 |
109,832 | 0 | void Document::setXMLStandalone(bool standalone, ExceptionState& es)
{
if (!implementation()->hasFeature("XML", String())) {
es.throwUninformativeAndGenericDOMException(NotSupportedError);
return;
}
m_xmlStandalone = standalone ? Standalone : NotStandalone;
}
| 6,382 |
126,782 | 0 | bool BrowserView::GetSavedWindowPlacement(
gfx::Rect* bounds,
ui::WindowShowState* show_state) const {
if (!ShouldSaveOrRestoreWindowPos())
return false;
chrome::GetSavedWindowBoundsAndShowState(browser_.get(), bounds, show_state);
#if defined(USE_ASH)
if (browser_->is_type_popup() || browser_->is_type_panel()) {
if (bounds->x() == 0 && bounds->y() == 0) {
*bounds = ChromeShellDelegate::instance()->window_positioner()->
GetPopupPosition(*bounds);
}
}
#endif
if ((browser_->is_type_popup() &&
!browser_->is_devtools() && !browser_->is_app()) ||
(browser_->is_type_panel())) {
if (IsToolbarVisible()) {
bounds->set_height(
bounds->height() + toolbar_->GetPreferredSize().height());
}
gfx::Rect window_rect = frame_->non_client_view()->
GetWindowBoundsForClientBounds(*bounds);
window_rect.set_origin(bounds->origin());
if (window_rect.x() == 0 && window_rect.y() == 0) {
gfx::Size size = window_rect.size();
window_rect.set_origin(
WindowSizer::GetDefaultPopupOrigin(size,
browser_->host_desktop_type()));
}
*bounds = window_rect;
*show_state = ui::SHOW_STATE_NORMAL;
}
return true;
}
| 6,383 |
130,778 | 0 | static void enumAttrAttributeGetterCallback(v8::Local<v8::String>, const v8::PropertyCallbackInfo<v8::Value>& info)
{
TRACE_EVENT_SET_SAMPLING_STATE("Blink", "DOMGetter");
TestObjectV8Internal::enumAttrAttributeGetter(info);
TRACE_EVENT_SET_SAMPLING_STATE("V8", "V8Execution");
}
| 6,384 |
159,368 | 0 | void SetExpectedResponse(const std::string& expected_response) {
expected_response_ = expected_response;
}
| 6,385 |
176,820 | 0 | bool isJavaPackageName(const StringPiece16& str) {
if (str.empty()) {
return false;
}
size_t pieces = 0;
for (const StringPiece16& piece : tokenize(str, u'.')) {
pieces++;
if (piece.empty()) {
return false;
}
if (piece.data()[0] == u'_' || piece.data()[piece.size() - 1] == u'_') {
return false;
}
if (findNonAlphaNumericAndNotInSet(piece, u"_") != piece.end()) {
return false;
}
}
return pieces >= 1;
}
| 6,386 |
34,157 | 0 | static u32 ccid3_hc_tx_idle_rtt(struct ccid3_hc_tx_sock *hc, ktime_t now)
{
u32 delta = ktime_us_delta(now, hc->tx_t_last_win_count);
return delta / hc->tx_rtt;
}
| 6,387 |
23,593 | 0 | static void multipath_dtr(struct dm_target *ti)
{
struct multipath *m = ti->private;
flush_multipath_work(m);
free_multipath(m);
}
| 6,388 |
48,905 | 0 | static int netif_alloc_rx_queues(struct net_device *dev)
{
unsigned int i, count = dev->num_rx_queues;
struct netdev_rx_queue *rx;
size_t sz = count * sizeof(*rx);
BUG_ON(count < 1);
rx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
if (!rx) {
rx = vzalloc(sz);
if (!rx)
return -ENOMEM;
}
dev->_rx = rx;
for (i = 0; i < count; i++)
rx[i].dev = dev;
return 0;
}
| 6,389 |
79,553 | 0 | static struct ImapCommand *cmd_new(struct ImapData *idata)
{
struct ImapCommand *cmd = NULL;
if (cmd_queue_full(idata))
{
mutt_debug(3, "IMAP command queue full\n");
return NULL;
}
cmd = idata->cmds + idata->nextcmd;
idata->nextcmd = (idata->nextcmd + 1) % idata->cmdslots;
snprintf(cmd->seq, sizeof(cmd->seq), "a%04u", idata->seqno++);
if (idata->seqno > 9999)
idata->seqno = 0;
cmd->state = IMAP_CMD_NEW;
return cmd;
}
| 6,390 |
87,634 | 0 | LIBOPENMPT_MODPLUG_API char* ModPlug_GetMessage(ModPlugFile* file)
{
if(!file) return NULL;
return file->message;
}
| 6,391 |
140,029 | 0 | bool HTMLMediaElement::loop() const {
return fastHasAttribute(loopAttr);
}
| 6,392 |
40,995 | 0 | void CMSEXPORT cmsMLUfree(cmsMLU* mlu)
{
if (mlu) {
if (mlu -> Entries) _cmsFree(mlu ->ContextID, mlu->Entries);
if (mlu -> MemPool) _cmsFree(mlu ->ContextID, mlu->MemPool);
_cmsFree(mlu ->ContextID, mlu);
}
}
| 6,393 |
110,032 | 0 | int HTMLSelectElement::nextSelectableListIndex(int startIndex) const
{
return nextValidIndex(startIndex, SkipForwards, 1);
}
| 6,394 |
7,029 | 0 | Init_Linked( TProfileList* l )
{
*l = NULL;
}
| 6,395 |
25,603 | 0 | fmov_reg_idx(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m,
int n)
{
if (FPSCR_SZ) {
FMOV_EXT(m);
WRITE(FRm, Rn + R0 + 4);
m++;
WRITE(FRm, Rn + R0);
} else {
WRITE(FRm, Rn + R0);
}
return 0;
}
| 6,396 |
21,706 | 0 | address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
if (ctxt->ad_bytes == sizeof(unsigned long))
return reg;
else
return reg & ad_mask(ctxt);
}
| 6,397 |
84,675 | 0 | kadm5_modify_principal(void *server_handle,
kadm5_principal_ent_t entry, long mask)
{
int ret, ret2, i;
kadm5_policy_ent_rec pol;
krb5_boolean have_pol = FALSE;
krb5_db_entry *kdb;
krb5_tl_data *tl_data_orig;
osa_princ_ent_rec adb;
kadm5_server_handle_t handle = server_handle;
CHECK_HANDLE(server_handle);
krb5_clear_error_message(handle->context);
if(entry == NULL)
return EINVAL;
if((mask & KADM5_PRINCIPAL) || (mask & KADM5_LAST_PWD_CHANGE) ||
(mask & KADM5_MOD_TIME) || (mask & KADM5_MOD_NAME) ||
(mask & KADM5_MKVNO) || (mask & KADM5_AUX_ATTRIBUTES) ||
(mask & KADM5_KEY_DATA) || (mask & KADM5_LAST_SUCCESS) ||
(mask & KADM5_LAST_FAILED))
return KADM5_BAD_MASK;
if((mask & ~ALL_PRINC_MASK))
return KADM5_BAD_MASK;
if((mask & KADM5_POLICY) && entry->policy == NULL)
return KADM5_BAD_MASK;
if((mask & KADM5_POLICY) && (mask & KADM5_POLICY_CLR))
return KADM5_BAD_MASK;
if (mask & KADM5_TL_DATA) {
tl_data_orig = entry->tl_data;
while (tl_data_orig) {
if (tl_data_orig->tl_data_type < 256)
return KADM5_BAD_TL_TYPE;
tl_data_orig = tl_data_orig->tl_data_next;
}
}
ret = kdb_get_entry(handle, entry->principal, &kdb, &adb);
if (ret)
return(ret);
/*
* This is pretty much the same as create ...
*/
if ((mask & KADM5_POLICY)) {
ret = get_policy(handle, entry->policy, &pol, &have_pol);
if (ret)
goto done;
/* set us up to use the new policy */
adb.aux_attributes |= KADM5_POLICY;
if (adb.policy)
free(adb.policy);
adb.policy = strdup(entry->policy);
}
if (have_pol) {
/* set pw_max_life based on new policy */
if (pol.pw_max_life) {
ret = krb5_dbe_lookup_last_pwd_change(handle->context, kdb,
&(kdb->pw_expiration));
if (ret)
goto done;
kdb->pw_expiration = ts_incr(kdb->pw_expiration, pol.pw_max_life);
} else {
kdb->pw_expiration = 0;
}
}
if ((mask & KADM5_POLICY_CLR) && (adb.aux_attributes & KADM5_POLICY)) {
free(adb.policy);
adb.policy = NULL;
adb.aux_attributes &= ~KADM5_POLICY;
kdb->pw_expiration = 0;
}
if ((mask & KADM5_ATTRIBUTES))
kdb->attributes = entry->attributes;
if ((mask & KADM5_MAX_LIFE))
kdb->max_life = entry->max_life;
if ((mask & KADM5_PRINC_EXPIRE_TIME))
kdb->expiration = entry->princ_expire_time;
if (mask & KADM5_PW_EXPIRATION)
kdb->pw_expiration = entry->pw_expiration;
if (mask & KADM5_MAX_RLIFE)
kdb->max_renewable_life = entry->max_renewable_life;
if((mask & KADM5_KVNO)) {
for (i = 0; i < kdb->n_key_data; i++)
kdb->key_data[i].key_data_kvno = entry->kvno;
}
if (mask & KADM5_TL_DATA) {
krb5_tl_data *tl;
        /* may have to change the version number of the API. Updates the list with the given tl_data rather than overwriting */
for (tl = entry->tl_data; tl;
tl = tl->tl_data_next)
{
ret = krb5_dbe_update_tl_data(handle->context, kdb, tl);
if( ret )
{
goto done;
}
}
}
/*
* Setting entry->fail_auth_count to 0 can be used to manually unlock
* an account. It is not possible to set fail_auth_count to any other
* value using kadmin.
*/
if (mask & KADM5_FAIL_AUTH_COUNT) {
if (entry->fail_auth_count != 0) {
ret = KADM5_BAD_SERVER_PARAMS;
goto done;
}
kdb->fail_auth_count = 0;
}
/* let the mask propagate to the database provider */
kdb->mask = mask;
ret = k5_kadm5_hook_modify(handle->context, handle->hook_handles,
KADM5_HOOK_STAGE_PRECOMMIT, entry, mask);
if (ret)
goto done;
ret = kdb_put_entry(handle, kdb, &adb);
if (ret) goto done;
(void) k5_kadm5_hook_modify(handle->context, handle->hook_handles,
KADM5_HOOK_STAGE_POSTCOMMIT, entry, mask);
ret = KADM5_OK;
done:
if (have_pol) {
ret2 = kadm5_free_policy_ent(handle->lhandle, &pol);
ret = ret ? ret : ret2;
}
kdb_free_entry(handle, kdb, &adb);
return ret;
}
| 6,398 |
179,705 | 1 | int hashtable_set(hashtable_t *hashtable,
const char *key, size_t serial,
json_t *value)
{
pair_t *pair;
bucket_t *bucket;
size_t hash, index;
/* rehash if the load ratio exceeds 1 */
if(hashtable->size >= num_buckets(hashtable))
if(hashtable_do_rehash(hashtable))
return -1;
hash = hash_str(key);
index = hash % num_buckets(hashtable);
bucket = &hashtable->buckets[index];
pair = hashtable_find_pair(hashtable, bucket, key, hash);
if(pair)
{
json_decref(pair->value);
pair->value = value;
}
else
{
/* offsetof(...) returns the size of pair_t without the last,
flexible member. This way, the correct amount is
allocated. */
pair = jsonp_malloc(offsetof(pair_t, key) + strlen(key) + 1);
if(!pair)
return -1;
pair->hash = hash;
pair->serial = serial;
strcpy(pair->key, key);
pair->value = value;
list_init(&pair->list);
insert_to_bucket(hashtable, bucket, &pair->list);
hashtable->size++;
}
return 0;
}
| 6,399 |