unique_id (int64, 13-189k) | target (int64, 0-1) | code (string, length 20-241k) | __index_level_0__ (int64, 0-18.9k)
---|---|---|---|
40,972 | 0 |
wchar_t* DupWcs(cmsContext ContextID, const wchar_t* ptr)
{
if (ptr == NULL) return NULL;
return (wchar_t*) _cmsDupMem(ContextID, ptr, (mywcslen(ptr) + 1) * sizeof(wchar_t));
}
| 10,000 |
132,277 | 0 |
void RenderFrameImpl::WidgetWillClose() {
FOR_EACH_OBSERVER(RenderFrameObserver, observers_, WidgetWillClose());
}
| 10,001 |
145,315 | 0 |
void TestFeaturesNativeHandler::GetAPIFeatures(
const v8::FunctionCallbackInfo<v8::Value>& args) {
scoped_ptr<JSONFeatureProviderSource> source(
ExtensionsClient::Get()->CreateFeatureProviderSource("api"));
scoped_ptr<content::V8ValueConverter> converter(
content::V8ValueConverter::create());
args.GetReturnValue().Set(
converter->ToV8Value(&source->dictionary(), context()->v8_context()));
}
| 10,002 |
16,021 | 0 |
XmpFilePtr xmp_files_open_new(const char *path, XmpOpenFileOptions options)
{
CHECK_PTR(path, NULL);
RESET_ERROR;
try {
auto txf = std::unique_ptr<SXMPFiles>(new SXMPFiles);
txf->OpenFile(path, XMP_FT_UNKNOWN, options);
return reinterpret_cast<XmpFilePtr>(txf.release());
}
catch (const XMP_Error &e) {
set_error(e);
}
return NULL;
}
| 10,003 |
25,990 | 0 |
ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
{
if (event->attr.pinned)
return &ctx->pinned_groups;
else
return &ctx->flexible_groups;
}
| 10,004 |
36,502 | 0 |
static bool card_id_ok(struct snd_card *card, const char *id)
{
int i;
if (!snd_info_check_reserved_words(id))
return false;
for (i = 0; i < snd_ecards_limit; i++) {
if (snd_cards[i] && snd_cards[i] != card &&
!strcmp(snd_cards[i]->id, id))
return false;
}
return true;
}
| 10,005 |
113,990 | 0 |
void NotifyRefreshNetwork() {
ash::NetworkObserver* observer = tray_->network_observer();
chromeos::NetworkLibrary* crosnet =
chromeos::CrosLibrary::Get()->GetNetworkLibrary();
if (observer) {
ash::NetworkIconInfo info;
info.image = network_icon_->GetIconAndText(&info.description);
info.tray_icon_visible =
ShouldShowNetworkIconInTray(crosnet->connected_network());
observer->OnNetworkRefresh(info);
}
const Network* connected_network = crosnet->connected_network();
if (accessibility::IsSpokenFeedbackEnabled()) {
bool speak = false;
if (connected_network_ != connected_network) {
speak = true;
} else if (connected_network) {
if ((Network::IsConnectedState(state_) &&
!connected_network->connected()) ||
(Network::IsConnectingState(state_) &&
!connected_network->connecting()) ||
(Network::IsDisconnectedState(state_) &&
!connected_network->disconnected())) {
speak = true;
}
}
if (speak) {
AccessibilitySpeak(connected_network);
}
}
connected_network_ = connected_network;
if (connected_network) {
state_ = connected_network->state();
} else {
state_ = STATE_UNKNOWN;
}
}
| 10,006 |
20,186 | 0 |
int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
size_t len)
{
return -EOPNOTSUPP;
}
| 10,007 |
25,448 | 0 |
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
unsigned long ip;
if (TRAP(regs) != 0xf00)
return regs->nip; /* not a PMU interrupt */
ip = mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
return ip;
}
| 10,008 |
14,558 | 0 |
int BN_set_word(BIGNUM *a, BN_ULONG w)
{
bn_check_top(a);
if (bn_expand(a,(int)sizeof(BN_ULONG)*8) == NULL) return(0);
a->neg = 0;
a->d[0] = w;
a->top = (w ? 1 : 0);
bn_check_top(a);
return(1);
}
| 10,009 |
59,124 | 0 |
static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
enum reg_arg_type t)
{
struct bpf_reg_state *regs = env->cur_state->regs;
if (regno >= MAX_BPF_REG) {
verbose(env, "R%d is invalid\n", regno);
return -EINVAL;
}
if (t == SRC_OP) {
/* check whether register used as source operand can be read */
if (regs[regno].type == NOT_INIT) {
verbose(env, "R%d !read_ok\n", regno);
return -EACCES;
}
mark_reg_read(env->cur_state, regno);
} else {
/* check whether register used as dest operand can be written to */
if (regno == BPF_REG_FP) {
verbose(env, "frame pointer is read only\n");
return -EACCES;
}
regs[regno].live |= REG_LIVE_WRITTEN;
if (t == DST_OP)
mark_reg_unknown(env, regs, regno);
}
return 0;
}
| 10,010 |
68,080 | 0 |
static ut64 baddr(RBinFile *arch) {
return 0;
}
| 10,011 |
42,340 | 0 |
static int path_lookupat(int dfd, const struct filename *name,
unsigned int flags, struct nameidata *nd)
{
struct path path;
int err;
/*
* Path walking is largely split up into 2 different synchronisation
* schemes, rcu-walk and ref-walk (explained in
* Documentation/filesystems/path-lookup.txt). These share much of the
* path walk code, but some things particularly setup, cleanup, and
* following mounts are sufficiently divergent that functions are
* duplicated. Typically there is a function foo(), and its RCU
* analogue, foo_rcu().
*
* -ECHILD is the error number of choice (just to avoid clashes) that
* is returned if some aspect of an rcu-walk fails. Such an error must
* be handled by restarting a traditional ref-walk (which will always
* be able to complete).
*/
err = path_init(dfd, name, flags, nd);
if (!err && !(flags & LOOKUP_PARENT)) {
err = lookup_last(nd, &path);
while (err > 0) {
void *cookie;
struct path link = path;
err = may_follow_link(&link, nd);
if (unlikely(err))
break;
nd->flags |= LOOKUP_PARENT;
err = follow_link(&link, nd, &cookie);
if (err)
break;
err = lookup_last(nd, &path);
put_link(nd, &link, cookie);
}
}
if (!err)
err = complete_walk(nd);
if (!err && nd->flags & LOOKUP_DIRECTORY) {
if (!d_can_lookup(nd->path.dentry)) {
path_put(&nd->path);
err = -ENOTDIR;
}
}
path_cleanup(nd);
return err;
}
| 10,012 |
45,148 | 0 |
static const char *req_protocol_field(request_rec *r)
{
return r->protocol;
}
| 10,013 |
127,570 | 0 |
void DetachSharedMemory(Display* display, XSharedMemoryId shmseg) {
DCHECK(QuerySharedMemorySupport(display));
XShmSegmentInfo shminfo;
memset(&shminfo, 0, sizeof(shminfo));
shminfo.shmseg = shmseg;
if (!XShmDetach(display, &shminfo))
NOTREACHED();
}
| 10,014 |
3,713 | 0 |
_dbus_append_user_from_current_process (DBusString *str)
{
return _dbus_string_append_uint (str,
_dbus_geteuid ());
}
| 10,015 |
128,200 | 0 |
ScriptPromise Notification::requestPermission(ScriptState* scriptState, NotificationPermissionCallback* deprecatedCallback)
{
ExecutionContext* context = scriptState->executionContext();
if (NotificationPermissionClient* permissionClient = NotificationPermissionClient::from(context))
return permissionClient->requestPermission(scriptState, deprecatedCallback);
ASSERT(context->activeDOMObjectsAreStopped());
return ScriptPromise();
}
| 10,016 |
132,860 | 0 |
void PictureLayerImpl::UpdateIdealScales() {
DCHECK(CanHaveTilings());
float min_contents_scale = MinimumContentsScale();
DCHECK_GT(min_contents_scale, 0.f);
float min_page_scale = layer_tree_impl()->min_page_scale_factor();
DCHECK_GT(min_page_scale, 0.f);
float min_device_scale = 1.f;
float min_source_scale =
min_contents_scale / min_page_scale / min_device_scale;
float ideal_page_scale = draw_properties().page_scale_factor;
float ideal_device_scale = draw_properties().device_scale_factor;
float ideal_source_scale = draw_properties().ideal_contents_scale /
ideal_page_scale / ideal_device_scale;
ideal_contents_scale_ =
std::max(draw_properties().ideal_contents_scale, min_contents_scale);
ideal_page_scale_ = draw_properties().page_scale_factor;
ideal_device_scale_ = draw_properties().device_scale_factor;
ideal_source_scale_ = std::max(ideal_source_scale, min_source_scale);
}
| 10,017 |
15,495 | 0 |
read_response_body (struct http_stat *hs, int sock, FILE *fp, wgint contlen,
wgint contrange, bool chunked_transfer_encoding,
char *url, char *warc_timestamp_str, char *warc_request_uuid,
ip_address *warc_ip, char *type, int statcode, char *head)
{
int warc_payload_offset = 0;
FILE *warc_tmp = NULL;
int warcerr = 0;
int flags = 0;
if (opt.warc_filename != NULL)
{
/* Open a temporary file where we can write the response before we
add it to the WARC record. */
warc_tmp = warc_tempfile ();
if (warc_tmp == NULL)
warcerr = WARC_TMP_FOPENERR;
if (warcerr == 0)
{
/* We should keep the response headers for the WARC record. */
int head_len = strlen (head);
int warc_tmp_written = fwrite (head, 1, head_len, warc_tmp);
if (warc_tmp_written != head_len)
warcerr = WARC_TMP_FWRITEERR;
warc_payload_offset = head_len;
}
if (warcerr != 0)
{
if (warc_tmp != NULL)
fclose (warc_tmp);
return warcerr;
}
}
if (fp != NULL)
{
/* This confuses the timestamping code that checks for file size.
#### The timestamping code should be smarter about file size. */
if (opt.save_headers && hs->restval == 0)
fwrite (head, 1, strlen (head), fp);
}
/* Read the response body. */
if (contlen != -1)
/* If content-length is present, read that much; otherwise, read
until EOF. The HTTP spec doesn't require the server to
actually close the connection when it's done sending data. */
flags |= rb_read_exactly;
if (fp != NULL && hs->restval > 0 && contrange == 0)
/* If the server ignored our range request, instruct fd_read_body
to skip the first RESTVAL bytes of body. */
flags |= rb_skip_startpos;
if (chunked_transfer_encoding)
flags |= rb_chunked_transfer_encoding;
if (hs->remote_encoding == ENC_GZIP)
flags |= rb_compressed_gzip;
hs->len = hs->restval;
hs->rd_size = 0;
/* Download the response body and write it to fp.
If we are working on a WARC file, we simultaneously write the
response body to warc_tmp. */
hs->res = fd_read_body (hs->local_file, sock, fp, contlen != -1 ? contlen : 0,
hs->restval, &hs->rd_size, &hs->len, &hs->dltime,
flags, warc_tmp);
if (hs->res >= 0)
{
if (warc_tmp != NULL)
{
/* Create a response record and write it to the WARC file.
Note: per the WARC standard, the request and response should share
the same date header. We re-use the timestamp of the request.
The response record should also refer to the uuid of the request. */
bool r = warc_write_response_record (url, warc_timestamp_str,
warc_request_uuid, warc_ip,
warc_tmp, warc_payload_offset,
type, statcode, hs->newloc);
/* warc_write_response_record has closed warc_tmp. */
if (! r)
return WARC_ERR;
}
return RETRFINISHED;
}
if (warc_tmp != NULL)
fclose (warc_tmp);
if (hs->res == -2)
{
/* Error while writing to fd. */
return FWRITEERR;
}
else if (hs->res == -3)
{
/* Error while writing to warc_tmp. */
return WARC_TMP_FWRITEERR;
}
else
{
/* A read error! */
hs->rderrmsg = xstrdup (fd_errstr (sock));
return RETRFINISHED;
}
}
| 10,018 |
94,728 | 0 |
static inline int build_open_flags(int flags, umode_t mode, struct open_flags *op)
{
int lookup_flags = 0;
int acc_mode = ACC_MODE(flags);
if (flags & (O_CREAT | __O_TMPFILE))
op->mode = (mode & S_IALLUGO) | S_IFREG;
else
op->mode = 0;
/* Must never be set by userspace */
flags &= ~FMODE_NONOTIFY & ~O_CLOEXEC;
/*
* O_SYNC is implemented as __O_SYNC|O_DSYNC. As many places only
* check for O_DSYNC if the need any syncing at all we enforce it's
* always set instead of having to deal with possibly weird behaviour
* for malicious applications setting only __O_SYNC.
*/
if (flags & __O_SYNC)
flags |= O_DSYNC;
if (flags & __O_TMPFILE) {
if ((flags & O_TMPFILE_MASK) != O_TMPFILE)
return -EINVAL;
if (!(acc_mode & MAY_WRITE))
return -EINVAL;
} else if (flags & O_PATH) {
/*
* If we have O_PATH in the open flag. Then we
* cannot have anything other than the below set of flags
*/
flags &= O_DIRECTORY | O_NOFOLLOW | O_PATH;
acc_mode = 0;
}
op->open_flag = flags;
/* O_TRUNC implies we need access checks for write permissions */
if (flags & O_TRUNC)
acc_mode |= MAY_WRITE;
/* Allow the LSM permission hook to distinguish append
access from general write access. */
if (flags & O_APPEND)
acc_mode |= MAY_APPEND;
op->acc_mode = acc_mode;
op->intent = flags & O_PATH ? 0 : LOOKUP_OPEN;
if (flags & O_CREAT) {
op->intent |= LOOKUP_CREATE;
if (flags & O_EXCL)
op->intent |= LOOKUP_EXCL;
}
if (flags & O_DIRECTORY)
lookup_flags |= LOOKUP_DIRECTORY;
if (!(flags & O_NOFOLLOW))
lookup_flags |= LOOKUP_FOLLOW;
op->lookup_flags = lookup_flags;
return 0;
}
| 10,019 |
40,238 | 0 |
static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
int err = sock_queue_rcv_skb(sk, skb);
if (err < 0)
kfree_skb(skb);
return err ? NET_RX_DROP : NET_RX_SUCCESS;
}
| 10,020 |
30,117 | 0 |
void __weak arch_ftrace_update_code(int command)
{
ftrace_run_stop_machine(command);
}
| 10,021 |
68,222 | 0 |
static struct sock *llc_lookup_dgram(struct llc_sap *sap,
const struct llc_addr *laddr)
{
struct sock *rc;
struct hlist_nulls_node *node;
int slot = llc_sk_laddr_hashfn(sap, laddr);
struct hlist_nulls_head *laddr_hb = &sap->sk_laddr_hash[slot];
rcu_read_lock_bh();
again:
sk_nulls_for_each_rcu(rc, node, laddr_hb) {
if (llc_dgram_match(sap, laddr, rc)) {
/* Extra checks required by SLAB_DESTROY_BY_RCU */
if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
goto again;
if (unlikely(llc_sk(rc)->sap != sap ||
!llc_dgram_match(sap, laddr, rc))) {
sock_put(rc);
continue;
}
goto found;
}
}
rc = NULL;
/*
* if the nulls value we got at the end of this lookup is
* not the expected one, we must restart lookup.
* We probably met an item that was moved to another chain.
*/
if (unlikely(get_nulls_value(node) != slot))
goto again;
found:
rcu_read_unlock_bh();
return rc;
}
| 10,022 |
188,406 | 1 |
long Cluster::HasBlockEntries(
const Segment* pSegment,
long long off, //relative to start of segment payload
long long& pos,
long& len)
{
assert(pSegment);
assert(off >= 0); //relative to segment
IMkvReader* const pReader = pSegment->m_pReader;
long long total, avail;
long status = pReader->Length(&total, &avail);
if (status < 0) //error
return status;
assert((total < 0) || (avail <= total));
pos = pSegment->m_start + off; //absolute
if ((total >= 0) && (pos >= total))
return 0; //we don't even have a complete cluster
const long long segment_stop =
(pSegment->m_size < 0) ? -1 : pSegment->m_start + pSegment->m_size;
long long cluster_stop = -1; //interpreted later to mean "unknown size"
{
if ((pos + 1) > avail)
{
len = 1;
return E_BUFFER_NOT_FULL;
}
long long result = GetUIntLength(pReader, pos, len);
if (result < 0) //error
return static_cast<long>(result);
if (result > 0) //need more data
return E_BUFFER_NOT_FULL;
if ((segment_stop >= 0) && ((pos + len) > segment_stop))
return E_FILE_FORMAT_INVALID;
if ((total >= 0) && ((pos + len) > total))
return 0;
if ((pos + len) > avail)
return E_BUFFER_NOT_FULL;
const long long id = ReadUInt(pReader, pos, len);
if (id < 0) //error
return static_cast<long>(id);
if (id != 0x0F43B675) //weird: not cluster ID
return -1; //generic error
pos += len; //consume Cluster ID field
//read size field
if ((pos + 1) > avail)
{
len = 1;
return E_BUFFER_NOT_FULL;
}
result = GetUIntLength(pReader, pos, len);
if (result < 0) //error
return static_cast<long>(result);
if (result > 0) //weird
return E_BUFFER_NOT_FULL;
if ((segment_stop >= 0) && ((pos + len) > segment_stop))
return E_FILE_FORMAT_INVALID;
if ((total >= 0) && ((pos + len) > total))
return 0;
if ((pos + len) > avail)
return E_BUFFER_NOT_FULL;
const long long size = ReadUInt(pReader, pos, len);
if (size < 0) //error
return static_cast<long>(size);
if (size == 0)
return 0; //cluster does not have entries
pos += len; //consume size field
//pos now points to start of payload
const long long unknown_size = (1LL << (7 * len)) - 1;
if (size != unknown_size)
{
cluster_stop = pos + size;
assert(cluster_stop >= 0);
if ((segment_stop >= 0) && (cluster_stop > segment_stop))
return E_FILE_FORMAT_INVALID;
if ((total >= 0) && (cluster_stop > total))
//return E_FILE_FORMAT_INVALID; //too conservative
return 0; //cluster does not have any entries
}
}
for (;;)
{
if ((cluster_stop >= 0) && (pos >= cluster_stop))
return 0; //no entries detected
if ((pos + 1) > avail)
{
len = 1;
return E_BUFFER_NOT_FULL;
}
long long result = GetUIntLength(pReader, pos, len);
if (result < 0) //error
return static_cast<long>(result);
if (result > 0) //need more data
return E_BUFFER_NOT_FULL;
if ((cluster_stop >= 0) && ((pos + len) > cluster_stop))
return E_FILE_FORMAT_INVALID;
if ((pos + len) > avail)
return E_BUFFER_NOT_FULL;
const long long id = ReadUInt(pReader, pos, len);
if (id < 0) //error
return static_cast<long>(id);
//This is the distinguished set of ID's we use to determine
//that we have exhausted the sub-element's inside the cluster
//whose ID we parsed earlier.
if (id == 0x0F43B675) //Cluster ID
return 0; //no entries found
if (id == 0x0C53BB6B) //Cues ID
return 0; //no entries found
pos += len; //consume id field
if ((cluster_stop >= 0) && (pos >= cluster_stop))
return E_FILE_FORMAT_INVALID;
//read size field
if ((pos + 1) > avail)
{
len = 1;
return E_BUFFER_NOT_FULL;
}
result = GetUIntLength(pReader, pos, len);
if (result < 0) //error
return static_cast<long>(result);
if (result > 0) //underflow
return E_BUFFER_NOT_FULL;
if ((cluster_stop >= 0) && ((pos + len) > cluster_stop))
return E_FILE_FORMAT_INVALID;
if ((pos + len) > avail)
return E_BUFFER_NOT_FULL;
const long long size = ReadUInt(pReader, pos, len);
if (size < 0) //error
return static_cast<long>(size);
pos += len; //consume size field
//pos now points to start of payload
if ((cluster_stop >= 0) && (pos > cluster_stop))
return E_FILE_FORMAT_INVALID;
if (size == 0) //weird
continue;
const long long unknown_size = (1LL << (7 * len)) - 1;
if (size == unknown_size)
return E_FILE_FORMAT_INVALID; //not supported inside cluster
if ((cluster_stop >= 0) && ((pos + size) > cluster_stop))
return E_FILE_FORMAT_INVALID;
if (id == 0x20) //BlockGroup ID
return 1; //have at least one entry
if (id == 0x23) //SimpleBlock ID
return 1; //have at least one entry
pos += size; //consume payload
assert((cluster_stop < 0) || (pos <= cluster_stop));
}
}
| 10,023 |
2,446 | 0 |
NTSTATUS smb2cli_req_compound_submit(struct tevent_req **reqs,
int num_reqs)
{
struct smbXcli_req_state *state;
struct tevent_req *subreq;
struct iovec *iov;
int i, num_iov, nbt_len;
int tf_iov = -1;
const DATA_BLOB *encryption_key = NULL;
uint64_t encryption_session_id = 0;
uint64_t nonce_high = UINT64_MAX;
uint64_t nonce_low = UINT64_MAX;
/*
* 1 for the nbt length, optional TRANSFORM
* per request: HDR, fixed, dyn, padding
* -1 because the last one does not need padding
*/
iov = talloc_array(reqs[0], struct iovec, 1 + 1 + 4*num_reqs - 1);
if (iov == NULL) {
return NT_STATUS_NO_MEMORY;
}
num_iov = 1;
nbt_len = 0;
/*
* the session of the first request that requires encryption
* specifies the encryption key.
*/
for (i=0; i<num_reqs; i++) {
if (!tevent_req_is_in_progress(reqs[i])) {
return NT_STATUS_INTERNAL_ERROR;
}
state = tevent_req_data(reqs[i], struct smbXcli_req_state);
if (!smbXcli_conn_is_connected(state->conn)) {
return NT_STATUS_CONNECTION_DISCONNECTED;
}
if ((state->conn->protocol != PROTOCOL_NONE) &&
(state->conn->protocol < PROTOCOL_SMB2_02)) {
return NT_STATUS_REVISION_MISMATCH;
}
if (state->session == NULL) {
continue;
}
if (!state->smb2.should_encrypt) {
continue;
}
encryption_key = &state->session->smb2->encryption_key;
if (encryption_key->length == 0) {
return NT_STATUS_INVALID_PARAMETER_MIX;
}
encryption_session_id = state->session->smb2->session_id;
state->session->smb2->nonce_low += 1;
if (state->session->smb2->nonce_low == 0) {
state->session->smb2->nonce_high += 1;
state->session->smb2->nonce_low += 1;
}
/*
* CCM and GCM algorithms must never have their
* nonce wrap, or the security of the whole
* communication and the keys is destroyed.
* We must drop the connection once we have
* transfered too much data.
*
* NOTE: We assume nonces greater than 8 bytes.
*/
if (state->session->smb2->nonce_high >=
state->session->smb2->nonce_high_max)
{
return NT_STATUS_ENCRYPTION_FAILED;
}
nonce_high = state->session->smb2->nonce_high_random;
nonce_high += state->session->smb2->nonce_high;
nonce_low = state->session->smb2->nonce_low;
tf_iov = num_iov;
iov[num_iov].iov_base = state->smb2.transform;
iov[num_iov].iov_len = sizeof(state->smb2.transform);
num_iov += 1;
SBVAL(state->smb2.transform, SMB2_TF_PROTOCOL_ID, SMB2_TF_MAGIC);
SBVAL(state->smb2.transform, SMB2_TF_NONCE,
nonce_low);
SBVAL(state->smb2.transform, SMB2_TF_NONCE+8,
nonce_high);
SBVAL(state->smb2.transform, SMB2_TF_SESSION_ID,
encryption_session_id);
nbt_len += SMB2_TF_HDR_SIZE;
break;
}
for (i=0; i<num_reqs; i++) {
int hdr_iov;
size_t reqlen;
bool ret;
uint16_t opcode;
uint64_t avail;
uint16_t charge;
uint16_t credits;
uint64_t mid;
const DATA_BLOB *signing_key = NULL;
if (!tevent_req_is_in_progress(reqs[i])) {
return NT_STATUS_INTERNAL_ERROR;
}
state = tevent_req_data(reqs[i], struct smbXcli_req_state);
if (!smbXcli_conn_is_connected(state->conn)) {
return NT_STATUS_CONNECTION_DISCONNECTED;
}
if ((state->conn->protocol != PROTOCOL_NONE) &&
(state->conn->protocol < PROTOCOL_SMB2_02)) {
return NT_STATUS_REVISION_MISMATCH;
}
opcode = SVAL(state->smb2.hdr, SMB2_HDR_OPCODE);
if (opcode == SMB2_OP_CANCEL) {
goto skip_credits;
}
avail = UINT64_MAX - state->conn->smb2.mid;
if (avail < 1) {
return NT_STATUS_CONNECTION_ABORTED;
}
if (state->conn->smb2.server.capabilities & SMB2_CAP_LARGE_MTU) {
uint32_t max_dyn_len = 1;
max_dyn_len = MAX(max_dyn_len, state->smb2.dyn_len);
max_dyn_len = MAX(max_dyn_len, state->smb2.max_dyn_len);
charge = (max_dyn_len - 1)/ 65536 + 1;
} else {
charge = 1;
}
charge = MAX(state->smb2.credit_charge, charge);
avail = MIN(avail, state->conn->smb2.cur_credits);
if (avail < charge) {
return NT_STATUS_INTERNAL_ERROR;
}
credits = 0;
if (state->conn->smb2.max_credits > state->conn->smb2.cur_credits) {
credits = state->conn->smb2.max_credits -
state->conn->smb2.cur_credits;
}
if (state->conn->smb2.max_credits >= state->conn->smb2.cur_credits) {
credits += 1;
}
mid = state->conn->smb2.mid;
state->conn->smb2.mid += charge;
state->conn->smb2.cur_credits -= charge;
if (state->conn->smb2.server.capabilities & SMB2_CAP_LARGE_MTU) {
SSVAL(state->smb2.hdr, SMB2_HDR_CREDIT_CHARGE, charge);
}
SSVAL(state->smb2.hdr, SMB2_HDR_CREDIT, credits);
SBVAL(state->smb2.hdr, SMB2_HDR_MESSAGE_ID, mid);
state->smb2.cancel_flags = 0;
state->smb2.cancel_mid = mid;
state->smb2.cancel_aid = 0;
skip_credits:
if (state->session && encryption_key == NULL) {
/*
* We prefer the channel signing key if it is
* already there.
*/
if (state->smb2.should_sign) {
signing_key = &state->session->smb2_channel.signing_key;
}
/*
* If it is a channel binding, we already have the main
* signing key and try that one.
*/
if (signing_key && signing_key->length == 0) {
signing_key = &state->session->smb2->signing_key;
}
/*
* If we do not have any session key yet, we skip the
* signing of SMB2_OP_SESSSETUP requests.
*/
if (signing_key && signing_key->length == 0) {
signing_key = NULL;
}
}
hdr_iov = num_iov;
iov[num_iov].iov_base = state->smb2.hdr;
iov[num_iov].iov_len = sizeof(state->smb2.hdr);
num_iov += 1;
iov[num_iov].iov_base = discard_const(state->smb2.fixed);
iov[num_iov].iov_len = state->smb2.fixed_len;
num_iov += 1;
if (state->smb2.dyn != NULL) {
iov[num_iov].iov_base = discard_const(state->smb2.dyn);
iov[num_iov].iov_len = state->smb2.dyn_len;
num_iov += 1;
}
reqlen = sizeof(state->smb2.hdr);
reqlen += state->smb2.fixed_len;
reqlen += state->smb2.dyn_len;
if (i < num_reqs-1) {
if ((reqlen % 8) > 0) {
uint8_t pad = 8 - (reqlen % 8);
iov[num_iov].iov_base = state->smb2.pad;
iov[num_iov].iov_len = pad;
num_iov += 1;
reqlen += pad;
}
SIVAL(state->smb2.hdr, SMB2_HDR_NEXT_COMMAND, reqlen);
}
state->smb2.encryption_session_id = encryption_session_id;
if (signing_key != NULL) {
NTSTATUS status;
status = smb2_signing_sign_pdu(*signing_key,
state->session->conn->protocol,
&iov[hdr_iov], num_iov - hdr_iov);
if (!NT_STATUS_IS_OK(status)) {
return status;
}
}
nbt_len += reqlen;
ret = smbXcli_req_set_pending(reqs[i]);
if (!ret) {
return NT_STATUS_NO_MEMORY;
}
}
state = tevent_req_data(reqs[0], struct smbXcli_req_state);
_smb_setlen_tcp(state->length_hdr, nbt_len);
iov[0].iov_base = state->length_hdr;
iov[0].iov_len = sizeof(state->length_hdr);
if (encryption_key != NULL) {
NTSTATUS status;
size_t buflen = nbt_len - SMB2_TF_HDR_SIZE;
uint8_t *buf;
int vi;
buf = talloc_array(iov, uint8_t, buflen);
if (buf == NULL) {
return NT_STATUS_NO_MEMORY;
}
/*
* We copy the buffers before encrypting them,
* this is at least currently needed for the
* to keep state->smb2.hdr.
*
* Also the callers may expect there buffers
* to be const.
*/
for (vi = tf_iov + 1; vi < num_iov; vi++) {
struct iovec *v = &iov[vi];
const uint8_t *o = (const uint8_t *)v->iov_base;
memcpy(buf, o, v->iov_len);
v->iov_base = (void *)buf;
buf += v->iov_len;
}
status = smb2_signing_encrypt_pdu(*encryption_key,
state->conn->smb2.server.cipher,
&iov[tf_iov], num_iov - tf_iov);
if (!NT_STATUS_IS_OK(status)) {
return status;
}
}
if (state->conn->dispatch_incoming == NULL) {
state->conn->dispatch_incoming = smb2cli_conn_dispatch_incoming;
}
subreq = writev_send(state, state->ev, state->conn->outgoing,
state->conn->sock_fd, false, iov, num_iov);
if (subreq == NULL) {
return NT_STATUS_NO_MEMORY;
}
tevent_req_set_callback(subreq, smb2cli_req_writev_done, reqs[0]);
state->write_req = subreq;
return NT_STATUS_OK;
}
| 10,024 |
8,660 | 0 |
size_t mptsas_config_phy_0(MPTSASState *s, uint8_t **data, int address)
{
int phy_handle = -1;
int dev_handle = -1;
int i = mptsas_phy_addr_get(s, address);
SCSIDevice *dev;
if (i < 0) {
trace_mptsas_config_sas_phy(s, address, i, phy_handle, dev_handle, 0);
return i;
}
dev = mptsas_phy_get_device(s, i, &phy_handle, &dev_handle);
trace_mptsas_config_sas_phy(s, address, i, phy_handle, dev_handle, 0);
return MPTSAS_CONFIG_PACK_EXT(0, MPI_CONFIG_EXTPAGETYPE_SAS_PHY, 0x01,
"w*wqwb*blbb*b*b*l",
dev_handle, s->sas_addr, dev_handle, i,
(dev
? MPI_SAS_DEVICE_INFO_END_DEVICE /* | MPI_SAS_DEVICE_INFO_SSP_TARGET?? */
: MPI_SAS_DEVICE_INFO_NO_DEVICE),
(MPI_SAS_IOUNIT0_RATE_3_0 << 4) | MPI_SAS_IOUNIT0_RATE_1_5,
(MPI_SAS_IOUNIT0_RATE_3_0 << 4) | MPI_SAS_IOUNIT0_RATE_1_5);
}
| 10,025 |
75,679 | 0 |
hfs_cat_read_file_folder_record(HFS_INFO * hfs, TSK_OFF_T off,
hfs_file_folder * record)
{
TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info);
ssize_t cnt;
char rec_type[2];
memset(record, 0, sizeof(hfs_file_folder));
cnt = tsk_fs_attr_read(hfs->catalog_attr, off, rec_type, 2, 0);
if (cnt != 2) {
if (cnt >= 0) {
tsk_error_reset();
tsk_error_set_errno(TSK_ERR_FS_READ);
}
tsk_error_set_errstr2
("hfs_cat_read_file_folder_record: Error reading record type from catalog offset %"
PRIuOFF " (header)", off);
return 1;
}
if (tsk_getu16(fs->endian, rec_type) == HFS_FOLDER_RECORD) {
cnt =
tsk_fs_attr_read(hfs->catalog_attr, off, (char *) record,
sizeof(hfs_folder), 0);
if (cnt != sizeof(hfs_folder)) {
if (cnt >= 0) {
tsk_error_reset();
tsk_error_set_errno(TSK_ERR_FS_READ);
}
tsk_error_set_errstr2
("hfs_cat_read_file_folder_record: Error reading catalog offset %"
PRIuOFF " (folder)", off);
return 1;
}
}
else if (tsk_getu16(fs->endian, rec_type) == HFS_FILE_RECORD) {
cnt =
tsk_fs_attr_read(hfs->catalog_attr, off, (char *) record,
sizeof(hfs_file), 0);
if (cnt != sizeof(hfs_file)) {
if (cnt >= 0) {
tsk_error_reset();
tsk_error_set_errno(TSK_ERR_FS_READ);
}
tsk_error_set_errstr2
("hfs_cat_read_file_folder_record: Error reading catalog offset %"
PRIuOFF " (file)", off);
return 1;
}
}
else {
tsk_error_set_errno(TSK_ERR_FS_GENFS);
tsk_error_set_errstr
("hfs_cat_read_file_folder_record: unexpected record type %"
PRIu16, tsk_getu16(fs->endian, rec_type));
return 1;
}
return 0;
}
| 10,026 |
168,943 | 0 |
DevToolsAgentHostImpl::~DevToolsAgentHostImpl() {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
NotifyDestroyed();
}
| 10,027 |
60,408 | 0 |
static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
{
struct packet_fanout *f = po->fanout;
int i;
spin_lock(&f->lock);
for (i = 0; i < f->num_members; i++) {
if (f->arr[i] == sk)
break;
}
BUG_ON(i >= f->num_members);
f->arr[i] = f->arr[f->num_members - 1];
f->num_members--;
if (f->num_members == 0)
__dev_remove_pack(&f->prot_hook);
spin_unlock(&f->lock);
}
| 10,028 |
104,936 | 0 |
::ppapi::TrackerBase* GetTrackerBase() {
return ResourceTracker::Get();
}
| 10,029 |
45,539 | 0 |
static void authenc_esn_verify_ahash_update_done(struct crypto_async_request *areq,
int err)
{
u8 *ihash;
unsigned int authsize;
struct ablkcipher_request *abreq;
struct aead_request *req = areq->data;
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
unsigned int cryptlen = req->cryptlen;
if (err)
goto out;
ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result,
areq_ctx->cryptlen);
ahash_request_set_callback(ahreq,
aead_request_flags(req) &
CRYPTO_TFM_REQ_MAY_SLEEP,
areq_ctx->update_complete2, req);
err = crypto_ahash_update(ahreq);
if (err)
goto out;
ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result,
areq_ctx->trailen);
ahash_request_set_callback(ahreq, aead_request_flags(req) &
CRYPTO_TFM_REQ_MAY_SLEEP,
areq_ctx->complete, req);
err = crypto_ahash_finup(ahreq);
if (err)
goto out;
authsize = crypto_aead_authsize(authenc_esn);
cryptlen -= authsize;
ihash = ahreq->result + authsize;
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
if (err)
goto out;
abreq = aead_request_ctx(req);
ablkcipher_request_set_tfm(abreq, ctx->enc);
ablkcipher_request_set_callback(abreq, aead_request_flags(req),
req->base.complete, req->base.data);
ablkcipher_request_set_crypt(abreq, req->src, req->dst,
cryptlen, req->iv);
err = crypto_ablkcipher_decrypt(abreq);
out:
authenc_esn_request_complete(req, err);
}
| 10,030 |
177,619 | 0 |
int num_buffers() const { return num_buffers_; }
| 10,031 |
30,734 | 0 |
static int rfcomm_sock_create(struct net *net, struct socket *sock,
int protocol, int kern)
{
struct sock *sk;
BT_DBG("sock %p", sock);
sock->state = SS_UNCONNECTED;
if (sock->type != SOCK_STREAM && sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;
sock->ops = &rfcomm_sock_ops;
sk = rfcomm_sock_alloc(net, sock, protocol, GFP_ATOMIC);
if (!sk)
return -ENOMEM;
rfcomm_sock_init(sk, NULL);
return 0;
}
| 10,032 |
14,808 | 0 |
ftp_rename(ftpbuf_t *ftp, const char *src, const char *dest)
{
if (ftp == NULL) {
return 0;
}
if (!ftp_putcmd(ftp, "RNFR", src)) {
return 0;
}
if (!ftp_getresp(ftp) || ftp->resp != 350) {
return 0;
}
if (!ftp_putcmd(ftp, "RNTO", dest)) {
return 0;
}
if (!ftp_getresp(ftp) || ftp->resp != 250) {
return 0;
}
return 1;
}
| 10,033 |
64,563 | 0 |
int _yr_re_fiber_split(
RE_FIBER_LIST* fiber_list,
RE_FIBER_POOL* fiber_pool,
RE_FIBER* fiber,
RE_FIBER** new_fiber)
{
int32_t i;
FAIL_ON_ERROR(_yr_re_fiber_create(fiber_pool, new_fiber));
(*new_fiber)->sp = fiber->sp;
(*new_fiber)->ip = fiber->ip;
(*new_fiber)->rc = fiber->rc;
for (i = 0; i <= fiber->sp; i++)
(*new_fiber)->stack[i] = fiber->stack[i];
(*new_fiber)->next = fiber->next;
(*new_fiber)->prev = fiber;
if (fiber->next != NULL)
fiber->next->prev = *new_fiber;
fiber->next = *new_fiber;
if (fiber_list->tail == fiber)
fiber_list->tail = *new_fiber;
assert(fiber_list->tail->next == NULL);
assert(fiber_list->head->prev == NULL);
return ERROR_SUCCESS;
}
| 10,034 |
174,656 | 0 |
void HeapCache::free_heap(const wp<IBinder>& binder)
{
sp<IMemoryHeap> rel;
{
Mutex::Autolock _l(mHeapCacheLock);
ssize_t i = mHeapCache.indexOfKey(binder);
if (i>=0) {
heap_info_t& info(mHeapCache.editValueAt(i));
int32_t c = android_atomic_dec(&info.count);
if (c == 1) {
ALOGD_IF(VERBOSE,
"removing binder=%p, heap=%p, size=%zu, fd=%d, count=%d",
binder.unsafe_get(), info.heap.get(),
static_cast<BpMemoryHeap*>(info.heap.get())->mSize,
static_cast<BpMemoryHeap*>(info.heap.get())->mHeapId,
info.count);
rel = mHeapCache.valueAt(i).heap;
mHeapCache.removeItemsAt(i);
}
} else {
ALOGE("free_heap binder=%p not found!!!", binder.unsafe_get());
}
}
}
| 10,035 |
7,660 | 0 |
static int v9fs_receive_response(V9fsProxy *proxy, int type,
int *status, void *response)
{
int retval;
ProxyHeader header;
struct iovec *reply = &proxy->in_iovec;
*status = 0;
reply->iov_len = 0;
retval = socket_read(proxy->sockfd, reply->iov_base, PROXY_HDR_SZ);
if (retval < 0) {
return retval;
}
reply->iov_len = PROXY_HDR_SZ;
proxy_unmarshal(reply, 0, "dd", &header.type, &header.size);
/*
* if response size > PROXY_MAX_IO_SZ, read the response but ignore it and
* return -ENOBUFS
*/
if (header.size > PROXY_MAX_IO_SZ) {
int count;
while (header.size > 0) {
count = MIN(PROXY_MAX_IO_SZ, header.size);
count = socket_read(proxy->sockfd, reply->iov_base, count);
if (count < 0) {
return count;
}
header.size -= count;
}
*status = -ENOBUFS;
return 0;
}
retval = socket_read(proxy->sockfd,
reply->iov_base + PROXY_HDR_SZ, header.size);
if (retval < 0) {
return retval;
}
reply->iov_len += header.size;
/* there was an error during processing request */
if (header.type == T_ERROR) {
int ret;
ret = proxy_unmarshal(reply, PROXY_HDR_SZ, "d", status);
if (ret < 0) {
*status = ret;
}
return 0;
}
switch (type) {
case T_LSTAT: {
ProxyStat prstat;
retval = proxy_unmarshal(reply, PROXY_HDR_SZ,
"qqqdddqqqqqqqqqq", &prstat.st_dev,
&prstat.st_ino, &prstat.st_nlink,
&prstat.st_mode, &prstat.st_uid,
&prstat.st_gid, &prstat.st_rdev,
&prstat.st_size, &prstat.st_blksize,
&prstat.st_blocks,
&prstat.st_atim_sec, &prstat.st_atim_nsec,
&prstat.st_mtim_sec, &prstat.st_mtim_nsec,
&prstat.st_ctim_sec, &prstat.st_ctim_nsec);
prstat_to_stat(response, &prstat);
break;
}
case T_STATFS: {
ProxyStatFS prstfs;
retval = proxy_unmarshal(reply, PROXY_HDR_SZ,
"qqqqqqqqqqq", &prstfs.f_type,
&prstfs.f_bsize, &prstfs.f_blocks,
&prstfs.f_bfree, &prstfs.f_bavail,
&prstfs.f_files, &prstfs.f_ffree,
&prstfs.f_fsid[0], &prstfs.f_fsid[1],
&prstfs.f_namelen, &prstfs.f_frsize);
prstatfs_to_statfs(response, &prstfs);
break;
}
case T_READLINK: {
V9fsString target;
v9fs_string_init(&target);
retval = proxy_unmarshal(reply, PROXY_HDR_SZ, "s", &target);
strcpy(response, target.data);
v9fs_string_free(&target);
break;
}
case T_LGETXATTR:
case T_LLISTXATTR: {
V9fsString xattr;
v9fs_string_init(&xattr);
retval = proxy_unmarshal(reply, PROXY_HDR_SZ, "s", &xattr);
memcpy(response, xattr.data, xattr.size);
v9fs_string_free(&xattr);
break;
}
case T_GETVERSION:
proxy_unmarshal(reply, PROXY_HDR_SZ, "q", response);
break;
default:
return -1;
}
if (retval < 0) {
*status = retval;
}
return 0;
}
| 10,036 |
22,285 | 0 |
SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
{
struct task_struct *p;
int retval;
if (pid < 0)
return -EINVAL;
retval = -ESRCH;
rcu_read_lock();
p = find_process_by_pid(pid);
if (p) {
retval = security_task_getscheduler(p);
if (!retval)
retval = p->policy
| (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
}
rcu_read_unlock();
return retval;
}
| 10,037 |
96,763 | 0 |
prepare_decryption( RIJNDAEL_context *ctx )
{
int r;
if (0)
;
#ifdef USE_AESNI
else if (ctx->use_aesni)
{
_gcry_aes_aesni_prepare_decryption (ctx);
}
#endif /*USE_AESNI*/
#ifdef USE_SSSE3
else if (ctx->use_ssse3)
{
_gcry_aes_ssse3_prepare_decryption (ctx);
}
#endif /*USE_SSSE3*/
#ifdef USE_ARM_CE
else if (ctx->use_arm_ce)
{
_gcry_aes_armv8_ce_prepare_decryption (ctx);
}
#endif /*USE_SSSE3*/
#ifdef USE_PADLOCK
else if (ctx->use_padlock)
{
/* Padlock does not need decryption subkeys. */
}
#endif /*USE_PADLOCK*/
else
{
const byte *sbox = ((const byte *)encT) + 1;
prefetch_enc();
prefetch_dec();
ctx->keyschdec32[0][0] = ctx->keyschenc32[0][0];
ctx->keyschdec32[0][1] = ctx->keyschenc32[0][1];
ctx->keyschdec32[0][2] = ctx->keyschenc32[0][2];
ctx->keyschdec32[0][3] = ctx->keyschenc32[0][3];
for (r = 1; r < ctx->rounds; r++)
{
u32 *wi = ctx->keyschenc32[r];
u32 *wo = ctx->keyschdec32[r];
u32 wt;
wt = wi[0];
wo[0] = rol(decT[sbox[(byte)(wt >> 0) * 4]], 8 * 0)
^ rol(decT[sbox[(byte)(wt >> 8) * 4]], 8 * 1)
^ rol(decT[sbox[(byte)(wt >> 16) * 4]], 8 * 2)
^ rol(decT[sbox[(byte)(wt >> 24) * 4]], 8 * 3);
wt = wi[1];
wo[1] = rol(decT[sbox[(byte)(wt >> 0) * 4]], 8 * 0)
^ rol(decT[sbox[(byte)(wt >> 8) * 4]], 8 * 1)
^ rol(decT[sbox[(byte)(wt >> 16) * 4]], 8 * 2)
^ rol(decT[sbox[(byte)(wt >> 24) * 4]], 8 * 3);
wt = wi[2];
wo[2] = rol(decT[sbox[(byte)(wt >> 0) * 4]], 8 * 0)
^ rol(decT[sbox[(byte)(wt >> 8) * 4]], 8 * 1)
^ rol(decT[sbox[(byte)(wt >> 16) * 4]], 8 * 2)
^ rol(decT[sbox[(byte)(wt >> 24) * 4]], 8 * 3);
wt = wi[3];
wo[3] = rol(decT[sbox[(byte)(wt >> 0) * 4]], 8 * 0)
^ rol(decT[sbox[(byte)(wt >> 8) * 4]], 8 * 1)
^ rol(decT[sbox[(byte)(wt >> 16) * 4]], 8 * 2)
^ rol(decT[sbox[(byte)(wt >> 24) * 4]], 8 * 3);
}
ctx->keyschdec32[r][0] = ctx->keyschenc32[r][0];
ctx->keyschdec32[r][1] = ctx->keyschenc32[r][1];
ctx->keyschdec32[r][2] = ctx->keyschenc32[r][2];
ctx->keyschdec32[r][3] = ctx->keyschenc32[r][3];
}
}
| 10,038 |
88,625 | 0 |
int mwifiex_uap_prepare_cmd(struct mwifiex_private *priv, u16 cmd_no,
u16 cmd_action, u32 type,
void *data_buf, void *cmd_buf)
{
struct host_cmd_ds_command *cmd = cmd_buf;
switch (cmd_no) {
case HostCmd_CMD_UAP_SYS_CONFIG:
if (mwifiex_cmd_uap_sys_config(cmd, cmd_action, type, data_buf))
return -1;
break;
case HostCmd_CMD_UAP_BSS_START:
case HostCmd_CMD_UAP_BSS_STOP:
case HOST_CMD_APCMD_SYS_RESET:
case HOST_CMD_APCMD_STA_LIST:
cmd->command = cpu_to_le16(cmd_no);
cmd->size = cpu_to_le16(S_DS_GEN);
break;
case HostCmd_CMD_UAP_STA_DEAUTH:
if (mwifiex_cmd_uap_sta_deauth(priv, cmd, data_buf))
return -1;
break;
case HostCmd_CMD_CHAN_REPORT_REQUEST:
if (mwifiex_cmd_issue_chan_report_request(priv, cmd_buf,
data_buf))
return -1;
break;
default:
mwifiex_dbg(priv->adapter, ERROR,
"PREP_CMD: unknown cmd %#x\n", cmd_no);
return -1;
}
return 0;
}
| 10,039 |
102,501 | 0 |
LayerTreeCoordinator::~LayerTreeCoordinator()
{
HashSet<WebCore::CoordinatedGraphicsLayer*> registeredLayers;
registeredLayers.swap(m_registeredLayers);
HashSet<WebCore::CoordinatedGraphicsLayer*>::iterator end = registeredLayers.end();
for (HashSet<WebCore::CoordinatedGraphicsLayer*>::iterator it = registeredLayers.begin(); it != end; ++it)
(*it)->setCoordinatedGraphicsLayerClient(0);
}
| 10,040 |
62,438 | 0 |
print_32bits_val(netdissect_options *ndo, const uint32_t *dat)
{
ND_PRINT((ndo, "%lu", (u_long)EXTRACT_32BITS(dat)));
}
| 10,041 |
87,919 | 0 |
static int fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
struct fm10k_ring *rx_ring,
int budget)
{
struct sk_buff *skb = rx_ring->skb;
unsigned int total_bytes = 0, total_packets = 0;
u16 cleaned_count = fm10k_desc_unused(rx_ring);
while (likely(total_packets < budget)) {
union fm10k_rx_desc *rx_desc;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= FM10K_RX_BUFFER_WRITE) {
fm10k_alloc_rx_buffers(rx_ring, cleaned_count);
cleaned_count = 0;
}
rx_desc = FM10K_RX_DESC(rx_ring, rx_ring->next_to_clean);
if (!rx_desc->d.staterr)
break;
/* This memory barrier is needed to keep us from reading
* any other fields out of the rx_desc until we know the
* descriptor has been written back
*/
dma_rmb();
/* retrieve a buffer from the ring */
skb = fm10k_fetch_rx_buffer(rx_ring, rx_desc, skb);
/* exit if we failed to retrieve a buffer */
if (!skb)
break;
cleaned_count++;
/* fetch next buffer in frame if non-eop */
if (fm10k_is_non_eop(rx_ring, rx_desc))
continue;
/* verify the packet layout is correct */
if (fm10k_cleanup_headers(rx_ring, rx_desc, skb)) {
skb = NULL;
continue;
}
/* populate checksum, timestamp, VLAN, and protocol */
total_bytes += fm10k_process_skb_fields(rx_ring, rx_desc, skb);
fm10k_receive_skb(q_vector, skb);
/* reset skb pointer */
skb = NULL;
/* update budget accounting */
total_packets++;
}
/* place incomplete frames back on ring for completion */
rx_ring->skb = skb;
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.packets += total_packets;
rx_ring->stats.bytes += total_bytes;
u64_stats_update_end(&rx_ring->syncp);
q_vector->rx.total_packets += total_packets;
q_vector->rx.total_bytes += total_bytes;
return total_packets;
}
| 10,042 |
8,680 | 0 |
append_uri_pathel (const char *b, const char *e, bool escaped,
struct growable *dest)
{
const char *p;
int quoted, outlen;
int mask;
if (opt.restrict_files_os == restrict_unix)
mask = filechr_not_unix;
else if (opt.restrict_files_os == restrict_vms)
mask = filechr_not_vms;
else
mask = filechr_not_windows;
if (opt.restrict_files_ctrl)
mask |= filechr_control;
/* Copy [b, e) to PATHEL and URL-unescape it. */
if (escaped)
{
char *unescaped;
BOUNDED_TO_ALLOCA (b, e, unescaped);
url_unescape (unescaped);
b = unescaped;
e = unescaped + strlen (unescaped);
}
/* Defang ".." when found as component of path. Remember that path
comes from the URL and might contain malicious input. */
if (e - b == 2 && b[0] == '.' && b[1] == '.')
{
b = "%2E%2E";
e = b + 6;
}
/* Walk the PATHEL string and check how many characters we'll need
to quote. */
quoted = 0;
for (p = b; p < e; p++)
if (FILE_CHAR_TEST (*p, mask))
++quoted;
/* Calculate the length of the output string. e-b is the input
string length. Each quoted char introduces two additional
characters in the string, hence 2*quoted. */
outlen = (e - b) + (2 * quoted);
GROW (dest, outlen);
if (!quoted)
{
/* If there's nothing to quote, we can simply append the string
without processing it again. */
memcpy (TAIL (dest), b, outlen);
}
else
{
char *q = TAIL (dest);
for (p = b; p < e; p++)
{
if (!FILE_CHAR_TEST (*p, mask))
*q++ = *p;
else
{
unsigned char ch = *p;
*q++ = '%';
*q++ = XNUM_TO_DIGIT (ch >> 4);
*q++ = XNUM_TO_DIGIT (ch & 0xf);
}
}
assert (q - TAIL (dest) == outlen);
}
/* Perform inline case transformation if required. */
if (opt.restrict_files_case == restrict_lowercase
|| opt.restrict_files_case == restrict_uppercase)
{
char *q;
for (q = TAIL (dest); q < TAIL (dest) + outlen; ++q)
{
if (opt.restrict_files_case == restrict_lowercase)
*q = c_tolower (*q);
else
*q = c_toupper (*q);
}
}
TAIL_INCR (dest, outlen);
append_null (dest);
}
| 10,043 |
130,145 | 0 |
bool GenerateStateFromBeaconAndAttemptCount(HKEY* key, DWORD blacklist_state) {
LONG result = 0;
if (blacklist_state == blacklist::BLACKLIST_ENABLED) {
return (SetDWValue(key,
blacklist::kBeaconAttemptCount,
static_cast<DWORD>(0)) == ERROR_SUCCESS);
} else {
DWORD attempt_count = 0;
DWORD attempt_count_size = sizeof(attempt_count);
result = ::RegQueryValueEx(*key,
blacklist::kBeaconAttemptCount,
0,
NULL,
reinterpret_cast<LPBYTE>(&attempt_count),
&attempt_count_size);
if (result == ERROR_FILE_NOT_FOUND)
attempt_count = 0;
else if (result != ERROR_SUCCESS)
return false;
++attempt_count;
SetDWValue(key, blacklist::kBeaconAttemptCount, attempt_count);
if (attempt_count >= blacklist::kBeaconMaxAttempts) {
blacklist_state = blacklist::BLACKLIST_SETUP_FAILED;
SetDWValue(key, blacklist::kBeaconState, blacklist_state);
}
return false;
}
}
| 10,044 |
58,092 | 0 |
static int snd_compr_start(struct snd_compr_stream *stream)
{
int retval;
if (stream->runtime->state != SNDRV_PCM_STATE_PREPARED)
return -EPERM;
retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_START);
if (!retval)
stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
return retval;
}
| 10,045 |
123,093 | 0 |
int RenderWidgetHostViewAndroid::GetNativeImeAdapter() {
return reinterpret_cast<int>(&ime_adapter_android_);
}
| 10,046 |
174,094 | 0 |
virtual void allocateBuffers(bool async, uint32_t width, uint32_t height,
PixelFormat format, uint32_t usage) {
Parcel data, reply;
data.writeInterfaceToken(IGraphicBufferProducer::getInterfaceDescriptor());
data.writeInt32(static_cast<int32_t>(async));
data.writeUint32(width);
data.writeUint32(height);
data.writeInt32(static_cast<int32_t>(format));
data.writeUint32(usage);
status_t result = remote()->transact(ALLOCATE_BUFFERS, data, &reply);
if (result != NO_ERROR) {
ALOGE("allocateBuffers failed to transact: %d", result);
}
}
| 10,047 |
82,961 | 0 |
static int bin_mem(RCore *r, int mode) {
RList *mem = NULL;
if (!r) return false;
if (!IS_MODE_JSON(mode)) {
if (!(IS_MODE_RAD (mode) || IS_MODE_SET (mode))) {
r_cons_println ("[Memory]\n");
}
}
if (!(mem = r_bin_get_mem (r->bin))) {
if (IS_MODE_JSON (mode)) {
r_cons_print("[]");
}
return false;
}
if (IS_MODE_JSON (mode)) {
r_cons_print ("[");
bin_mem_print (mem, 7, 0, R_CORE_BIN_JSON);
r_cons_println ("]");
return true;
} else if (!(IS_MODE_RAD (mode) || IS_MODE_SET (mode))) {
bin_mem_print (mem, 7, 0, mode);
}
return true;
}
| 10,048 |
129,052 | 0 |
void writeUint32(uint32_t value)
{
append(Uint32Tag);
doWriteUint32(value);
}
| 10,049 |
86,696 | 0 |
static void blk_mq_free_bitmap(struct blk_mq_ctxmap *bitmap)
{
kfree(bitmap->map);
}
| 10,050 |
135,765 | 0 |
int InputMethodController::TextInputFlags() const {
Element* element = GetDocument().FocusedElement();
if (!element)
return kWebTextInputFlagNone;
int flags = 0;
const AtomicString& autocomplete =
element->getAttribute(HTMLNames::autocompleteAttr);
if (autocomplete == "on")
flags |= kWebTextInputFlagAutocompleteOn;
else if (autocomplete == "off")
flags |= kWebTextInputFlagAutocompleteOff;
const AtomicString& autocorrect =
element->getAttribute(HTMLNames::autocorrectAttr);
if (autocorrect == "on")
flags |= kWebTextInputFlagAutocorrectOn;
else if (autocorrect == "off")
flags |= kWebTextInputFlagAutocorrectOff;
SpellcheckAttributeState spellcheck = element->GetSpellcheckAttributeState();
if (spellcheck == kSpellcheckAttributeTrue)
flags |= kWebTextInputFlagSpellcheckOn;
else if (spellcheck == kSpellcheckAttributeFalse)
flags |= kWebTextInputFlagSpellcheckOff;
if (IsTextControlElement(element)) {
TextControlElement* text_control = ToTextControlElement(element);
if (text_control->SupportsAutocapitalize()) {
DEFINE_STATIC_LOCAL(const AtomicString, none, ("none"));
DEFINE_STATIC_LOCAL(const AtomicString, characters, ("characters"));
DEFINE_STATIC_LOCAL(const AtomicString, words, ("words"));
DEFINE_STATIC_LOCAL(const AtomicString, sentences, ("sentences"));
const AtomicString& autocapitalize = text_control->autocapitalize();
if (autocapitalize == none)
flags |= kWebTextInputFlagAutocapitalizeNone;
else if (autocapitalize == characters)
flags |= kWebTextInputFlagAutocapitalizeCharacters;
else if (autocapitalize == words)
flags |= kWebTextInputFlagAutocapitalizeWords;
else if (autocapitalize == sentences)
flags |= kWebTextInputFlagAutocapitalizeSentences;
else
NOTREACHED();
}
}
return flags;
}
| 10,051 |
9,101 | 0 |
static int vrend_decode_create_stream_output_target(struct vrend_decode_ctx *ctx, uint32_t handle, uint16_t length)
{
uint32_t res_handle, buffer_size, buffer_offset;
if (length != VIRGL_OBJ_STREAMOUT_SIZE)
return EINVAL;
res_handle = get_buf_entry(ctx, VIRGL_OBJ_STREAMOUT_RES_HANDLE);
buffer_offset = get_buf_entry(ctx, VIRGL_OBJ_STREAMOUT_BUFFER_OFFSET);
buffer_size = get_buf_entry(ctx, VIRGL_OBJ_STREAMOUT_BUFFER_SIZE);
return vrend_create_so_target(ctx->grctx, handle, res_handle, buffer_offset,
buffer_size);
}
| 10,052 |
87,345 | 0 |
vips_foreign_load_gif_dispose( GObject *gobject )
{
VipsForeignLoadGif *gif = (VipsForeignLoadGif *) gobject;
vips_foreign_load_gif_close( gif );
VIPS_UNREF( gif->frame );
VIPS_UNREF( gif->previous );
VIPS_FREE( gif->comment );
VIPS_FREE( gif->line )
G_OBJECT_CLASS( vips_foreign_load_gif_parent_class )->
dispose( gobject );
}
| 10,053 |
121,620 | 0 |
inline bool SearchBuffer::needsMoreContext() const
{
return m_needsMoreContext;
}
| 10,054 |
37,697 | 0 |
struct assoc_array_edit *assoc_array_insert(struct assoc_array *array,
const struct assoc_array_ops *ops,
const void *index_key,
void *object)
{
struct assoc_array_walk_result result;
struct assoc_array_edit *edit;
pr_devel("-->%s()\n", __func__);
/* The leaf pointer we're given must not have the bottom bit set as we
* use those for type-marking the pointer. NULL pointers are also not
* allowed as they indicate an empty slot but we have to allow them
* here as they can be updated later.
*/
BUG_ON(assoc_array_ptr_is_meta(object));
edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
if (!edit)
return ERR_PTR(-ENOMEM);
edit->array = array;
edit->ops = ops;
edit->leaf = assoc_array_leaf_to_ptr(object);
edit->adjust_count_by = 1;
switch (assoc_array_walk(array, ops, index_key, &result)) {
case assoc_array_walk_tree_empty:
/* Allocate a root node if there isn't one yet */
if (!assoc_array_insert_in_empty_tree(edit))
goto enomem;
return edit;
case assoc_array_walk_found_terminal_node:
/* We found a node that doesn't have a node/shortcut pointer in
* the slot corresponding to the index key that we have to
* follow.
*/
if (!assoc_array_insert_into_terminal_node(edit, ops, index_key,
&result))
goto enomem;
return edit;
case assoc_array_walk_found_wrong_shortcut:
/* We found a shortcut that didn't match our key in a slot we
* needed to follow.
*/
if (!assoc_array_insert_mid_shortcut(edit, ops, &result))
goto enomem;
return edit;
}
enomem:
/* Clean up after an out of memory error */
pr_devel("enomem\n");
assoc_array_cancel_edit(edit);
return ERR_PTR(-ENOMEM);
}
| 10,055 |
135,076 | 0 |
void AppCacheHost::ObserveGroupBeingUpdated(AppCacheGroup* group) {
DCHECK(!group_being_updated_.get());
group_being_updated_ = group;
newest_cache_of_group_being_updated_ = group->newest_complete_cache();
group->AddUpdateObserver(this);
}
| 10,056 |
105,005 | 0 |
HttpBridgeFactory::HttpBridgeFactory(
net::URLRequestContextGetter* baseline_context_getter) {
DCHECK(baseline_context_getter != NULL);
request_context_getter_ =
new HttpBridge::RequestContextGetter(baseline_context_getter);
}
| 10,057 |
82,793 | 0 |
static void _anal_calls(RCore *core, ut64 addr, ut64 addr_end) {
RAnalOp op;
int bufi;
int depth = r_config_get_i (core->config, "anal.depth");
const int addrbytes = core->io->addrbytes;
const int bsz = 4096;
ut8 *buf;
ut8 *block;
bufi = 0;
if (addr_end - addr > UT32_MAX) {
return;
}
buf = malloc (bsz);
block = malloc (bsz);
if (!buf || !block) {
eprintf ("Error: cannot allocate buf or block\n");
free (buf);
free (block);
return;
}
int minop = r_anal_archinfo (core->anal, R_ANAL_ARCHINFO_MIN_OP_SIZE);
if (minop < 1) {
minop = 1;
}
while (addr < addr_end) {
if (r_cons_is_breaked ()) {
break;
}
if (bufi > 4000) {
bufi = 0;
}
if (!bufi) {
r_io_read_at (core->io, addr, buf, bsz);
}
memset (block, -1, bsz);
if (!memcmp (buf, block, bsz)) {
addr += bsz;
continue;
}
memset (block, 0, bsz);
if (!memcmp (buf, block, bsz)) {
addr += bsz;
continue;
}
if (r_anal_op (core->anal, &op, addr, buf + bufi, bsz - bufi, 0) > 0) {
if (op.size < 1) {
op.size = minop;
}
if (op.type == R_ANAL_OP_TYPE_CALL) {
#if JAYRO_03
#error FUCK
if (!anal_is_bad_call (core, from, to, addr, buf, bufi)) {
fcn = r_anal_get_fcn_in (core->anal, op.jump, R_ANAL_FCN_TYPE_ROOT);
if (!fcn) {
r_core_anal_fcn (core, op.jump, addr,
R_ANAL_REF_TYPE_NULL, depth);
}
}
#else
r_anal_xrefs_set (core->anal, R_ANAL_REF_TYPE_CALL, addr, op.jump);
if (r_io_is_valid_offset (core->io, op.jump, 1)) {
r_core_anal_fcn (core, op.jump, addr, R_ANAL_REF_TYPE_NULL, depth);
}
#endif
}
} else {
op.size = minop;
}
if ((int)op.size < 1) {
op.size = minop;
}
addr += op.size;
bufi += addrbytes * op.size;
r_anal_op_fini (&op);
}
free (buf);
free (block);
}
| 10,058 |
79,622 | 0 |
char *imap_fix_path(struct ImapData *idata, const char *mailbox, char *path, size_t plen)
{
int i = 0;
char delim = '\0';
if (idata)
delim = idata->delim;
while (mailbox && *mailbox && i < plen - 1)
{
if ((ImapDelimChars && strchr(ImapDelimChars, *mailbox)) || (delim && *mailbox == delim))
{
/* use connection delimiter if known. Otherwise use user delimiter */
if (!idata)
delim = *mailbox;
while (*mailbox && ((ImapDelimChars && strchr(ImapDelimChars, *mailbox)) ||
(delim && *mailbox == delim)))
{
mailbox++;
}
path[i] = delim;
}
else
{
path[i] = *mailbox;
mailbox++;
}
i++;
}
if (i && path[--i] != delim)
i++;
path[i] = '\0';
return path;
}
| 10,059 |
66,068 | 0 |
static int do_boot_mode(struct edgeport_serial *serial,
const struct firmware *fw)
{
struct device *dev = &serial->serial->interface->dev;
int status = 0;
struct edge_ti_manuf_descriptor *ti_manuf_desc;
struct edgeport_fw_hdr *fw_hdr = (struct edgeport_fw_hdr *)fw->data;
dev_dbg(dev, "%s - RUNNING IN BOOT MODE\n", __func__);
/* Configure the TI device so we can use the BULK pipes for download */
status = config_boot_dev(serial->serial->dev);
if (status)
return status;
if (le16_to_cpu(serial->serial->dev->descriptor.idVendor)
!= USB_VENDOR_ID_ION) {
dev_dbg(dev, "%s - VID = 0x%x\n", __func__,
le16_to_cpu(serial->serial->dev->descriptor.idVendor));
serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II;
goto stayinbootmode;
}
/*
* We have an ION device (I2c Must be programmed)
* Determine I2C image type
*/
if (i2c_type_bootmode(serial))
goto stayinbootmode;
/* Check for ION Vendor ID and that the I2C is valid */
if (!check_i2c_image(serial)) {
struct ti_i2c_image_header *header;
int i;
__u8 cs = 0;
__u8 *buffer;
int buffer_size;
/*
* Validate Hardware version number
* Read Manufacturing Descriptor from TI Based Edgeport
*/
ti_manuf_desc = kmalloc(sizeof(*ti_manuf_desc), GFP_KERNEL);
if (!ti_manuf_desc)
return -ENOMEM;
status = get_manuf_info(serial, (__u8 *)ti_manuf_desc);
if (status) {
kfree(ti_manuf_desc);
goto stayinbootmode;
}
/* Check for version 2 */
if (!ignore_cpu_rev && ti_cpu_rev(ti_manuf_desc) < 2) {
dev_dbg(dev, "%s - Wrong CPU Rev %d (Must be 2)\n",
__func__, ti_cpu_rev(ti_manuf_desc));
kfree(ti_manuf_desc);
goto stayinbootmode;
}
kfree(ti_manuf_desc);
/*
* In order to update the I2C firmware we must change the type
* 2 record to type 0xF2. This will force the UMP to come up
* in Boot Mode. Then while in boot mode, the driver will
* download the latest firmware (padded to 15.5k) into the
* UMP ram. Finally when the device comes back up in download
* mode the driver will cause the new firmware to be copied
* from the UMP Ram to I2C and the firmware will update the
* record type from 0xf2 to 0x02.
*
* Do we really have to copy the whole firmware image,
* or could we do this in place!
*/
/* Allocate a 15.5k buffer + 3 byte header */
buffer_size = (((1024 * 16) - 512) +
sizeof(struct ti_i2c_image_header));
buffer = kmalloc(buffer_size, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
/* Initialize the buffer to 0xff (pad the buffer) */
memset(buffer, 0xff, buffer_size);
memcpy(buffer, &fw->data[4], fw->size - 4);
for (i = sizeof(struct ti_i2c_image_header);
i < buffer_size; i++) {
cs = (__u8)(cs + buffer[i]);
}
header = (struct ti_i2c_image_header *)buffer;
/* update length and checksum after padding */
header->Length = cpu_to_le16((__u16)(buffer_size -
sizeof(struct ti_i2c_image_header)));
header->CheckSum = cs;
/* Download the operational code */
dev_dbg(dev, "%s - Downloading operational code image version %d.%d (TI UMP)\n",
__func__,
fw_hdr->major_version, fw_hdr->minor_version);
status = download_code(serial, buffer, buffer_size);
kfree(buffer);
if (status) {
dev_dbg(dev, "%s - Error downloading operational code image\n", __func__);
return status;
}
/* Device will reboot */
serial->product_info.TiMode = TI_MODE_TRANSITIONING;
dev_dbg(dev, "%s - Download successful -- Device rebooting...\n", __func__);
return 1;
}
stayinbootmode:
/* Eprom is invalid or blank stay in boot mode */
dev_dbg(dev, "%s - STAYING IN BOOT MODE\n", __func__);
serial->product_info.TiMode = TI_MODE_BOOT;
return 1;
}
| 10,060 |
9,653 | 0 |
static int php_is_file_ok(const cwd_state *state) /* {{{ */
{
struct stat buf;
if (php_sys_stat(state->cwd, &buf) == 0 && S_ISREG(buf.st_mode))
return (0);
return (1);
}
/* }}} */
| 10,061 |
72,619 | 0 |
ring_buffer_read_finish(struct ring_buffer_iter *iter)
{
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
unsigned long flags;
/*
* Ring buffer is disabled from recording, here's a good place
* to check the integrity of the ring buffer.
* Must prevent readers from trying to read, as the check
* clears the HEAD page and readers require it.
*/
raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
rb_check_pages(cpu_buffer);
raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
atomic_dec(&cpu_buffer->record_disabled);
atomic_dec(&cpu_buffer->buffer->resize_disabled);
kfree(iter);
}
| 10,062 |
169,917 | 0 |
xsltInitCtxtKeys(xsltTransformContextPtr ctxt, xsltDocumentPtr idoc) {
xsltStylesheetPtr style;
xsltKeyDefPtr keyDef;
if ((ctxt == NULL) || (idoc == NULL))
return;
#ifdef KEY_INIT_DEBUG
fprintf(stderr, "xsltInitCtxtKeys on document\n");
#endif
#ifdef WITH_XSLT_DEBUG_KEYS
if ((idoc->doc != NULL) && (idoc->doc->URL != NULL))
XSLT_TRACE(ctxt,XSLT_TRACE_KEYS,xsltGenericDebug(xsltGenericDebugContext, "Initializing keys on %s\n",
idoc->doc->URL));
#endif
style = ctxt->style;
while (style != NULL) {
keyDef = (xsltKeyDefPtr) style->keys;
while (keyDef != NULL) {
xsltInitCtxtKey(ctxt, idoc, keyDef);
keyDef = keyDef->next;
}
style = xsltNextImport(style);
}
#ifdef KEY_INIT_DEBUG
fprintf(stderr, "xsltInitCtxtKeys on document: done\n");
#endif
}
| 10,063 |
47,886 | 0 |
bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
{
if (pfrag->page) {
if (page_ref_count(pfrag->page) == 1) {
pfrag->offset = 0;
return true;
}
if (pfrag->offset + sz <= pfrag->size)
return true;
put_page(pfrag->page);
}
pfrag->offset = 0;
if (SKB_FRAG_PAGE_ORDER) {
/* Avoid direct reclaim but allow kswapd to wake */
pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
__GFP_COMP | __GFP_NOWARN |
__GFP_NORETRY,
SKB_FRAG_PAGE_ORDER);
if (likely(pfrag->page)) {
pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
return true;
}
}
pfrag->page = alloc_page(gfp);
if (likely(pfrag->page)) {
pfrag->size = PAGE_SIZE;
return true;
}
return false;
}
| 10,064 |
21,240 | 0 |
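/*
 * Dispatch a fault on a single PTE: not-present entries are routed to
 * the linear, anonymous, nonlinear or swap handlers; present entries
 * get write-protection handling and dirty/accessed-bit updates under
 * the page-table lock.
 */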
int handle_pte_fault(struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long address,
pte_t *pte, pmd_t *pmd, unsigned int flags)
{
pte_t entry;
spinlock_t *ptl;
entry = *pte;
if (!pte_present(entry)) {
if (pte_none(entry)) {
if (vma->vm_ops) {
if (likely(vma->vm_ops->fault))
return do_linear_fault(mm, vma, address,
pte, pmd, flags, entry);
}
return do_anonymous_page(mm, vma, address,
pte, pmd, flags);
}
if (pte_file(entry))
return do_nonlinear_fault(mm, vma, address,
pte, pmd, flags, entry);
return do_swap_page(mm, vma, address,
pte, pmd, flags, entry);
}
ptl = pte_lockptr(mm, pmd);
spin_lock(ptl);
if (unlikely(!pte_same(*pte, entry)))
goto unlock;
if (flags & FAULT_FLAG_WRITE) {
if (!pte_write(entry))
return do_wp_page(mm, vma, address,
pte, pmd, ptl, entry);
entry = pte_mkdirty(entry);
}
entry = pte_mkyoung(entry);
if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) {
update_mmu_cache(vma, address, pte);
} else {
/*
* This is needed only for protection faults but the arch code
* is not yet telling us if this is a protection fault or not.
* This still avoids useless tlb flushes for .text page faults
* with threads.
*/
if (flags & FAULT_FLAG_WRITE)
flush_tlb_fix_spurious_fault(vma, address);
}
unlock:
pte_unmap_unlock(pte, ptl);
return 0;
}
| 10,065 |
31,779 | 0 |
static void print_fatal_signal(int signr)
{
struct pt_regs *regs = signal_pt_regs();
printk(KERN_INFO "%s/%d: potentially unexpected fatal signal %d.\n",
current->comm, task_pid_nr(current), signr);
#if defined(__i386__) && !defined(__arch_um__)
printk(KERN_INFO "code at %08lx: ", regs->ip);
{
int i;
for (i = 0; i < 16; i++) {
unsigned char insn;
if (get_user(insn, (unsigned char *)(regs->ip + i)))
break;
printk(KERN_CONT "%02x ", insn);
}
}
printk(KERN_CONT "\n");
#endif
preempt_disable();
show_regs(regs);
preempt_enable();
}
| 10,066 |
90,812 | 0 |
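/* timegm()-like conversion: turn a broken-down UTC time into seconds
 * since the Unix epoch without consulting the local timezone. tm_year
 * counts years since 1900, so the loop starts at 70 (i.e. 1970). */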
static time_t gf_mktime_utc(struct tm *tm)
{
static const u32 days_per_month[2][12] = {
{31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31},
{31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}
};
time_t time=0;
int i;
for (i=70; i<tm->tm_year; i++) {
time += leap_year(i) ? 366 : 365;
}
for (i=0; i<tm->tm_mon; ++i) {
time += days_per_month[leap_year(tm->tm_year)][i];
}
time += tm->tm_mday - 1;
time *= 24;
time += tm->tm_hour;
time *= 60;
time += tm->tm_min;
time *= 60;
time += tm->tm_sec;
return time;
}
| 10,067 |
159,120 | 0 |
std::string GetDownloadTypeNames(DownloadItem::DownloadType type) {
switch (type) {
case DownloadItem::TYPE_ACTIVE_DOWNLOAD:
return "NEW_DOWNLOAD";
case DownloadItem::TYPE_HISTORY_IMPORT:
return "HISTORY_IMPORT";
case DownloadItem::TYPE_SAVE_PAGE_AS:
return "SAVE_PAGE_AS";
default:
NOTREACHED();
return "INVALID_TYPE";
}
}
| 10,068 |
43,184 | 0 |
static void oz_hcd_endpoint_reset(struct usb_hcd *hcd,
struct usb_host_endpoint *ep)
{
}
| 10,069 |
120,290 | 0 |
void FolderHeaderView::ButtonPressed(views::Button* sender,
const ui::Event& event) {
delegate_->GiveBackFocusToSearchBox();
delegate_->NavigateBack(folder_item_, event);
}
| 10,070 |
18,405 | 0 |
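/* Resolve argv[0] to an existing file and run it, passing the remaining
 * argv entries through as its arguments. */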
spawn(WebKitWebView *web_view, GArray *argv, GString *result) {
(void)web_view; (void)result;
gchar *path = NULL;
if (argv_idx(argv, 0) &&
((path = find_existing_file(argv_idx(argv, 0)))) ) {
run_command(path, 0,
((const gchar **) (argv->data + sizeof(gchar*))),
FALSE, NULL);
g_free(path);
}
}
| 10,071 |
37,584 | 0 |
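/* Build a mask with bits s through e (inclusive) set, e.g. for marking
 * reserved bit ranges in paging-structure entries. */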
static inline u64 rsvd_bits(int s, int e)
{
return ((1ULL << (e - s + 1)) - 1) << s;
}
| 10,072 |
39,599 | 0 |
static int __init aio_setup(void)
{
static struct file_system_type aio_fs = {
.name = "aio",
.mount = aio_mount,
.kill_sb = kill_anon_super,
};
aio_mnt = kern_mount(&aio_fs);
if (IS_ERR(aio_mnt))
panic("Failed to create aio fs mount.");
kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
kioctx_cachep = KMEM_CACHE(kioctx, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
pr_debug("sizeof(struct page) = %zu\n", sizeof(struct page));
return 0;
}
| 10,073 |
90,454 | 0 |
void ib_uverbs_detach_umcast(struct ib_qp *qp,
struct ib_uqp_object *uobj)
{
struct ib_uverbs_mcast_entry *mcast, *tmp;
list_for_each_entry_safe(mcast, tmp, &uobj->mcast_list, list) {
ib_detach_mcast(qp, &mcast->gid, mcast->lid);
list_del(&mcast->list);
kfree(mcast);
}
}
| 10,074 |
88,954 | 0 |
static unsigned int PNMInteger(Image *image,CommentInfo *comment_info,
const unsigned int base)
{
int
c;
unsigned int
value;
/*
Skip any leading whitespace.
*/
do
{
c=ReadBlobByte(image);
if (c == EOF)
return(0);
if (c == (int) '#')
c=PNMComment(image,comment_info);
} while ((c == ' ') || (c == '\t') || (c == '\n') || (c == '\r'));
if (base == 2)
return((unsigned int) (c-(int) '0'));
/*
Evaluate number.
*/
value=0;
while (isdigit(c) != 0)
{
if (value <= (unsigned int) (INT_MAX/10))
{
value*=10;
if (value <= (unsigned int) (INT_MAX-(c-(int) '0')))
value+=c-(int) '0';
}
c=ReadBlobByte(image);
if (c == EOF)
return(0);
}
if (c == (int) '#')
c=PNMComment(image,comment_info);
return(value);
}
| 10,075 |
143,602 | 0 |
void RenderWidgetHostImpl::CompositorFrameDrawn(
const std::vector<ui::LatencyInfo>& latency_info) {
for (size_t i = 0; i < latency_info.size(); i++) {
std::set<RenderWidgetHostImpl*> rwhi_set;
for (const auto& lc : latency_info[i].latency_components()) {
if (lc.first.first == ui::INPUT_EVENT_LATENCY_BEGIN_RWH_COMPONENT ||
lc.first.first == ui::WINDOW_SNAPSHOT_FRAME_NUMBER_COMPONENT ||
lc.first.first == ui::TAB_SHOW_COMPONENT) {
int routing_id = lc.first.second & 0xffffffff;
int process_id = (lc.first.second >> 32) & 0xffffffff;
RenderWidgetHost* rwh =
RenderWidgetHost::FromID(process_id, routing_id);
if (!rwh) {
continue;
}
RenderWidgetHostImpl* rwhi = RenderWidgetHostImpl::From(rwh);
if (rwhi_set.insert(rwhi).second)
rwhi->FrameSwapped(latency_info[i]);
}
}
}
}
| 10,076 |
187,133 | 1 |
void MediaElementAudioSourceHandler::Process(size_t number_of_frames) {
AudioBus* output_bus = Output(0).Bus();
// Use a tryLock() to avoid contention in the real-time audio thread.
// If we fail to acquire the lock then the HTMLMediaElement must be in the
// middle of reconfiguring its playback engine, so we output silence in this
// case.
MutexTryLocker try_locker(process_lock_);
if (try_locker.Locked()) {
if (!MediaElement() || !source_sample_rate_) {
output_bus->Zero();
return;
}
// TODO(crbug.com/811516): Although OnSetFormat() requested the output bus
// channels, the actual channel count might have not been changed yet.
// Output silence for such case until the channel count is resolved.
if (source_number_of_channels_ != output_bus->NumberOfChannels()) {
output_bus->Zero();
return;
}
AudioSourceProvider& provider = MediaElement()->GetAudioSourceProvider();
// Grab data from the provider so that the element continues to make
// progress, even if we're going to output silence anyway.
if (multi_channel_resampler_.get()) {
DCHECK_NE(source_sample_rate_, Context()->sampleRate());
multi_channel_resampler_->Process(&provider, output_bus,
number_of_frames);
} else {
// Bypass the resampler completely if the source is at the context's
// sample-rate.
DCHECK_EQ(source_sample_rate_, Context()->sampleRate());
provider.ProvideInput(output_bus, number_of_frames);
}
// Output silence if we don't have access to the element.
if (!PassesCORSAccessCheck()) {
if (maybe_print_cors_message_) {
// Print a CORS message, but just once for each change in the current
// media element source, and only if we have a document to print to.
maybe_print_cors_message_ = false;
PostCrossThreadTask(
*task_runner_, FROM_HERE,
CrossThreadBind(&MediaElementAudioSourceHandler::PrintCORSMessage,
WrapRefCounted(this), current_src_string_));
}
output_bus->Zero();
}
} else {
// We failed to acquire the lock.
output_bus->Zero();
}
}
| 10,077 |
14,424 | 0 |
session_set_fds(Session *s, int fdin, int fdout, int fderr, int ignore_fderr,
int is_tty)
{
if (!compat20)
fatal("session_set_fds: called for proto != 2.0");
/*
* now that have a child and a pipe to the child,
* we can activate our channel and register the fd's
*/
if (s->chanid == -1)
fatal("no channel for session %d", s->self);
channel_set_fds(s->chanid,
fdout, fdin, fderr,
ignore_fderr ? CHAN_EXTENDED_IGNORE : CHAN_EXTENDED_READ,
1, is_tty, CHAN_SES_WINDOW_DEFAULT);
}
| 10,078 |
100,609 | 0 |
void VoiceInteractionOverlay::HideAnimation() {
animation_state_ = AnimationState::HIDDEN;
{
ui::ScopedLayerAnimationSettings settings(ripple_layer_->GetAnimator());
settings.SetTransitionDuration(
base::TimeDelta::FromMilliseconds(kHideDurationMs));
settings.SetTweenType(gfx::Tween::LINEAR_OUT_SLOW_IN);
settings.SetPreemptionStrategy(
ui::LayerAnimator::PreemptionStrategy::ENQUEUE_NEW_ANIMATION);
ripple_layer_->SetOpacity(0);
}
{
ui::ScopedLayerAnimationSettings settings(icon_layer_->GetAnimator());
settings.SetTransitionDuration(
base::TimeDelta::FromMilliseconds(kHideDurationMs));
settings.SetTweenType(gfx::Tween::LINEAR_OUT_SLOW_IN);
settings.SetPreemptionStrategy(
ui::LayerAnimator::PreemptionStrategy::ENQUEUE_NEW_ANIMATION);
icon_layer_->SetOpacity(0);
icon_layer_->StopAnimation();
}
{
ui::ScopedLayerAnimationSettings settings(background_layer_->GetAnimator());
settings.SetTransitionDuration(
base::TimeDelta::FromMilliseconds(kHideDurationMs));
settings.SetTweenType(gfx::Tween::LINEAR_OUT_SLOW_IN);
settings.SetPreemptionStrategy(
ui::LayerAnimator::PreemptionStrategy::ENQUEUE_NEW_ANIMATION);
background_layer_->SetOpacity(0);
}
}
| 10,079 |
60,501 | 0 |
int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
return 0;
}
| 10,080 |
120,051 | 0 |
~FileBrowserPrivateGetDriveFilesFunction() {
}
| 10,081 |
91,724 | 0 |
int am_save_post(request_rec *r, const char **relay_state)
{
am_mod_cfg_rec *mod_cfg;
const char *content_type;
const char *charset;
const char *psf_id;
char *psf_name;
char *post_data;
apr_size_t post_data_len;
apr_size_t written;
apr_file_t *psf;
mod_cfg = am_get_mod_cfg(r->server);
if (mod_cfg->post_dir == NULL) {
AM_LOG_RERROR(APLOG_MARK, APLOG_ERR, 0, r,
"MellonPostReplay enabled but MellonPostDirectory not set "
"-- cannot save post data");
return HTTP_INTERNAL_SERVER_ERROR;
}
if (am_postdir_cleanup(r) != OK)
return HTTP_INTERNAL_SERVER_ERROR;
/* Check Content-Type */
content_type = apr_table_get(r->headers_in, "Content-Type");
if (content_type == NULL) {
content_type = "urlencoded";
charset = NULL;
} else {
if (am_has_header(r, content_type,
"application/x-www-form-urlencoded")) {
content_type = "urlencoded";
} else if (am_has_header(r, content_type,
"multipart/form-data")) {
content_type = "multipart";
} else {
AM_LOG_RERROR(APLOG_MARK, APLOG_ERR, 0, r,
"Unknown POST Content-Type \"%s\"", content_type);
return HTTP_INTERNAL_SERVER_ERROR;
}
charset = am_get_header_attr(r, content_type, NULL, "charset");
}
if ((psf_id = am_generate_id(r)) == NULL) {
AM_LOG_RERROR(APLOG_MARK, APLOG_ERR, 0, r, "cannot generate id");
return HTTP_INTERNAL_SERVER_ERROR;
}
psf_name = apr_psprintf(r->pool, "%s/%s", mod_cfg->post_dir, psf_id);
if (apr_file_open(&psf, psf_name,
APR_WRITE|APR_CREATE|APR_BINARY,
APR_FPROT_UREAD|APR_FPROT_UWRITE,
r->pool) != OK) {
AM_LOG_RERROR(APLOG_MARK, APLOG_ERR, 0, r,
"cannot create POST session file");
return HTTP_INTERNAL_SERVER_ERROR;
}
if (am_read_post_data(r, &post_data, &post_data_len) != OK) {
AM_LOG_RERROR(APLOG_MARK, APLOG_ERR, 0, r, "cannot read POST data");
(void)apr_file_close(psf);
return HTTP_INTERNAL_SERVER_ERROR;
}
if (post_data_len > mod_cfg->post_size) {
AM_LOG_RERROR(APLOG_MARK, APLOG_ERR, 0, r,
"POST data size %" APR_SIZE_T_FMT
" exceeds maximum %" APR_SIZE_T_FMT ". "
"Increase MellonPostSize directive.",
post_data_len, mod_cfg->post_size);
(void)apr_file_close(psf);
return HTTP_INTERNAL_SERVER_ERROR;
}
written = post_data_len;
if ((apr_file_write(psf, post_data, &written) != OK) ||
(written != post_data_len)) {
AM_LOG_RERROR(APLOG_MARK, APLOG_ERR, 0, r,
"cannot write to POST session file");
(void)apr_file_close(psf);
return HTTP_INTERNAL_SERVER_ERROR;
}
if (apr_file_close(psf) != OK) {
AM_LOG_RERROR(APLOG_MARK, APLOG_ERR, 0, r,
"cannot close POST session file");
return HTTP_INTERNAL_SERVER_ERROR;
}
if (charset != NULL)
charset = apr_psprintf(r->pool, "&charset=%s",
am_urlencode(r->pool, charset));
else
charset = "";
*relay_state = apr_psprintf(r->pool,
"%srepost?id=%s&ReturnTo=%s&enctype=%s%s",
am_get_endpoint_url(r), psf_id,
am_urlencode(r->pool, *relay_state),
content_type, charset);
return OK;
}
| 10,082 |
132,506 | 0 |
UsbSetConfigurationFunction::UsbSetConfigurationFunction() {
}
| 10,083 |
149,700 | 0 |
void ShowLocalBubble() {
controller()->ShowBubbleForLocalSave(CreditCard(),
base::Bind(&SaveCardCallback));
}
| 10,084 |
133,781 | 0 |
bool SetClientSocketForSSL(SSL* ssl, SSLClientSocketOpenSSL* socket) {
return SSL_set_ex_data(ssl, ssl_socket_data_index_, socket) != 0;
}
| 10,085 |
36,989 | 0 |
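/* A VMX control bit may be set to 1 only when the corresponding
 * allowed-1 bit (the high 32 bits of the capability MSR) is set. */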
static __init bool allow_1_setting(u32 msr, u32 ctl)
{
u32 vmx_msr_low, vmx_msr_high;
rdmsr(msr, vmx_msr_low, vmx_msr_high);
return vmx_msr_high & ctl;
}
| 10,086 |
57,945 | 0 |
static int nf_tables_commit(struct sk_buff *skb)
{
struct net *net = sock_net(skb->sk);
struct nft_trans *trans, *next;
struct nft_trans_elem *te;
/* Bump generation counter, invalidate any dump in progress */
while (++net->nft.base_seq == 0);
/* A new generation has just started */
net->nft.gencursor = gencursor_next(net);
/* Make sure all packets have left the previous generation before
* purging old rules.
*/
synchronize_rcu();
list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) {
switch (trans->msg_type) {
case NFT_MSG_NEWTABLE:
if (nft_trans_table_update(trans)) {
if (!nft_trans_table_enable(trans)) {
nf_tables_table_disable(trans->ctx.afi,
trans->ctx.table);
trans->ctx.table->flags |= NFT_TABLE_F_DORMANT;
}
} else {
trans->ctx.table->flags &= ~NFT_TABLE_INACTIVE;
}
nf_tables_table_notify(&trans->ctx, NFT_MSG_NEWTABLE);
nft_trans_destroy(trans);
break;
case NFT_MSG_DELTABLE:
nf_tables_table_notify(&trans->ctx, NFT_MSG_DELTABLE);
break;
case NFT_MSG_NEWCHAIN:
if (nft_trans_chain_update(trans))
nft_chain_commit_update(trans);
else
trans->ctx.chain->flags &= ~NFT_CHAIN_INACTIVE;
nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN);
nft_trans_destroy(trans);
break;
case NFT_MSG_DELCHAIN:
nf_tables_chain_notify(&trans->ctx, NFT_MSG_DELCHAIN);
nf_tables_unregister_hooks(trans->ctx.table,
trans->ctx.chain,
trans->ctx.afi->nops);
break;
case NFT_MSG_NEWRULE:
nft_rule_clear(trans->ctx.net, nft_trans_rule(trans));
nf_tables_rule_notify(&trans->ctx,
nft_trans_rule(trans),
NFT_MSG_NEWRULE);
nft_trans_destroy(trans);
break;
case NFT_MSG_DELRULE:
list_del_rcu(&nft_trans_rule(trans)->list);
nf_tables_rule_notify(&trans->ctx,
nft_trans_rule(trans),
NFT_MSG_DELRULE);
break;
case NFT_MSG_NEWSET:
nft_trans_set(trans)->flags &= ~NFT_SET_INACTIVE;
/* This avoids hitting -EBUSY when deleting the table
* from the transaction.
*/
if (nft_trans_set(trans)->flags & NFT_SET_ANONYMOUS &&
!list_empty(&nft_trans_set(trans)->bindings))
trans->ctx.table->use--;
nf_tables_set_notify(&trans->ctx, nft_trans_set(trans),
NFT_MSG_NEWSET, GFP_KERNEL);
nft_trans_destroy(trans);
break;
case NFT_MSG_DELSET:
nf_tables_set_notify(&trans->ctx, nft_trans_set(trans),
NFT_MSG_DELSET, GFP_KERNEL);
break;
case NFT_MSG_NEWSETELEM:
nf_tables_setelem_notify(&trans->ctx,
nft_trans_elem_set(trans),
&nft_trans_elem(trans),
NFT_MSG_NEWSETELEM, 0);
nft_trans_destroy(trans);
break;
case NFT_MSG_DELSETELEM:
te = (struct nft_trans_elem *)trans->data;
nf_tables_setelem_notify(&trans->ctx, te->set,
&te->elem,
NFT_MSG_DELSETELEM, 0);
te->set->ops->get(te->set, &te->elem);
te->set->ops->remove(te->set, &te->elem);
nft_data_uninit(&te->elem.key, NFT_DATA_VALUE);
if (te->elem.flags & NFT_SET_MAP) {
nft_data_uninit(&te->elem.data,
te->set->dtype);
}
nft_trans_destroy(trans);
break;
}
}
synchronize_rcu();
list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) {
list_del(&trans->list);
nf_tables_commit_release(trans);
}
nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN);
return 0;
}
| 10,087 |
109,577 | 0 |
KURL Document::baseURI() const
{
return m_baseURL;
}
| 10,088 |
118,626 | 0 |
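// Block on |completion| and run |work| repeatedly until it returns
// false, then quit the current message loop.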
void BlockLoop(base::WaitableEvent* completion, base::Callback<bool()> work) {
do {
completion->Wait();
} while (work.Run());
base::MessageLoop::current()->QuitNow();
}
| 10,089 |
83,191 | 0 |
mrb_yield_cont(mrb_state *mrb, mrb_value b, mrb_value self, mrb_int argc, const mrb_value *argv)
{
struct RProc *p;
mrb_callinfo *ci;
if (mrb_nil_p(b)) {
mrb_raise(mrb, E_ARGUMENT_ERROR, "no block given");
}
if (mrb_type(b) != MRB_TT_PROC) {
mrb_raise(mrb, E_TYPE_ERROR, "not a block");
}
p = mrb_proc_ptr(b);
ci = mrb->c->ci;
stack_extend(mrb, 3);
mrb->c->stack[1] = mrb_ary_new_from_values(mrb, argc, argv);
mrb->c->stack[2] = mrb_nil_value();
ci->argc = -1;
return mrb_exec_irep(mrb, self, p);
}
| 10,090 |
112,887 | 0 |
bool GDataCache::IsUnderGDataCacheDirectory(const FilePath& path) const {
return cache_root_path_ == path || cache_root_path_.IsParent(path);
}
| 10,091 |
31,557 | 0 |
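/* Ensure the chunk's advertised length is at least the minimum header
 * length required for its chunk type. */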
sctp_chunk_length_valid(struct sctp_chunk *chunk,
__u16 required_length)
{
__u16 chunk_length = ntohs(chunk->chunk_hdr->length);
if (unlikely(chunk_length < required_length))
return 0;
return 1;
}
| 10,092 |
35,835 | 0 |
void sctp_chunk_assign_ssn(struct sctp_chunk *chunk)
{
struct sctp_datamsg *msg;
struct sctp_chunk *lchunk;
struct sctp_stream *stream;
__u16 ssn;
__u16 sid;
if (chunk->has_ssn)
return;
/* All fragments will be on the same stream */
sid = ntohs(chunk->subh.data_hdr->stream);
stream = &chunk->asoc->ssnmap->out;
/* Now assign the sequence number to the entire message.
* All fragments must have the same stream sequence number.
*/
msg = chunk->msg;
list_for_each_entry(lchunk, &msg->chunks, frag_list) {
if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
ssn = 0;
} else {
if (lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG)
ssn = sctp_ssn_next(stream, sid);
else
ssn = sctp_ssn_peek(stream, sid);
}
lchunk->subh.data_hdr->ssn = htons(ssn);
lchunk->has_ssn = 1;
}
}
| 10,093 |
178,429 | 1 |
int ssl3_send_server_key_exchange(SSL *s)
{
#ifndef OPENSSL_NO_RSA
unsigned char *q;
int j, num;
RSA *rsa;
unsigned char md_buf[MD5_DIGEST_LENGTH + SHA_DIGEST_LENGTH];
unsigned int u;
#endif
#ifndef OPENSSL_NO_DH
DH *dh = NULL, *dhp;
#endif
#ifndef OPENSSL_NO_ECDH
EC_KEY *ecdh = NULL, *ecdhp;
unsigned char *encodedPoint = NULL;
int encodedlen = 0;
int curve_id = 0;
BN_CTX *bn_ctx = NULL;
#endif
EVP_PKEY *pkey;
const EVP_MD *md = NULL;
unsigned char *p, *d;
int al, i;
unsigned long type;
int n;
CERT *cert;
BIGNUM *r[4];
int nr[4], kn;
BUF_MEM *buf;
EVP_MD_CTX md_ctx;
EVP_MD_CTX_init(&md_ctx);
if (s->state == SSL3_ST_SW_KEY_EXCH_A) {
type = s->s3->tmp.new_cipher->algorithm_mkey;
cert = s->cert;
buf = s->init_buf;
r[0] = r[1] = r[2] = r[3] = NULL;
n = 0;
#ifndef OPENSSL_NO_RSA
if (type & SSL_kRSA) {
rsa = cert->rsa_tmp;
if ((rsa == NULL) && (s->cert->rsa_tmp_cb != NULL)) {
rsa = s->cert->rsa_tmp_cb(s,
SSL_C_IS_EXPORT(s->s3->
tmp.new_cipher),
SSL_C_EXPORT_PKEYLENGTH(s->s3->
tmp.new_cipher));
if (rsa == NULL) {
al = SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,
SSL_R_ERROR_GENERATING_TMP_RSA_KEY);
goto f_err;
}
RSA_up_ref(rsa);
cert->rsa_tmp = rsa;
}
if (rsa == NULL) {
al = SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,
SSL_R_MISSING_TMP_RSA_KEY);
goto f_err;
}
r[0] = rsa->n;
r[1] = rsa->e;
s->s3->tmp.use_rsa_tmp = 1;
} else
#endif
#ifndef OPENSSL_NO_DH
if (type & SSL_kEDH) {
dhp = cert->dh_tmp;
if ((dhp == NULL) && (s->cert->dh_tmp_cb != NULL))
dhp = s->cert->dh_tmp_cb(s,
SSL_C_IS_EXPORT(s->s3->
tmp.new_cipher),
SSL_C_EXPORT_PKEYLENGTH(s->s3->
tmp.new_cipher));
if (dhp == NULL) {
al = SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,
SSL_R_MISSING_TMP_DH_KEY);
goto f_err;
}
if (s->s3->tmp.dh != NULL) {
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE,
ERR_R_INTERNAL_ERROR);
goto err;
}
if ((dh = DHparams_dup(dhp)) == NULL) {
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE, ERR_R_DH_LIB);
goto err;
}
s->s3->tmp.dh = dh;
if ((dhp->pub_key == NULL ||
dhp->priv_key == NULL ||
(s->options & SSL_OP_SINGLE_DH_USE))) {
if (!DH_generate_key(dh)) {
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE, ERR_R_DH_LIB);
goto err;
}
} else {
dh->pub_key = BN_dup(dhp->pub_key);
dh->priv_key = BN_dup(dhp->priv_key);
if ((dh->pub_key == NULL) || (dh->priv_key == NULL)) {
SSLerr(SSL_F_SSL3_SEND_SERVER_KEY_EXCHANGE, ERR_R_DH_LIB);
goto err;
}
}
r[0] = dh->p;
r[1] = dh->g;
r[2] = dh->pub_key;
} else
| 10,094 |
187,417 | 1 |
int validate_camera_metadata_structure(const camera_metadata_t *metadata,
const size_t *expected_size) {
if (metadata == NULL) {
ALOGE("%s: metadata is null!", __FUNCTION__);
return ERROR;
}
// Check that the metadata pointer is well-aligned first.
{
static const struct {
const char *name;
size_t alignment;
} alignments[] = {
{
.name = "camera_metadata",
.alignment = METADATA_ALIGNMENT
},
{
.name = "camera_metadata_buffer_entry",
.alignment = ENTRY_ALIGNMENT
},
{
.name = "camera_metadata_data",
.alignment = DATA_ALIGNMENT
},
};
for (size_t i = 0; i < sizeof(alignments)/sizeof(alignments[0]); ++i) {
uintptr_t aligned_ptr = ALIGN_TO(metadata, alignments[i].alignment);
if ((uintptr_t)metadata != aligned_ptr) {
ALOGE("%s: Metadata pointer is not aligned (actual %p, "
"expected %p) to type %s",
__FUNCTION__, metadata,
(void*)aligned_ptr, alignments[i].name);
return ERROR;
}
}
}
/**
* Check that the metadata contents are correct
*/
if (expected_size != NULL && metadata->size > *expected_size) {
ALOGE("%s: Metadata size (%" PRIu32 ") should be <= expected size (%zu)",
__FUNCTION__, metadata->size, *expected_size);
return ERROR;
}
if (metadata->entry_count > metadata->entry_capacity) {
ALOGE("%s: Entry count (%" PRIu32 ") should be <= entry capacity "
"(%" PRIu32 ")",
__FUNCTION__, metadata->entry_count, metadata->entry_capacity);
return ERROR;
}
const metadata_uptrdiff_t entries_end =
metadata->entries_start + metadata->entry_capacity;
if (entries_end < metadata->entries_start || // overflow check
entries_end > metadata->data_start) {
ALOGE("%s: Entry start + capacity (%" PRIu32 ") should be <= data start "
"(%" PRIu32 ")",
__FUNCTION__,
(metadata->entries_start + metadata->entry_capacity),
metadata->data_start);
return ERROR;
}
const metadata_uptrdiff_t data_end =
metadata->data_start + metadata->data_capacity;
if (data_end < metadata->data_start || // overflow check
data_end > metadata->size) {
ALOGE("%s: Data start + capacity (%" PRIu32 ") should be <= total size "
"(%" PRIu32 ")",
__FUNCTION__,
(metadata->data_start + metadata->data_capacity),
metadata->size);
return ERROR;
}
// Validate each entry
const metadata_size_t entry_count = metadata->entry_count;
camera_metadata_buffer_entry_t *entries = get_entries(metadata);
for (size_t i = 0; i < entry_count; ++i) {
if ((uintptr_t)&entries[i] != ALIGN_TO(&entries[i], ENTRY_ALIGNMENT)) {
ALOGE("%s: Entry index %zu had bad alignment (address %p),"
" expected alignment %zu",
__FUNCTION__, i, &entries[i], ENTRY_ALIGNMENT);
return ERROR;
}
camera_metadata_buffer_entry_t entry = entries[i];
if (entry.type >= NUM_TYPES) {
ALOGE("%s: Entry index %zu had a bad type %d",
__FUNCTION__, i, entry.type);
return ERROR;
}
// TODO: fix vendor_tag_ops across processes so we don't need to special
// case vendor-specific tags
uint32_t tag_section = entry.tag >> 16;
int tag_type = get_camera_metadata_tag_type(entry.tag);
if (tag_type != (int)entry.type && tag_section < VENDOR_SECTION) {
ALOGE("%s: Entry index %zu had tag type %d, but the type was %d",
__FUNCTION__, i, tag_type, entry.type);
return ERROR;
}
size_t data_size =
calculate_camera_metadata_entry_data_size(entry.type,
entry.count);
if (data_size != 0) {
camera_metadata_data_t *data =
(camera_metadata_data_t*) (get_data(metadata) +
entry.data.offset);
if ((uintptr_t)data != ALIGN_TO(data, DATA_ALIGNMENT)) {
ALOGE("%s: Entry index %zu had bad data alignment (address %p),"
" expected align %zu, (tag name %s, data size %zu)",
__FUNCTION__, i, data, DATA_ALIGNMENT,
get_camera_metadata_tag_name(entry.tag) ?: "unknown",
data_size);
return ERROR;
}
size_t data_entry_end = entry.data.offset + data_size;
if (data_entry_end < entry.data.offset || // overflow check
data_entry_end > metadata->data_capacity) {
ALOGE("%s: Entry index %zu data ends (%zu) beyond the capacity "
"%" PRIu32, __FUNCTION__, i, data_entry_end,
metadata->data_capacity);
return ERROR;
}
} else if (entry.count == 0) {
if (entry.data.offset != 0) {
ALOGE("%s: Entry index %zu had 0 items, but offset was non-0 "
"(%" PRIu32 "), tag name: %s", __FUNCTION__, i, entry.data.offset,
get_camera_metadata_tag_name(entry.tag) ?: "unknown");
return ERROR;
}
} // else data stored inline, so we look at value which can be anything.
}
return OK;
}
| 10,095 |
95,225 | 0 |
static int list_cb(struct findall_data *data, void *rockp)
{
struct list_rock *rock = (struct list_rock *)rockp;
if (!data) {
if (!(rock->last_attributes & MBOX_ATTRIBUTE_HASCHILDREN))
rock->last_attributes |= MBOX_ATTRIBUTE_HASNOCHILDREN;
perform_output(NULL, NULL, rock);
return 0;
}
size_t last_len = (rock->last_name ? strlen(rock->last_name) : 0);
const char *extname = data->extname;
int last_name_is_ancestor =
rock->last_name
&& strlen(extname) >= last_len
&& extname[last_len] == imapd_namespace.hier_sep
&& !memcmp(rock->last_name, extname, last_len);
list_callback_calls++;
/* list_response will calculate haschildren/hasnochildren flags later
* if they're required but not yet set, but it's a little cheaper to
* precalculate them now while we're iterating the mailboxes anyway.
*/
if (last_name_is_ancestor || (rock->last_name && !data->mbname && !strcmp(rock->last_name, extname)))
rock->last_attributes |= MBOX_ATTRIBUTE_HASCHILDREN;
else if (!(rock->last_attributes & MBOX_ATTRIBUTE_HASCHILDREN))
rock->last_attributes |= MBOX_ATTRIBUTE_HASNOCHILDREN;
if (!perform_output(data->extname, data->mbentry, rock))
return 0;
if (!data->mbname)
rock->last_attributes |= MBOX_ATTRIBUTE_HASCHILDREN | MBOX_ATTRIBUTE_NONEXISTENT;
else if (data->mb_category == MBNAME_ALTINBOX)
rock->last_attributes |= MBOX_ATTRIBUTE_NOINFERIORS;
return 0;
}
| 10,096 |
121,737 | 0 |
EmbeddedWorkerContextClient::~EmbeddedWorkerContextClient() {
DCHECK(g_worker_client_tls.Pointer()->Get() != NULL);
g_worker_client_tls.Pointer()->Set(NULL);
}
| 10,097 |
114,797 | 0 |
__xmlIndentTreeOutput(void) {
if (IS_MAIN_THREAD)
return (&xmlIndentTreeOutput);
else
return (&xmlGetGlobalState()->xmlIndentTreeOutput);
}
| 10,098 |
80,980 | 0 |
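/* Validate a nested guest's CR0 against the fixed-0/fixed-1 constraints
 * advertised in the VMX capability MSRs; when unrestricted guest is
 * enabled, PE and PG may legitimately be clear. */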
static bool nested_guest_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
if (to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
SECONDARY_EXEC_UNRESTRICTED_GUEST &&
nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);
return fixed_bits_valid(val, fixed0, fixed1);
}
| 10,099 |