unique_id: int64 (13 … 189k)
target: int64 (0 … 1)
code: string (lengths 20 … 241k)
__index_level_0__: int64 (0 … 18.9k)
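The listing above gives the column schema (names, dtypes, and value ranges), followed by sample rows; each row appears to carry two integer indices, a binary target label, and one code string. Below is a minimal sketch of how such a table could be loaded and inspected with pandas. The file name, storage format, and the use of pandas are assumptions for illustration only, not anything stated by this page.

```python
# Minimal sketch, assuming the rows above are stored in a Parquet file with
# exactly these four columns. "code_samples.parquet" is a hypothetical path.
import pandas as pd

df = pd.read_parquet("code_samples.parquet")  # hypothetical file name

# Sanity-check the frame against the schema shown above.
assert {"unique_id", "target", "code", "__index_level_0__"} <= set(df.columns)
assert df["target"].isin([0, 1]).all()       # target is a binary label
assert (df["code"].str.len() >= 20).all()    # code strings start at length 20

# Split rows by label and peek at one code sample from each group.
positives = df[df["target"] == 1]
negatives = df[df["target"] == 0]
print(len(positives), len(negatives))
print(positives["code"].iloc[0][:200])       # first 200 chars of one snippet
```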
69,469
0
static int nfs_idmap_read_and_verify_message(struct idmap_msg *im, struct idmap_msg *upcall, struct key *key, struct key *authkey) { char id_str[NFS_UINT_MAXLEN]; size_t len; int ret = -ENOKEY; /* ret = -ENOKEY */ if (upcall->im_type != im->im_type || upcall->im_conv != im->im_conv) goto out; switch (im->im_conv) { case IDMAP_CONV_NAMETOID: if (strcmp(upcall->im_name, im->im_name) != 0) break; /* Note: here we store the NUL terminator too */ len = sprintf(id_str, "%d", im->im_id) + 1; ret = nfs_idmap_instantiate(key, authkey, id_str, len); break; case IDMAP_CONV_IDTONAME: if (upcall->im_id != im->im_id) break; len = strlen(im->im_name); ret = nfs_idmap_instantiate(key, authkey, im->im_name, len); break; default: ret = -EINVAL; } out: return ret; }
13,200
173,385
0
void* Parcel::writeInplace(size_t len) { if (len > INT32_MAX) { return NULL; } const size_t padded = pad_size(len); if (mDataPos+padded < mDataPos) { return NULL; } if ((mDataPos+padded) <= mDataCapacity) { restart_write: uint8_t* const data = mData+mDataPos; if (padded != len) { #if BYTE_ORDER == BIG_ENDIAN static const uint32_t mask[4] = { 0x00000000, 0xffffff00, 0xffff0000, 0xff000000 }; #endif #if BYTE_ORDER == LITTLE_ENDIAN static const uint32_t mask[4] = { 0x00000000, 0x00ffffff, 0x0000ffff, 0x000000ff }; #endif *reinterpret_cast<uint32_t*>(data+padded-4) &= mask[padded-len]; } finishWrite(padded); return data; } status_t err = growData(padded); if (err == NO_ERROR) goto restart_write; return NULL; }
13,201
8,955
0
static bool net_tx_pkt_do_sw_fragmentation(struct NetTxPkt *pkt, NetClientState *nc) { struct iovec fragment[NET_MAX_FRAG_SG_LIST]; size_t fragment_len = 0; bool more_frags = false; /* some pointers for shorter code */ void *l2_iov_base, *l3_iov_base; size_t l2_iov_len, l3_iov_len; int src_idx = NET_TX_PKT_PL_START_FRAG, dst_idx; size_t src_offset = 0; size_t fragment_offset = 0; l2_iov_base = pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_base; l2_iov_len = pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len; l3_iov_base = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base; l3_iov_len = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len; /* Copy headers */ fragment[NET_TX_PKT_FRAGMENT_L2_HDR_POS].iov_base = l2_iov_base; fragment[NET_TX_PKT_FRAGMENT_L2_HDR_POS].iov_len = l2_iov_len; fragment[NET_TX_PKT_FRAGMENT_L3_HDR_POS].iov_base = l3_iov_base; fragment[NET_TX_PKT_FRAGMENT_L3_HDR_POS].iov_len = l3_iov_len; /* Put as much data as possible and send */ do { fragment_len = net_tx_pkt_fetch_fragment(pkt, &src_idx, &src_offset, fragment, &dst_idx); more_frags = (fragment_offset + fragment_len < pkt->payload_len); eth_setup_ip4_fragmentation(l2_iov_base, l2_iov_len, l3_iov_base, l3_iov_len, fragment_len, fragment_offset, more_frags); eth_fix_ip4_checksum(l3_iov_base, l3_iov_len); net_tx_pkt_sendv(pkt, nc, fragment, dst_idx); fragment_offset += fragment_len; } while (fragment_len && more_frags); return true; }
13,202
31,922
0
static void perf_cgroup_attach(struct cgroup *cgrp, struct cgroup_taskset *tset) { struct task_struct *task; cgroup_taskset_for_each(task, cgrp, tset) task_function_call(task, __perf_cgroup_move, task); }
13,203
180,065
1
static int __init big_key_crypto_init(void) { int ret = -EINVAL; /* init RNG */ big_key_rng = crypto_alloc_rng(big_key_rng_name, 0, 0); if (IS_ERR(big_key_rng)) { big_key_rng = NULL; return -EFAULT; } /* seed RNG */ ret = crypto_rng_reset(big_key_rng, NULL, crypto_rng_seedsize(big_key_rng)); if (ret) goto error; /* init block cipher */ big_key_skcipher = crypto_alloc_skcipher(big_key_alg_name, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(big_key_skcipher)) { big_key_skcipher = NULL; ret = -EFAULT; goto error; } return 0; error: crypto_free_rng(big_key_rng); big_key_rng = NULL; return ret; }
13,204
141,429
0
uint64_t PaintLayerScrollableArea::Id() const { return DOMNodeIds::IdForNode(GetLayoutBox()->GetNode()); }
13,205
23,171
0
static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) { struct nfs_server *server = NFS_SERVER(inode); struct nfs4_accessargs args = { .fh = NFS_FH(inode), .bitmask = server->attr_bitmask, }; struct nfs4_accessres res = { .server = server, }; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS], .rpc_argp = &args, .rpc_resp = &res, .rpc_cred = entry->cred, }; int mode = entry->mask; int status; /* * Determine which access bits we want to ask for... */ if (mode & MAY_READ) args.access |= NFS4_ACCESS_READ; if (S_ISDIR(inode->i_mode)) { if (mode & MAY_WRITE) args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE; if (mode & MAY_EXEC) args.access |= NFS4_ACCESS_LOOKUP; } else { if (mode & MAY_WRITE) args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND; if (mode & MAY_EXEC) args.access |= NFS4_ACCESS_EXECUTE; } res.fattr = nfs_alloc_fattr(); if (res.fattr == NULL) return -ENOMEM; status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); if (!status) { entry->mask = 0; if (res.access & NFS4_ACCESS_READ) entry->mask |= MAY_READ; if (res.access & (NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE)) entry->mask |= MAY_WRITE; if (res.access & (NFS4_ACCESS_LOOKUP|NFS4_ACCESS_EXECUTE)) entry->mask |= MAY_EXEC; nfs_refresh_inode(inode, res.fattr); } nfs_free_fattr(res.fattr); return status; }
13,206
188,128
1
WORD32 ih264d_video_decode(iv_obj_t *dec_hdl, void *pv_api_ip, void *pv_api_op) { /* ! */ dec_struct_t * ps_dec = (dec_struct_t *)(dec_hdl->pv_codec_handle); WORD32 i4_err_status = 0; UWORD8 *pu1_buf = NULL; WORD32 buflen; UWORD32 u4_max_ofst, u4_length_of_start_code = 0; UWORD32 bytes_consumed = 0; UWORD32 cur_slice_is_nonref = 0; UWORD32 u4_next_is_aud; UWORD32 u4_first_start_code_found = 0; WORD32 ret = 0,api_ret_value = IV_SUCCESS; WORD32 header_data_left = 0,frame_data_left = 0; UWORD8 *pu1_bitstrm_buf; ivd_video_decode_ip_t *ps_dec_ip; ivd_video_decode_op_t *ps_dec_op; ithread_set_name((void*)"Parse_thread"); ps_dec_ip = (ivd_video_decode_ip_t *)pv_api_ip; ps_dec_op = (ivd_video_decode_op_t *)pv_api_op; { UWORD32 u4_size; u4_size = ps_dec_op->u4_size; memset(ps_dec_op, 0, sizeof(ivd_video_decode_op_t)); ps_dec_op->u4_size = u4_size; } ps_dec->pv_dec_out = ps_dec_op; if(ps_dec->init_done != 1) { return IV_FAIL; } /*Data memory barries instruction,so that bitstream write by the application is complete*/ DATA_SYNC(); if(0 == ps_dec->u1_flushfrm) { if(ps_dec_ip->pv_stream_buffer == NULL) { ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM; ps_dec_op->u4_error_code |= IVD_DEC_FRM_BS_BUF_NULL; return IV_FAIL; } if(ps_dec_ip->u4_num_Bytes <= 0) { ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM; ps_dec_op->u4_error_code |= IVD_DEC_NUMBYTES_INV; return IV_FAIL; } } ps_dec->u1_pic_decode_done = 0; ps_dec_op->u4_num_bytes_consumed = 0; ps_dec->ps_out_buffer = NULL; if(ps_dec_ip->u4_size >= offsetof(ivd_video_decode_ip_t, s_out_buffer)) ps_dec->ps_out_buffer = &ps_dec_ip->s_out_buffer; ps_dec->u4_fmt_conv_cur_row = 0; ps_dec->u4_output_present = 0; ps_dec->s_disp_op.u4_error_code = 1; ps_dec->u4_fmt_conv_num_rows = FMT_CONV_NUM_ROWS; if(0 == ps_dec->u4_share_disp_buf && ps_dec->i4_decode_header == 0) { UWORD32 i; if((ps_dec->ps_out_buffer->u4_num_bufs == 0) || (ps_dec->ps_out_buffer->u4_num_bufs > IVD_VIDDEC_MAX_IO_BUFFERS)) { ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM; ps_dec_op->u4_error_code |= IVD_DISP_FRM_ZERO_OP_BUFS; return IV_FAIL; } for(i = 0; i < ps_dec->ps_out_buffer->u4_num_bufs; i++) { if(ps_dec->ps_out_buffer->pu1_bufs[i] == NULL) { ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM; ps_dec_op->u4_error_code |= IVD_DISP_FRM_OP_BUF_NULL; return IV_FAIL; } if(ps_dec->ps_out_buffer->u4_min_out_buf_size[i] == 0) { ps_dec_op->u4_error_code |= 1 << IVD_UNSUPPORTEDPARAM; ps_dec_op->u4_error_code |= IVD_DISP_FRM_ZERO_OP_BUF_SIZE; return IV_FAIL; } } } if(ps_dec->u4_total_frames_decoded >= NUM_FRAMES_LIMIT) { ps_dec_op->u4_error_code = ERROR_FRAME_LIMIT_OVER; return IV_FAIL; } /* ! */ ps_dec->u4_ts = ps_dec_ip->u4_ts; ps_dec_op->u4_error_code = 0; ps_dec_op->e_pic_type = -1; ps_dec_op->u4_output_present = 0; ps_dec_op->u4_frame_decoded_flag = 0; ps_dec->i4_frametype = -1; ps_dec->i4_content_type = -1; ps_dec->u4_slice_start_code_found = 0; /* In case the deocder is not in flush mode(in shared mode), then decoder has to pick up a buffer to write current frame. 
Check if a frame is available in such cases */ if(ps_dec->u1_init_dec_flag == 1 && ps_dec->u4_share_disp_buf == 1 && ps_dec->u1_flushfrm == 0) { UWORD32 i; WORD32 disp_avail = 0, free_id; /* Check if at least one buffer is available with the codec */ /* If not then return to application with error */ for(i = 0; i < ps_dec->u1_pic_bufs; i++) { if(0 == ps_dec->u4_disp_buf_mapping[i] || 1 == ps_dec->u4_disp_buf_to_be_freed[i]) { disp_avail = 1; break; } } if(0 == disp_avail) { /* If something is queued for display wait for that buffer to be returned */ ps_dec_op->u4_error_code = IVD_DEC_REF_BUF_NULL; ps_dec_op->u4_error_code |= (1 << IVD_UNSUPPORTEDPARAM); return (IV_FAIL); } while(1) { pic_buffer_t *ps_pic_buf; ps_pic_buf = (pic_buffer_t *)ih264_buf_mgr_get_next_free( (buf_mgr_t *)ps_dec->pv_pic_buf_mgr, &free_id); if(ps_pic_buf == NULL) { UWORD32 i, display_queued = 0; /* check if any buffer was given for display which is not returned yet */ for(i = 0; i < (MAX_DISP_BUFS_NEW); i++) { if(0 != ps_dec->u4_disp_buf_mapping[i]) { display_queued = 1; break; } } /* If some buffer is queued for display, then codec has to singal an error and wait for that buffer to be returned. If nothing is queued for display then codec has ownership of all display buffers and it can reuse any of the existing buffers and continue decoding */ if(1 == display_queued) { /* If something is queued for display wait for that buffer to be returned */ ps_dec_op->u4_error_code = IVD_DEC_REF_BUF_NULL; ps_dec_op->u4_error_code |= (1 << IVD_UNSUPPORTEDPARAM); return (IV_FAIL); } } else { /* If the buffer is with display, then mark it as in use and then look for a buffer again */ if(1 == ps_dec->u4_disp_buf_mapping[free_id]) { ih264_buf_mgr_set_status( (buf_mgr_t *)ps_dec->pv_pic_buf_mgr, free_id, BUF_MGR_IO); } else { /** * Found a free buffer for present call. Release it now. * Will be again obtained later. 
*/ ih264_buf_mgr_release((buf_mgr_t *)ps_dec->pv_pic_buf_mgr, free_id, BUF_MGR_IO); break; } } } } if(ps_dec->u1_flushfrm) { if(ps_dec->u1_init_dec_flag == 0) { /*Come out of flush mode and return*/ ps_dec->u1_flushfrm = 0; return (IV_FAIL); } ih264d_get_next_display_field(ps_dec, ps_dec->ps_out_buffer, &(ps_dec->s_disp_op)); if(0 == ps_dec->s_disp_op.u4_error_code) { /* check output buffer size given by the application */ if(check_app_out_buf_size(ps_dec) != IV_SUCCESS) { ps_dec_op->u4_error_code= IVD_DISP_FRM_ZERO_OP_BUF_SIZE; return (IV_FAIL); } ps_dec->u4_fmt_conv_cur_row = 0; ps_dec->u4_fmt_conv_num_rows = ps_dec->s_disp_frame_info.u4_y_ht; ih264d_format_convert(ps_dec, &(ps_dec->s_disp_op), ps_dec->u4_fmt_conv_cur_row, ps_dec->u4_fmt_conv_num_rows); ps_dec->u4_fmt_conv_cur_row += ps_dec->u4_fmt_conv_num_rows; ps_dec->u4_output_present = 1; } ih264d_release_display_field(ps_dec, &(ps_dec->s_disp_op)); ps_dec_op->u4_pic_wd = (UWORD32)ps_dec->u2_disp_width; ps_dec_op->u4_pic_ht = (UWORD32)ps_dec->u2_disp_height; ps_dec_op->u4_new_seq = 0; ps_dec_op->u4_output_present = ps_dec->u4_output_present; ps_dec_op->u4_progressive_frame_flag = ps_dec->s_disp_op.u4_progressive_frame_flag; ps_dec_op->e_output_format = ps_dec->s_disp_op.e_output_format; ps_dec_op->s_disp_frm_buf = ps_dec->s_disp_op.s_disp_frm_buf; ps_dec_op->e4_fld_type = ps_dec->s_disp_op.e4_fld_type; ps_dec_op->u4_ts = ps_dec->s_disp_op.u4_ts; ps_dec_op->u4_disp_buf_id = ps_dec->s_disp_op.u4_disp_buf_id; /*In the case of flush ,since no frame is decoded set pic type as invalid*/ ps_dec_op->u4_is_ref_flag = -1; ps_dec_op->e_pic_type = IV_NA_FRAME; ps_dec_op->u4_frame_decoded_flag = 0; if(0 == ps_dec->s_disp_op.u4_error_code) { return (IV_SUCCESS); } else return (IV_FAIL); } if(ps_dec->u1_res_changed == 1) { /*if resolution has changed and all buffers have been flushed, reset decoder*/ ih264d_init_decoder(ps_dec); } ps_dec->u4_prev_nal_skipped = 0; ps_dec->u2_cur_mb_addr = 0; ps_dec->u2_total_mbs_coded = 0; ps_dec->u2_cur_slice_num = 0; ps_dec->cur_dec_mb_num = 0; ps_dec->cur_recon_mb_num = 0; ps_dec->u4_first_slice_in_pic = 1; ps_dec->u1_slice_header_done = 0; ps_dec->u1_dangling_field = 0; ps_dec->u4_dec_thread_created = 0; ps_dec->u4_bs_deblk_thread_created = 0; ps_dec->u4_cur_bs_mb_num = 0; ps_dec->u4_start_recon_deblk = 0; ps_dec->u4_sps_cnt_in_process = 0; DEBUG_THREADS_PRINTF(" Starting process call\n"); ps_dec->u4_pic_buf_got = 0; do { WORD32 buf_size; pu1_buf = (UWORD8*)ps_dec_ip->pv_stream_buffer + ps_dec_op->u4_num_bytes_consumed; u4_max_ofst = ps_dec_ip->u4_num_Bytes - ps_dec_op->u4_num_bytes_consumed; /* If dynamic bitstream buffer is not allocated and * header decode is done, then allocate dynamic bitstream buffer */ if((NULL == ps_dec->pu1_bits_buf_dynamic) && (ps_dec->i4_header_decoded & 1)) { WORD32 size; void *pv_buf; void *pv_mem_ctxt = ps_dec->pv_mem_ctxt; size = MAX(256000, ps_dec->u2_pic_wd * ps_dec->u2_pic_ht * 3 / 2); pv_buf = ps_dec->pf_aligned_alloc(pv_mem_ctxt, 128, size); RETURN_IF((NULL == pv_buf), IV_FAIL); ps_dec->pu1_bits_buf_dynamic = pv_buf; ps_dec->u4_dynamic_bits_buf_size = size; } if(ps_dec->pu1_bits_buf_dynamic) { pu1_bitstrm_buf = ps_dec->pu1_bits_buf_dynamic; buf_size = ps_dec->u4_dynamic_bits_buf_size; } else { pu1_bitstrm_buf = ps_dec->pu1_bits_buf_static; buf_size = ps_dec->u4_static_bits_buf_size; } u4_next_is_aud = 0; buflen = ih264d_find_start_code(pu1_buf, 0, u4_max_ofst, &u4_length_of_start_code, &u4_next_is_aud); if(buflen == -1) buflen = 0; /* Ignore bytes beyond the allocated size 
of intermediate buffer */ /* Since 8 bytes are read ahead, ensure 8 bytes are free at the end of the buffer, which will be memset to 0 after emulation prevention */ buflen = MIN(buflen, buf_size - 8); bytes_consumed = buflen + u4_length_of_start_code; ps_dec_op->u4_num_bytes_consumed += bytes_consumed; { UWORD8 u1_firstbyte, u1_nal_ref_idc; if(ps_dec->i4_app_skip_mode == IVD_SKIP_B) { u1_firstbyte = *(pu1_buf + u4_length_of_start_code); u1_nal_ref_idc = (UWORD8)(NAL_REF_IDC(u1_firstbyte)); if(u1_nal_ref_idc == 0) { /*skip non reference frames*/ cur_slice_is_nonref = 1; continue; } else { if(1 == cur_slice_is_nonref) { /*We have encountered a referenced frame,return to app*/ ps_dec_op->u4_num_bytes_consumed -= bytes_consumed; ps_dec_op->e_pic_type = IV_B_FRAME; ps_dec_op->u4_error_code = IVD_DEC_FRM_SKIPPED; ps_dec_op->u4_error_code |= (1 << IVD_UNSUPPORTEDPARAM); ps_dec_op->u4_frame_decoded_flag = 0; ps_dec_op->u4_size = sizeof(ivd_video_decode_op_t); /*signal the decode thread*/ ih264d_signal_decode_thread(ps_dec); /* close deblock thread if it is not closed yet*/ if(ps_dec->u4_num_cores == 3) { ih264d_signal_bs_deblk_thread(ps_dec); } return (IV_FAIL); } } } } if(buflen) { memcpy(pu1_bitstrm_buf, pu1_buf + u4_length_of_start_code, buflen); /* Decoder may read extra 8 bytes near end of the frame */ if((buflen + 8) < buf_size) { memset(pu1_bitstrm_buf + buflen, 0, 8); } u4_first_start_code_found = 1; } else { /*start code not found*/ if(u4_first_start_code_found == 0) { /*no start codes found in current process call*/ ps_dec->i4_error_code = ERROR_START_CODE_NOT_FOUND; ps_dec_op->u4_error_code |= 1 << IVD_INSUFFICIENTDATA; if(ps_dec->u4_pic_buf_got == 0) { ih264d_fill_output_struct_from_context(ps_dec, ps_dec_op); ps_dec_op->u4_error_code = ps_dec->i4_error_code; ps_dec_op->u4_frame_decoded_flag = 0; return (IV_FAIL); } else { ps_dec->u1_pic_decode_done = 1; continue; } } else { /* a start code has already been found earlier in the same process call*/ frame_data_left = 0; header_data_left = 0; continue; } } ps_dec->u4_return_to_app = 0; ret = ih264d_parse_nal_unit(dec_hdl, ps_dec_op, pu1_bitstrm_buf, buflen); if(ret != OK) { UWORD32 error = ih264d_map_error(ret); ps_dec_op->u4_error_code = error | ret; api_ret_value = IV_FAIL; if((ret == IVD_RES_CHANGED) || (ret == IVD_MEM_ALLOC_FAILED) || (ret == ERROR_UNAVAIL_PICBUF_T) || (ret == ERROR_UNAVAIL_MVBUF_T) || (ret == ERROR_INV_SPS_PPS_T) || (ret == IVD_DISP_FRM_ZERO_OP_BUF_SIZE)) { ps_dec->u4_slice_start_code_found = 0; break; } if((ret == ERROR_INCOMPLETE_FRAME) || (ret == ERROR_DANGLING_FIELD_IN_PIC)) { ps_dec_op->u4_num_bytes_consumed -= bytes_consumed; api_ret_value = IV_FAIL; break; } if(ret == ERROR_IN_LAST_SLICE_OF_PIC) { api_ret_value = IV_FAIL; break; } } if(ps_dec->u4_return_to_app) { /*We have encountered a referenced frame,return to app*/ ps_dec_op->u4_num_bytes_consumed -= bytes_consumed; ps_dec_op->u4_error_code = IVD_DEC_FRM_SKIPPED; ps_dec_op->u4_error_code |= (1 << IVD_UNSUPPORTEDPARAM); ps_dec_op->u4_frame_decoded_flag = 0; ps_dec_op->u4_size = sizeof(ivd_video_decode_op_t); /*signal the decode thread*/ ih264d_signal_decode_thread(ps_dec); /* close deblock thread if it is not closed yet*/ if(ps_dec->u4_num_cores == 3) { ih264d_signal_bs_deblk_thread(ps_dec); } return (IV_FAIL); } header_data_left = ((ps_dec->i4_decode_header == 1) && (ps_dec->i4_header_decoded != 3) && (ps_dec_op->u4_num_bytes_consumed < ps_dec_ip->u4_num_Bytes)); frame_data_left = (((ps_dec->i4_decode_header == 0) && ((ps_dec->u1_pic_decode_done == 0) || 
(u4_next_is_aud == 1))) && (ps_dec_op->u4_num_bytes_consumed < ps_dec_ip->u4_num_Bytes)); } while(( header_data_left == 1)||(frame_data_left == 1)); if((ps_dec->u4_pic_buf_got == 1) && (ret != IVD_MEM_ALLOC_FAILED) && ps_dec->u2_total_mbs_coded < ps_dec->u2_frm_ht_in_mbs * ps_dec->u2_frm_wd_in_mbs) { // last slice - missing/corruption WORD32 num_mb_skipped; WORD32 prev_slice_err; pocstruct_t temp_poc; WORD32 ret1; WORD32 ht_in_mbs; ht_in_mbs = ps_dec->u2_pic_ht >> (4 + ps_dec->ps_cur_slice->u1_field_pic_flag); num_mb_skipped = (ht_in_mbs * ps_dec->u2_frm_wd_in_mbs) - ps_dec->u2_total_mbs_coded; if(ps_dec->u4_first_slice_in_pic && (ps_dec->u4_pic_buf_got == 0)) prev_slice_err = 1; else prev_slice_err = 2; if(ps_dec->u4_first_slice_in_pic && (ps_dec->u2_total_mbs_coded == 0)) prev_slice_err = 1; ret1 = ih264d_mark_err_slice_skip(ps_dec, num_mb_skipped, ps_dec->u1_nal_unit_type == IDR_SLICE_NAL, ps_dec->ps_cur_slice->u2_frame_num, &temp_poc, prev_slice_err); if((ret1 == ERROR_UNAVAIL_PICBUF_T) || (ret1 == ERROR_UNAVAIL_MVBUF_T) || (ret1 == ERROR_INV_SPS_PPS_T)) { ret = ret1; } } if((ret == IVD_RES_CHANGED) || (ret == IVD_MEM_ALLOC_FAILED) || (ret == ERROR_UNAVAIL_PICBUF_T) || (ret == ERROR_UNAVAIL_MVBUF_T) || (ret == ERROR_INV_SPS_PPS_T)) { /* signal the decode thread */ ih264d_signal_decode_thread(ps_dec); /* close deblock thread if it is not closed yet */ if(ps_dec->u4_num_cores == 3) { ih264d_signal_bs_deblk_thread(ps_dec); } /* dont consume bitstream for change in resolution case */ if(ret == IVD_RES_CHANGED) { ps_dec_op->u4_num_bytes_consumed -= bytes_consumed; } return IV_FAIL; } if(ps_dec->u1_separate_parse) { /* If Format conversion is not complete, complete it here */ if(ps_dec->u4_num_cores == 2) { /*do deblocking of all mbs*/ if((ps_dec->u4_nmb_deblk == 0) &&(ps_dec->u4_start_recon_deblk == 1) && (ps_dec->ps_cur_sps->u1_mb_aff_flag == 0)) { UWORD32 u4_num_mbs,u4_max_addr; tfr_ctxt_t s_tfr_ctxt; tfr_ctxt_t *ps_tfr_cxt = &s_tfr_ctxt; pad_mgr_t *ps_pad_mgr = &ps_dec->s_pad_mgr; /*BS is done for all mbs while parsing*/ u4_max_addr = (ps_dec->u2_frm_wd_in_mbs * ps_dec->u2_frm_ht_in_mbs) - 1; ps_dec->u4_cur_bs_mb_num = u4_max_addr + 1; ih264d_init_deblk_tfr_ctxt(ps_dec, ps_pad_mgr, ps_tfr_cxt, ps_dec->u2_frm_wd_in_mbs, 0); u4_num_mbs = u4_max_addr - ps_dec->u4_cur_deblk_mb_num + 1; DEBUG_PERF_PRINTF("mbs left for deblocking= %d \n",u4_num_mbs); if(u4_num_mbs != 0) ih264d_check_mb_map_deblk(ps_dec, u4_num_mbs, ps_tfr_cxt,1); ps_dec->u4_start_recon_deblk = 0; } } /*signal the decode thread*/ ih264d_signal_decode_thread(ps_dec); /* close deblock thread if it is not closed yet*/ if(ps_dec->u4_num_cores == 3) { ih264d_signal_bs_deblk_thread(ps_dec); } } DATA_SYNC(); if((ps_dec_op->u4_error_code & 0xff) != ERROR_DYNAMIC_RESOLUTION_NOT_SUPPORTED) { ps_dec_op->u4_pic_wd = (UWORD32)ps_dec->u2_disp_width; ps_dec_op->u4_pic_ht = (UWORD32)ps_dec->u2_disp_height; } //Report if header (sps and pps) has not been decoded yet if(ps_dec->i4_header_decoded != 3) { ps_dec_op->u4_error_code |= (1 << IVD_INSUFFICIENTDATA); } if(ps_dec->i4_decode_header == 1 && ps_dec->i4_header_decoded != 3) { ps_dec_op->u4_error_code |= (1 << IVD_INSUFFICIENTDATA); } if(ps_dec->u4_prev_nal_skipped) { /*We have encountered a referenced frame,return to app*/ ps_dec_op->u4_error_code = IVD_DEC_FRM_SKIPPED; ps_dec_op->u4_error_code |= (1 << IVD_UNSUPPORTEDPARAM); ps_dec_op->u4_frame_decoded_flag = 0; ps_dec_op->u4_size = sizeof(ivd_video_decode_op_t); /* close deblock thread if it is not closed yet*/ if(ps_dec->u4_num_cores == 
3) { ih264d_signal_bs_deblk_thread(ps_dec); } return (IV_FAIL); } if((ps_dec->u4_pic_buf_got == 1) && (ERROR_DANGLING_FIELD_IN_PIC != i4_err_status)) { /* * For field pictures, set the bottom and top picture decoded u4_flag correctly. */ if(ps_dec->ps_cur_slice->u1_field_pic_flag) { if(1 == ps_dec->ps_cur_slice->u1_bottom_field_flag) { ps_dec->u1_top_bottom_decoded |= BOT_FIELD_ONLY; } else { ps_dec->u1_top_bottom_decoded |= TOP_FIELD_ONLY; } } else { ps_dec->u1_top_bottom_decoded = TOP_FIELD_ONLY | BOT_FIELD_ONLY; } /* if new frame in not found (if we are still getting slices from previous frame) * ih264d_deblock_display is not called. Such frames will not be added to reference /display */ if ((ps_dec->ps_dec_err_status->u1_err_flag & REJECT_CUR_PIC) == 0) { /* Calling Function to deblock Picture and Display */ ret = ih264d_deblock_display(ps_dec); } /*set to complete ,as we dont support partial frame decode*/ if(ps_dec->i4_header_decoded == 3) { ps_dec->u2_total_mbs_coded = ps_dec->ps_cur_sps->u2_max_mb_addr + 1; } /*Update the i4_frametype at the end of picture*/ if(ps_dec->ps_cur_slice->u1_nal_unit_type == IDR_SLICE_NAL) { ps_dec->i4_frametype = IV_IDR_FRAME; } else if(ps_dec->i4_pic_type == B_SLICE) { ps_dec->i4_frametype = IV_B_FRAME; } else if(ps_dec->i4_pic_type == P_SLICE) { ps_dec->i4_frametype = IV_P_FRAME; } else if(ps_dec->i4_pic_type == I_SLICE) { ps_dec->i4_frametype = IV_I_FRAME; } else { H264_DEC_DEBUG_PRINT("Shouldn't come here\n"); } //Update the content type ps_dec->i4_content_type = ps_dec->ps_cur_slice->u1_field_pic_flag; ps_dec->u4_total_frames_decoded = ps_dec->u4_total_frames_decoded + 2; ps_dec->u4_total_frames_decoded = ps_dec->u4_total_frames_decoded - ps_dec->ps_cur_slice->u1_field_pic_flag; } /* close deblock thread if it is not closed yet*/ if(ps_dec->u4_num_cores == 3) { ih264d_signal_bs_deblk_thread(ps_dec); } { /* In case the decoder is configured to run in low delay mode, * then get display buffer and then format convert. * Note in this mode, format conversion does not run paralelly in a thread and adds to the codec cycles */ if((IVD_DECODE_FRAME_OUT == ps_dec->e_frm_out_mode) && ps_dec->u1_init_dec_flag) { ih264d_get_next_display_field(ps_dec, ps_dec->ps_out_buffer, &(ps_dec->s_disp_op)); if(0 == ps_dec->s_disp_op.u4_error_code) { ps_dec->u4_fmt_conv_cur_row = 0; ps_dec->u4_output_present = 1; } } ih264d_fill_output_struct_from_context(ps_dec, ps_dec_op); /* If Format conversion is not complete, complete it here */ if(ps_dec->u4_output_present && (ps_dec->u4_fmt_conv_cur_row < ps_dec->s_disp_frame_info.u4_y_ht)) { ps_dec->u4_fmt_conv_num_rows = ps_dec->s_disp_frame_info.u4_y_ht - ps_dec->u4_fmt_conv_cur_row; ih264d_format_convert(ps_dec, &(ps_dec->s_disp_op), ps_dec->u4_fmt_conv_cur_row, ps_dec->u4_fmt_conv_num_rows); ps_dec->u4_fmt_conv_cur_row += ps_dec->u4_fmt_conv_num_rows; } ih264d_release_display_field(ps_dec, &(ps_dec->s_disp_op)); } if(ps_dec->i4_decode_header == 1 && (ps_dec->i4_header_decoded & 1) == 1) { ps_dec_op->u4_progressive_frame_flag = 1; if((NULL != ps_dec->ps_cur_sps) && (1 == (ps_dec->ps_cur_sps->u1_is_valid))) { if((0 == ps_dec->ps_sps->u1_frame_mbs_only_flag) && (0 == ps_dec->ps_sps->u1_mb_aff_flag)) ps_dec_op->u4_progressive_frame_flag = 0; } } if((TOP_FIELD_ONLY | BOT_FIELD_ONLY) == ps_dec->u1_top_bottom_decoded) { ps_dec->u1_top_bottom_decoded = 0; } /*--------------------------------------------------------------------*/ /* Do End of Pic processing. 
*/ /* Should be called only if frame was decoded in previous process call*/ /*--------------------------------------------------------------------*/ if(ps_dec->u4_pic_buf_got == 1) { if(1 == ps_dec->u1_last_pic_not_decoded) { ret = ih264d_end_of_pic_dispbuf_mgr(ps_dec); if(ret != OK) return ret; ret = ih264d_end_of_pic(ps_dec); if(ret != OK) return ret; } else { ret = ih264d_end_of_pic(ps_dec); if(ret != OK) return ret; } } /*Data memory barrier instruction,so that yuv write by the library is complete*/ DATA_SYNC(); H264_DEC_DEBUG_PRINT("The num bytes consumed: %d\n", ps_dec_op->u4_num_bytes_consumed); return api_ret_value; }
13,207
123,210
0
void RenderWidgetHostViewAura::OnBlur() { host_->SetActive(false); host_->Blur(); DetachFromInputMethod(); host_->SetInputMethodActive(false); if (is_fullscreen_ && !in_shutdown_) { in_shutdown_ = true; host_->Shutdown(); } }
13,208
22,101
0
NORET_TYPE void complete_and_exit(struct completion *comp, long code) { if (comp) complete(comp); do_exit(code); }
13,209
132,736
0
void ChromotingInstance::InjectClipboardEvent( const protocol::ClipboardEvent& event) { scoped_ptr<base::DictionaryValue> data(new base::DictionaryValue()); data->SetString("mimeType", event.mime_type()); data->SetString("item", event.data()); PostLegacyJsonMessage("injectClipboardItem", data.Pass()); }
13,210
150,589
0
~DataReductionProxyConfigServiceClient() { network_connection_tracker_->RemoveNetworkConnectionObserver(this); }
13,211
10,624
0
Ins_MD( TT_ExecContext exc, FT_Long* args ) { FT_UShort K, L; FT_F26Dot6 D; K = (FT_UShort)args[1]; L = (FT_UShort)args[0]; if ( BOUNDS( L, exc->zp0.n_points ) || BOUNDS( K, exc->zp1.n_points ) ) { if ( exc->pedantic_hinting ) exc->error = FT_THROW( Invalid_Reference ); D = 0; } else { if ( exc->opcode & 1 ) D = PROJECT( exc->zp0.cur + L, exc->zp1.cur + K ); else { /* XXX: UNDOCUMENTED: twilight zone special case */ if ( exc->GS.gep0 == 0 || exc->GS.gep1 == 0 ) { FT_Vector* vec1 = exc->zp0.org + L; FT_Vector* vec2 = exc->zp1.org + K; D = DUALPROJ( vec1, vec2 ); } else { FT_Vector* vec1 = exc->zp0.orus + L; FT_Vector* vec2 = exc->zp1.orus + K; if ( exc->metrics.x_scale == exc->metrics.y_scale ) { /* this should be faster */ D = DUALPROJ( vec1, vec2 ); D = FT_MulFix( D, exc->metrics.x_scale ); } else { FT_Vector vec; vec.x = FT_MulFix( vec1->x - vec2->x, exc->metrics.x_scale ); vec.y = FT_MulFix( vec1->y - vec2->y, exc->metrics.y_scale ); D = FAST_DUALPROJ( &vec ); } } } } #ifdef TT_SUPPORT_SUBPIXEL_HINTING_INFINALITY /* Disable Type 2 Vacuform Rounds - e.g. Arial Narrow */ if ( SUBPIXEL_HINTING_INFINALITY && exc->ignore_x_mode && FT_ABS( D ) == 64 ) D += 1; #endif /* TT_SUPPORT_SUBPIXEL_HINTING_INFINALITY */ args[0] = D; }
13,212
175,830
0
static int swap_frame_buffers (VP8_COMMON *cm) { int err = 0; /* The alternate reference frame or golden frame can be updated * using the new, last, or golden/alt ref frame. If it * is updated using the newly decoded frame it is a refresh. * An update using the last or golden/alt ref frame is a copy. */ if (cm->copy_buffer_to_arf) { int new_fb = 0; if (cm->copy_buffer_to_arf == 1) new_fb = cm->lst_fb_idx; else if (cm->copy_buffer_to_arf == 2) new_fb = cm->gld_fb_idx; else err = -1; ref_cnt_fb (cm->fb_idx_ref_cnt, &cm->alt_fb_idx, new_fb); } if (cm->copy_buffer_to_gf) { int new_fb = 0; if (cm->copy_buffer_to_gf == 1) new_fb = cm->lst_fb_idx; else if (cm->copy_buffer_to_gf == 2) new_fb = cm->alt_fb_idx; else err = -1; ref_cnt_fb (cm->fb_idx_ref_cnt, &cm->gld_fb_idx, new_fb); } if (cm->refresh_golden_frame) ref_cnt_fb (cm->fb_idx_ref_cnt, &cm->gld_fb_idx, cm->new_fb_idx); if (cm->refresh_alt_ref_frame) ref_cnt_fb (cm->fb_idx_ref_cnt, &cm->alt_fb_idx, cm->new_fb_idx); if (cm->refresh_last_frame) { ref_cnt_fb (cm->fb_idx_ref_cnt, &cm->lst_fb_idx, cm->new_fb_idx); cm->frame_to_show = &cm->yv12_fb[cm->lst_fb_idx]; } else cm->frame_to_show = &cm->yv12_fb[cm->new_fb_idx]; cm->fb_idx_ref_cnt[cm->new_fb_idx]--; return err; }
13,213
178,624
1
static gboolean nbd_negotiate_continue(QIOChannel *ioc, GIOCondition condition, void *opaque) { qemu_coroutine_enter(opaque); return TRUE; }
13,214
19,018
0
static __sum16 tcp_v4_checksum_init(struct sk_buff *skb) { const struct iphdr *iph = ip_hdr(skb); if (skb->ip_summed == CHECKSUM_COMPLETE) { if (!tcp_v4_check(skb->len, iph->saddr, iph->daddr, skb->csum)) { skb->ip_summed = CHECKSUM_UNNECESSARY; return 0; } } skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, skb->len, IPPROTO_TCP, 0); if (skb->len <= 76) { return __skb_checksum_complete(skb); } return 0; }
13,215
25,113
0
struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4, struct sock *sk) { struct rtable *rt = __ip_route_output_key(net, flp4); if (IS_ERR(rt)) return rt; if (flp4->flowi4_proto) rt = (struct rtable *) xfrm_lookup(net, &rt->dst, flowi4_to_flowi(flp4), sk, 0); return rt; }
13,216
173,313
0
rechunk_length(struct IDAT *idat) /* Return the length for the next IDAT chunk, taking into account * rechunking. */ { png_uint_32 len = idat->global->idat_max; if (len == 0) /* use original chunk lengths */ { const struct IDAT_list *cur; unsigned int count; if (idat->idat_index == 0) /* at the new chunk (first time) */ return idat->idat_length; /* use the cache */ /* Otherwise rechunk_length is called at the end of a chunk for the length * of the next one. */ cur = idat->idat_cur; count = idat->idat_count; assert(idat->idat_index == idat->idat_length && idat->idat_length == cur->lengths[count]); /* Return length of the *next* chunk */ if (++count < cur->count) return cur->lengths[count]; /* End of this list */ assert(cur != idat->idat_list_tail); cur = cur->next; assert(cur != NULL && cur->count > 0); return cur->lengths[0]; } else /* rechunking */ { /* The chunk size is the lesser of file->idat_max and the number * of remaining bytes. */ png_uint_32 have = idat->idat_length - idat->idat_index; if (len > have) { struct IDAT_list *cur = idat->idat_cur; unsigned int j = idat->idat_count+1; /* the next IDAT in the list */ do { /* Add up the remaining bytes. This can't overflow because the * individual lengths are always <= 0x7fffffff, so when we add two * of them overflow is not possible. */ assert(cur != NULL); for (;;) { /* NOTE: IDAT_list::count here, not IDAT_list::length */ for (; j < cur->count; ++j) { have += cur->lengths[j]; if (len <= have) return len; } /* If this was the end return the count of the available bytes */ if (cur == idat->idat_list_tail) return have; cur = cur->next; j = 0; } } while (len > have); } return len; } }
13,217
118,428
0
void RenderFrameImpl::OnCSSInsertRequest(const std::string& css) { frame_->document().insertStyleSheet(WebString::fromUTF8(css)); }
13,218
40,774
0
void x25_destroy_socket_from_timer(struct sock *sk) { sock_hold(sk); bh_lock_sock(sk); __x25_destroy_socket(sk); bh_unlock_sock(sk); sock_put(sk); }
13,219
23,174
0
static int _nfs4_proc_open(struct nfs4_opendata *data) { struct inode *dir = data->dir->d_inode; struct nfs_server *server = NFS_SERVER(dir); struct nfs_openargs *o_arg = &data->o_arg; struct nfs_openres *o_res = &data->o_res; int status; status = nfs4_run_open_task(data, 0); if (!data->rpc_done) return status; if (status != 0) { if (status == -NFS4ERR_BADNAME && !(o_arg->open_flags & O_CREAT)) return -ENOENT; return status; } if (o_arg->open_flags & O_CREAT) { update_changeattr(dir, &o_res->cinfo); nfs_post_op_update_inode(dir, o_res->dir_attr); } else nfs_refresh_inode(dir, o_res->dir_attr); if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) server->caps &= ~NFS_CAP_POSIX_LOCK; if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { status = _nfs4_proc_open_confirm(data); if (status != 0) return status; } if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) _nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr); return 0; }
13,220
6,852
0
void http_resync_states(struct stream *s) { struct http_txn *txn = s->txn; #ifdef DEBUG_FULL int old_req_state = txn->req.msg_state; int old_res_state = txn->rsp.msg_state; #endif http_sync_req_state(s); while (1) { if (!http_sync_res_state(s)) break; if (!http_sync_req_state(s)) break; } DPRINTF(stderr,"[%u] %s: stream=%p old=%s,%s cur=%s,%s " "req->analysers=0x%08x res->analysers=0x%08x\n", now_ms, __FUNCTION__, s, h1_msg_state_str(old_req_state), h1_msg_state_str(old_res_state), h1_msg_state_str(txn->req.msg_state), h1_msg_state_str(txn->rsp.msg_state), s->req.analysers, s->res.analysers); /* OK, both state machines agree on a compatible state. * There are a few cases we're interested in : * - HTTP_MSG_CLOSED on both sides means we've reached the end in both * directions, so let's simply disable both analysers. * - HTTP_MSG_CLOSED on the response only or HTTP_MSG_ERROR on either * means we must abort the request. * - HTTP_MSG_TUNNEL on either means we have to disable analyser on * corresponding channel. * - HTTP_MSG_DONE or HTTP_MSG_CLOSED on the request and HTTP_MSG_DONE * on the response with server-close mode means we've completed one * request and we must re-initialize the server connection. */ if (txn->req.msg_state == HTTP_MSG_CLOSED && txn->rsp.msg_state == HTTP_MSG_CLOSED) { s->req.analysers &= AN_REQ_FLT_END; channel_auto_close(&s->req); channel_auto_read(&s->req); s->res.analysers &= AN_RES_FLT_END; channel_auto_close(&s->res); channel_auto_read(&s->res); } else if (txn->rsp.msg_state == HTTP_MSG_CLOSED || txn->rsp.msg_state == HTTP_MSG_ERROR || txn->req.msg_state == HTTP_MSG_ERROR) { s->res.analysers &= AN_RES_FLT_END; channel_auto_close(&s->res); channel_auto_read(&s->res); s->req.analysers &= AN_REQ_FLT_END; channel_abort(&s->req); channel_auto_close(&s->req); channel_auto_read(&s->req); channel_truncate(&s->req); } else if (txn->req.msg_state == HTTP_MSG_TUNNEL || txn->rsp.msg_state == HTTP_MSG_TUNNEL) { if (txn->req.msg_state == HTTP_MSG_TUNNEL) { s->req.analysers &= AN_REQ_FLT_END; if (HAS_REQ_DATA_FILTERS(s)) s->req.analysers |= AN_REQ_FLT_XFER_DATA; } if (txn->rsp.msg_state == HTTP_MSG_TUNNEL) { s->res.analysers &= AN_RES_FLT_END; if (HAS_RSP_DATA_FILTERS(s)) s->res.analysers |= AN_RES_FLT_XFER_DATA; } channel_auto_close(&s->req); channel_auto_read(&s->req); channel_auto_close(&s->res); channel_auto_read(&s->res); } else if ((txn->req.msg_state == HTTP_MSG_DONE || txn->req.msg_state == HTTP_MSG_CLOSED) && txn->rsp.msg_state == HTTP_MSG_DONE && ((txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_SCL || (txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_KAL)) { /* server-close/keep-alive: terminate this transaction, * possibly killing the server connection and reinitialize * a fresh-new transaction, but only once we're sure there's * enough room in the request and response buffer to process * another request. They must not hold any pending output data * and the response buffer must realigned * (realign is done is http_end_txn_clean_session). */ if (s->req.buf->o) s->req.flags |= CF_WAKE_WRITE; else if (s->res.buf->o) s->res.flags |= CF_WAKE_WRITE; else { s->req.analysers = AN_REQ_FLT_END; s->res.analysers = AN_RES_FLT_END; txn->flags |= TX_WAIT_CLEANUP; } } }
13,221
64,687
0
set_bm_backward_skip(UChar* s, UChar* end, OnigEncoding enc ARG_UNUSED, int** skip) { int i, len; if (IS_NULL(*skip)) { *skip = (int* )xmalloc(sizeof(int) * ONIG_CHAR_TABLE_SIZE); if (IS_NULL(*skip)) return ONIGERR_MEMORY; } len = end - s; for (i = 0; i < ONIG_CHAR_TABLE_SIZE; i++) (*skip)[i] = len; for (i = len - 1; i > 0; i--) (*skip)[s[i]] = i; return 0; }
13,222
115,741
0
void ConnectionToClient::set_input_stub(protocol::InputStub* input_stub) { input_stub_ = input_stub; }
13,223
166,232
0
void MediaStreamManager::SetUpRequest(const std::string& label) { DCHECK_CURRENTLY_ON(BrowserThread::IO); DeviceRequest* request = FindRequest(label); if (!request) { DVLOG(1) << "SetUpRequest label " << label << " doesn't exist!!"; return; // This can happen if the request has been canceled. } request->SetAudioType(AdjustAudioStreamTypeBasedOnCommandLineSwitches( request->controls.audio.stream_type)); request->SetVideoType(request->controls.video.stream_type); const bool is_display_capture = request->video_type() == MEDIA_DISPLAY_VIDEO_CAPTURE; if (is_display_capture && !SetUpDisplayCaptureRequest(request)) { FinalizeRequestFailed(label, request, MEDIA_DEVICE_SCREEN_CAPTURE_FAILURE); return; } const bool is_tab_capture = request->audio_type() == MEDIA_GUM_TAB_AUDIO_CAPTURE || request->video_type() == MEDIA_GUM_TAB_VIDEO_CAPTURE; if (is_tab_capture) { if (!SetUpTabCaptureRequest(request, label)) { FinalizeRequestFailed(label, request, MEDIA_DEVICE_TAB_CAPTURE_FAILURE); } return; } const bool is_screen_capture = request->video_type() == MEDIA_GUM_DESKTOP_VIDEO_CAPTURE; if (is_screen_capture && !SetUpScreenCaptureRequest(request)) { FinalizeRequestFailed(label, request, MEDIA_DEVICE_SCREEN_CAPTURE_FAILURE); return; } if (!is_tab_capture && !is_screen_capture && !is_display_capture) { if (IsDeviceMediaType(request->audio_type()) || IsDeviceMediaType(request->video_type())) { StartEnumeration(request, label); return; } if (!SetUpDeviceCaptureRequest(request, MediaDeviceEnumeration())) { FinalizeRequestFailed(label, request, MEDIA_DEVICE_NO_HARDWARE); return; } } ReadOutputParamsAndPostRequestToUI(label, request, MediaDeviceEnumeration()); }
13,224
112,566
0
void Document::setSelectedStylesheetSet(const String& aString) { m_styleSheetCollection->setSelectedStylesheetSetName(aString); styleResolverChanged(DeferRecalcStyle); }
13,225
1,666
0
gx_dc_binary_masked_load(gx_device_color * pdevc, const gs_gstate * pgs, gx_device * dev, gs_color_select_t select) { int code = (*gx_dc_type_data_ht_binary.load) (pdevc, pgs, dev, select); if (code < 0) return code; FINISH_PATTERN_LOAD }
13,226
47,856
0
int __sk_mem_schedule(struct sock *sk, int size, int kind) { struct proto *prot = sk->sk_prot; int amt = sk_mem_pages(size); long allocated; sk->sk_forward_alloc += amt * SK_MEM_QUANTUM; allocated = sk_memory_allocated_add(sk, amt); if (mem_cgroup_sockets_enabled && sk->sk_memcg && !mem_cgroup_charge_skmem(sk->sk_memcg, amt)) goto suppress_allocation; /* Under limit. */ if (allocated <= sk_prot_mem_limits(sk, 0)) { sk_leave_memory_pressure(sk); return 1; } /* Under pressure. */ if (allocated > sk_prot_mem_limits(sk, 1)) sk_enter_memory_pressure(sk); /* Over hard limit. */ if (allocated > sk_prot_mem_limits(sk, 2)) goto suppress_allocation; /* guarantee minimum buffer size under pressure */ if (kind == SK_MEM_RECV) { if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0]) return 1; } else { /* SK_MEM_SEND */ if (sk->sk_type == SOCK_STREAM) { if (sk->sk_wmem_queued < prot->sysctl_wmem[0]) return 1; } else if (atomic_read(&sk->sk_wmem_alloc) < prot->sysctl_wmem[0]) return 1; } if (sk_has_memory_pressure(sk)) { int alloc; if (!sk_under_memory_pressure(sk)) return 1; alloc = sk_sockets_allocated_read_positive(sk); if (sk_prot_mem_limits(sk, 2) > alloc * sk_mem_pages(sk->sk_wmem_queued + atomic_read(&sk->sk_rmem_alloc) + sk->sk_forward_alloc)) return 1; } suppress_allocation: if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) { sk_stream_moderate_sndbuf(sk); /* Fail only if socket is _under_ its sndbuf. * In this case we cannot block, so that we have to fail. */ if (sk->sk_wmem_queued + size >= sk->sk_sndbuf) return 1; } trace_sock_exceed_buf_limit(sk, prot, allocated); /* Alas. Undo changes. */ sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM; sk_memory_allocated_sub(sk, amt); if (mem_cgroup_sockets_enabled && sk->sk_memcg) mem_cgroup_uncharge_skmem(sk->sk_memcg, amt); return 0; }
13,227
114,077
0
string16 Accelerator::GetShortcutText() const { int string_id = 0; switch(key_code_) { case ui::VKEY_TAB: string_id = IDS_APP_TAB_KEY; break; case ui::VKEY_RETURN: string_id = IDS_APP_ENTER_KEY; break; case ui::VKEY_ESCAPE: string_id = IDS_APP_ESC_KEY; break; case ui::VKEY_PRIOR: string_id = IDS_APP_PAGEUP_KEY; break; case ui::VKEY_NEXT: string_id = IDS_APP_PAGEDOWN_KEY; break; case ui::VKEY_END: string_id = IDS_APP_END_KEY; break; case ui::VKEY_HOME: string_id = IDS_APP_HOME_KEY; break; case ui::VKEY_INSERT: string_id = IDS_APP_INSERT_KEY; break; case ui::VKEY_DELETE: string_id = IDS_APP_DELETE_KEY; break; case ui::VKEY_LEFT: string_id = IDS_APP_LEFT_ARROW_KEY; break; case ui::VKEY_RIGHT: string_id = IDS_APP_RIGHT_ARROW_KEY; break; case ui::VKEY_BACK: string_id = IDS_APP_BACKSPACE_KEY; break; case ui::VKEY_F1: string_id = IDS_APP_F1_KEY; break; case ui::VKEY_F11: string_id = IDS_APP_F11_KEY; break; default: break; } string16 shortcut; if (!string_id) { #if defined(OS_WIN) wchar_t key; if (key_code_ >= '0' && key_code_ <= '9') key = key_code_; else key = LOWORD(::MapVirtualKeyW(key_code_, MAPVK_VK_TO_CHAR)); shortcut += key; #elif defined(USE_AURA) const uint16 c = GetCharacterFromKeyCode(key_code_, false); if (c != 0) { shortcut += static_cast<string16::value_type>(base::ToUpperASCII(c)); } #elif defined(TOOLKIT_GTK) const gchar* name = NULL; switch (key_code_) { case ui::VKEY_OEM_2: name = static_cast<const gchar*>("/"); break; default: name = gdk_keyval_name(gdk_keyval_to_lower(key_code_)); break; } if (name) { if (name[0] != 0 && name[1] == 0) shortcut += static_cast<string16::value_type>(g_ascii_toupper(name[0])); else shortcut += UTF8ToUTF16(name); } #endif } else { shortcut = l10n_util::GetStringUTF16(string_id); } string16 shortcut_rtl; bool adjust_shortcut_for_rtl = false; if (base::i18n::IsRTL() && shortcut.length() == 1 && !IsAsciiAlpha(shortcut[0]) && !IsAsciiDigit(shortcut[0])) { adjust_shortcut_for_rtl = true; shortcut_rtl.assign(shortcut); } if (IsShiftDown()) shortcut = l10n_util::GetStringFUTF16(IDS_APP_SHIFT_MODIFIER, shortcut); if (IsCtrlDown()) shortcut = l10n_util::GetStringFUTF16(IDS_APP_CONTROL_MODIFIER, shortcut); else if (IsAltDown()) shortcut = l10n_util::GetStringFUTF16(IDS_APP_ALT_MODIFIER, shortcut); if (adjust_shortcut_for_rtl) { int key_length = static_cast<int>(shortcut_rtl.length()); DCHECK_GT(key_length, 0); shortcut_rtl.append(ASCIIToUTF16("+")); shortcut_rtl.append(shortcut, 0, shortcut.length() - key_length - 1); shortcut.swap(shortcut_rtl); } return shortcut; }
13,228
184,841
1
UserCloudPolicyManagerChromeOS::UserCloudPolicyManagerChromeOS( scoped_ptr<CloudPolicyStore> store, scoped_ptr<CloudExternalDataManager> external_data_manager, const base::FilePath& component_policy_cache_path, bool wait_for_policy_fetch, base::TimeDelta initial_policy_fetch_timeout, const scoped_refptr<base::SequencedTaskRunner>& task_runner, const scoped_refptr<base::SequencedTaskRunner>& file_task_runner, const scoped_refptr<base::SequencedTaskRunner>& io_task_runner) : CloudPolicyManager( PolicyNamespaceKey(dm_protocol::kChromeUserPolicyType, std::string()), store.get(), task_runner, file_task_runner, io_task_runner), store_(store.Pass()), external_data_manager_(external_data_manager.Pass()), component_policy_cache_path_(component_policy_cache_path), wait_for_policy_fetch_(wait_for_policy_fetch), policy_fetch_timeout_(false, false) { time_init_started_ = base::Time::Now(); if (wait_for_policy_fetch_) { policy_fetch_timeout_.Start( FROM_HERE, initial_policy_fetch_timeout, base::Bind(&UserCloudPolicyManagerChromeOS::OnBlockingFetchTimeout, base::Unretained(this))); } }
13,229
184,485
1
void WebPagePrivate::didComposite() { if (!m_page->settings()->developerExtrasEnabled()) return; InspectorInstrumentation::didComposite(m_page); }
13,230
124,900
0
LayoutUnit RenderBox::minPreferredLogicalWidth() const { if (preferredLogicalWidthsDirty()) { #ifndef NDEBUG SetLayoutNeededForbiddenScope layoutForbiddenScope(const_cast<RenderBox&>(*this)); #endif const_cast<RenderBox*>(this)->computePreferredLogicalWidths(); } return m_minPreferredLogicalWidth; }
13,231
70,444
0
jas_image_t *jpc_decode(jas_stream_t *in, char *optstr) { jpc_dec_importopts_t opts; jpc_dec_t *dec; jas_image_t *image; dec = 0; if (jpc_dec_parseopts(optstr, &opts)) { goto error; } jpc_initluts(); if (!(dec = jpc_dec_create(&opts, in))) { goto error; } /* Do most of the work. */ if (jpc_dec_decode(dec)) { goto error; } if (jas_image_numcmpts(dec->image) >= 3) { jas_image_setclrspc(dec->image, JAS_CLRSPC_SRGB); jas_image_setcmpttype(dec->image, 0, JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_RGB_R)); jas_image_setcmpttype(dec->image, 1, JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_RGB_G)); jas_image_setcmpttype(dec->image, 2, JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_RGB_B)); } else { jas_image_setclrspc(dec->image, JAS_CLRSPC_SGRAY); jas_image_setcmpttype(dec->image, 0, JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_GRAY_Y)); } /* Save the return value. */ image = dec->image; /* Stop the image from being discarded. */ dec->image = 0; /* Destroy decoder. */ jpc_dec_destroy(dec); return image; error: if (dec) { jpc_dec_destroy(dec); } return 0; }
13,232
167,200
0
base::string16 GlobalConfirmInfoBar::DelegateProxy::GetButtonLabel( InfoBarButton button) const { return global_info_bar_ ? global_info_bar_->delegate_->GetButtonLabel(button) : base::string16(); }
13,233
62,623
0
write_bits(netdissect_options *ndo, unsigned int val, const char *fmt) { const char *p = fmt; int i = 0; while ((p = strchr(fmt, '|'))) { size_t l = PTR_DIFF(p, fmt); if (l && (val & (1 << i))) ND_PRINT((ndo, "%.*s ", (int)l, fmt)); fmt = p + 1; i++; } }
13,234
182,481
1
static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args, u64 *cookie_ret, struct rds_mr **mr_ret) { struct rds_mr *mr = NULL, *found; unsigned int nr_pages; struct page **pages = NULL; struct scatterlist *sg; void *trans_private; unsigned long flags; rds_rdma_cookie_t cookie; unsigned int nents; long i; int ret; if (rs->rs_bound_addr == 0) { ret = -ENOTCONN; /* XXX not a great errno */ goto out; } if (!rs->rs_transport->get_mr) { ret = -EOPNOTSUPP; goto out; } nr_pages = rds_pages_in_vec(&args->vec); if (nr_pages == 0) { ret = -EINVAL; goto out; } /* Restrict the size of mr irrespective of underlying transport * To account for unaligned mr regions, subtract one from nr_pages */ if ((nr_pages - 1) > (RDS_MAX_MSG_SIZE >> PAGE_SHIFT)) { ret = -EMSGSIZE; goto out; } rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n", args->vec.addr, args->vec.bytes, nr_pages); /* XXX clamp nr_pages to limit the size of this alloc? */ pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); if (!pages) { ret = -ENOMEM; goto out; } mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL); if (!mr) { ret = -ENOMEM; goto out; } refcount_set(&mr->r_refcount, 1); RB_CLEAR_NODE(&mr->r_rb_node); mr->r_trans = rs->rs_transport; mr->r_sock = rs; if (args->flags & RDS_RDMA_USE_ONCE) mr->r_use_once = 1; if (args->flags & RDS_RDMA_INVALIDATE) mr->r_invalidate = 1; if (args->flags & RDS_RDMA_READWRITE) mr->r_write = 1; /* * Pin the pages that make up the user buffer and transfer the page * pointers to the mr's sg array. We check to see if we've mapped * the whole region after transferring the partial page references * to the sg array so that we can have one page ref cleanup path. * * For now we have no flag that tells us whether the mapping is * r/o or r/w. We need to assume r/w, or we'll do a lot of RDMA to * the zero page. */ ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1); if (ret < 0) goto out; nents = ret; sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL); if (!sg) { ret = -ENOMEM; goto out; } WARN_ON(!nents); sg_init_table(sg, nents); /* Stick all pages into the scatterlist */ for (i = 0 ; i < nents; i++) sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0); rdsdebug("RDS: trans_private nents is %u\n", nents); /* Obtain a transport specific MR. If this succeeds, the * s/g list is now owned by the MR. * Note that dma_map() implies that pending writes are * flushed to RAM, so no dma_sync is needed here. */ trans_private = rs->rs_transport->get_mr(sg, nents, rs, &mr->r_key); if (IS_ERR(trans_private)) { for (i = 0 ; i < nents; i++) put_page(sg_page(&sg[i])); kfree(sg); ret = PTR_ERR(trans_private); goto out; } mr->r_trans_private = trans_private; rdsdebug("RDS: get_mr put_user key is %x cookie_addr %p\n", mr->r_key, (void *)(unsigned long) args->cookie_addr); /* The user may pass us an unaligned address, but we can only * map page aligned regions. So we keep the offset, and build * a 64bit cookie containing <R_Key, offset> and pass that * around. */ cookie = rds_rdma_make_cookie(mr->r_key, args->vec.addr & ~PAGE_MASK); if (cookie_ret) *cookie_ret = cookie; if (args->cookie_addr && put_user(cookie, (u64 __user *)(unsigned long) args->cookie_addr)) { ret = -EFAULT; goto out; } /* Inserting the new MR into the rbtree bumps its * reference count. 
*/ spin_lock_irqsave(&rs->rs_rdma_lock, flags); found = rds_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr); spin_unlock_irqrestore(&rs->rs_rdma_lock, flags); BUG_ON(found && found != mr); rdsdebug("RDS: get_mr key is %x\n", mr->r_key); if (mr_ret) { refcount_inc(&mr->r_refcount); *mr_ret = mr; } ret = 0; out: kfree(pages); if (mr) rds_mr_put(mr); return ret; }
13,235
187,420
1
status_t MediaPlayerService::Client::setNextPlayer(const sp<IMediaPlayer>& player) { ALOGV("setNextPlayer"); Mutex::Autolock l(mLock); sp<Client> c = static_cast<Client*>(player.get()); mNextClient = c; if (c != NULL) { if (mAudioOutput != NULL) { mAudioOutput->setNextOutput(c->mAudioOutput); } else if ((mPlayer != NULL) && !mPlayer->hardwareOutput()) { ALOGE("no current audio output"); } if ((mPlayer != NULL) && (mNextClient->getPlayer() != NULL)) { mPlayer->setNextPlayer(mNextClient->getPlayer()); } } return OK; }
13,236
139,315
0
static inline bool isValidCachedResult(const Font* font, hb_direction_t dir, const String& localeString, const CachedShapingResults* cachedResults) { ASSERT(cachedResults); return cachedResults->dir == dir && cachedResults->font == *font && !cachedResults->font.loadingCustomFonts() && !font->loadingCustomFonts() && cachedResults->locale == localeString; }
13,237
168,404
0
TestBrowserWindow::TestLocationBar::GetPageTransition() const { return ui::PAGE_TRANSITION_LINK; }
13,238
188,359
1
const char* Track::GetLanguage() const { return m_info.language; }
13,239
110,999
0
void FileSystemOperation::CreateDirectory(const GURL& path_url, bool exclusive, bool recursive, const StatusCallback& callback) { DCHECK(SetPendingOperationType(kOperationCreateDirectory)); base::PlatformFileError result = SetUpFileSystemPath( path_url, &src_path_, &src_util_, PATH_FOR_CREATE); if (result != base::PLATFORM_FILE_OK) { callback.Run(result); delete this; return; } GetUsageAndQuotaThenRunTask( src_path_.origin(), src_path_.type(), base::Bind(&FileSystemOperation::DoCreateDirectory, base::Unretained(this), callback, exclusive, recursive), base::Bind(callback, base::PLATFORM_FILE_ERROR_FAILED)); }
13,240
89,506
0
SWFShape_drawScaledLine(SWFShape shape, int dx, int dy) { ShapeRecord record; if ( shape->isEnded ) return; if ( dx == 0 && dy == 0 ) return; record = newShapeRecord(shape, SHAPERECORD_LINETO); SWF_assert(SWFOutput_numSBits(dx) < 18); SWF_assert(SWFOutput_numSBits(dy) < 18); record.record.lineTo->dx = dx; record.record.lineTo->dy = dy; shape->xpos += dx; shape->ypos += dy; SWFRect_includePoint(SWFCharacter_getBounds(CHARACTER(shape)), shape->xpos, shape->ypos, shape->lineWidth); SWFRect_includePoint(shape->edgeBounds, shape->xpos, shape->ypos, 0); }
13,241
48,693
0
static void H2_STREAM_OUT_LOG(int lvl, h2_stream *s, const char *tag) { if (APLOG_C_IS_LEVEL(s->session->c, lvl)) { conn_rec *c = s->session->c; char buffer[4 * 1024]; const char *line = "(null)"; apr_size_t len, bmax = sizeof(buffer)/sizeof(buffer[0]); len = h2_util_bb_print(buffer, bmax, tag, "", s->out_buffer); ap_log_cerror(APLOG_MARK, lvl, 0, c, "bb_dump(%s): %s", c->log_id, len? buffer : line); } }
13,242
29,469
0
int wvlan_get_station_nickname(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct wl_private *lp = wl_priv(dev); unsigned long flags; int ret = 0; int status = -1; wvName_t *pName; /*------------------------------------------------------------------------*/ DBG_FUNC("wvlan_get_station_nickname"); DBG_ENTER(DbgInfo); wl_lock(lp, &flags); /* Get the current station name */ lp->ltvRecord.len = 1 + (sizeof(*pName) / sizeof(hcf_16)); lp->ltvRecord.typ = CFG_CNF_OWN_NAME; status = hcf_get_info(&(lp->hcfCtx), (LTVP)&(lp->ltvRecord)); if (status == HCF_SUCCESS) { pName = (wvName_t *)&(lp->ltvRecord.u.u32); memset(extra, '\0', HCF_MAX_NAME_LEN); wrqu->data.length = pName->length; memcpy(extra, pName->name, pName->length); } else { ret = -EFAULT; } wl_unlock(lp, &flags); /* out: */ DBG_LEAVE(DbgInfo); return ret; } /* wvlan_get_station_nickname */
13,243
96,952
0
static struct page *follow_pud_mask(struct vm_area_struct *vma, unsigned long address, p4d_t *p4dp, unsigned int flags, struct follow_page_context *ctx) { pud_t *pud; spinlock_t *ptl; struct page *page; struct mm_struct *mm = vma->vm_mm; pud = pud_offset(p4dp, address); if (pud_none(*pud)) return no_page_table(vma, flags); if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) { page = follow_huge_pud(mm, address, pud, flags); if (page) return page; return no_page_table(vma, flags); } if (is_hugepd(__hugepd(pud_val(*pud)))) { page = follow_huge_pd(vma, address, __hugepd(pud_val(*pud)), flags, PUD_SHIFT); if (page) return page; return no_page_table(vma, flags); } if (pud_devmap(*pud)) { ptl = pud_lock(mm, pud); page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap); spin_unlock(ptl); if (page) return page; } if (unlikely(pud_bad(*pud))) return no_page_table(vma, flags); return follow_pmd_mask(vma, address, pud, flags, ctx); }
13,244
62,981
0
static inline bool is_nmi(u32 intr_info) { return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK)) == (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK); }
13,245
20,439
0
static inline int ext2_feature_set_ok(struct super_block *sb) { if (EXT4_HAS_INCOMPAT_FEATURE(sb, ~EXT2_FEATURE_INCOMPAT_SUPP)) return 0; if (sb->s_flags & MS_RDONLY) return 1; if (EXT4_HAS_RO_COMPAT_FEATURE(sb, ~EXT2_FEATURE_RO_COMPAT_SUPP)) return 0; return 1; }
13,246
30,840
0
static int hash_sendmsg(struct kiocb *unused, struct socket *sock, struct msghdr *msg, size_t ignored) { int limit = ALG_MAX_PAGES * PAGE_SIZE; struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); struct hash_ctx *ctx = ask->private; unsigned long iovlen; struct iovec *iov; long copied = 0; int err; if (limit > sk->sk_sndbuf) limit = sk->sk_sndbuf; lock_sock(sk); if (!ctx->more) { err = crypto_ahash_init(&ctx->req); if (err) goto unlock; } ctx->more = 0; for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0; iovlen--, iov++) { unsigned long seglen = iov->iov_len; char __user *from = iov->iov_base; while (seglen) { int len = min_t(unsigned long, seglen, limit); int newlen; newlen = af_alg_make_sg(&ctx->sgl, from, len, 0); if (newlen < 0) { err = copied ? 0 : newlen; goto unlock; } ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL, newlen); err = af_alg_wait_for_completion( crypto_ahash_update(&ctx->req), &ctx->completion); af_alg_free_sg(&ctx->sgl); if (err) goto unlock; seglen -= newlen; from += newlen; copied += newlen; } } err = 0; ctx->more = msg->msg_flags & MSG_MORE; if (!ctx->more) { ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0); err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req), &ctx->completion); } unlock: release_sock(sk); return err ?: copied; }
13,247
49,415
0
static ssize_t proc_fault_inject_write(struct file * file, const char __user * buf, size_t count, loff_t *ppos) { struct task_struct *task; char buffer[PROC_NUMBUF]; int make_it_fail; int rv; if (!capable(CAP_SYS_RESOURCE)) return -EPERM; memset(buffer, 0, sizeof(buffer)); if (count > sizeof(buffer) - 1) count = sizeof(buffer) - 1; if (copy_from_user(buffer, buf, count)) return -EFAULT; rv = kstrtoint(strstrip(buffer), 0, &make_it_fail); if (rv < 0) return rv; if (make_it_fail < 0 || make_it_fail > 1) return -EINVAL; task = get_proc_task(file_inode(file)); if (!task) return -ESRCH; task->make_it_fail = make_it_fail; put_task_struct(task); return count; }
13,248
36,494
0
static int snd_ctl_tlv_ioctl(struct snd_ctl_file *file, struct snd_ctl_tlv __user *_tlv, int op_flag) { struct snd_card *card = file->card; struct snd_ctl_tlv tlv; struct snd_kcontrol *kctl; struct snd_kcontrol_volatile *vd; unsigned int len; int err = 0; if (copy_from_user(&tlv, _tlv, sizeof(tlv))) return -EFAULT; if (tlv.length < sizeof(unsigned int) * 2) return -EINVAL; down_read(&card->controls_rwsem); kctl = snd_ctl_find_numid(card, tlv.numid); if (kctl == NULL) { err = -ENOENT; goto __kctl_end; } if (kctl->tlv.p == NULL) { err = -ENXIO; goto __kctl_end; } vd = &kctl->vd[tlv.numid - kctl->id.numid]; if ((op_flag == 0 && (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_READ) == 0) || (op_flag > 0 && (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_WRITE) == 0) || (op_flag < 0 && (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_COMMAND) == 0)) { err = -ENXIO; goto __kctl_end; } if (vd->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) { if (vd->owner != NULL && vd->owner != file) { err = -EPERM; goto __kctl_end; } err = kctl->tlv.c(kctl, op_flag, tlv.length, _tlv->tlv); if (err > 0) { struct snd_ctl_elem_id id = kctl->id; up_read(&card->controls_rwsem); snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_TLV, &id); return 0; } } else { if (op_flag) { err = -ENXIO; goto __kctl_end; } len = kctl->tlv.p[1] + 2 * sizeof(unsigned int); if (tlv.length < len) { err = -ENOMEM; goto __kctl_end; } if (copy_to_user(_tlv->tlv, kctl->tlv.p, len)) err = -EFAULT; } __kctl_end: up_read(&card->controls_rwsem); return err; }
13,249
12,815
0
static int tls1_get_curvelist(SSL *s, int sess, const unsigned char **pcurves, size_t *num_curves) { size_t pcurveslen = 0; if (sess) { *pcurves = s->session->tlsext_ellipticcurvelist; pcurveslen = s->session->tlsext_ellipticcurvelist_length; } else { /* For Suite B mode only include P-256, P-384 */ switch (tls1_suiteb(s)) { case SSL_CERT_FLAG_SUITEB_128_LOS: *pcurves = suiteb_curves; pcurveslen = sizeof(suiteb_curves); break; case SSL_CERT_FLAG_SUITEB_128_LOS_ONLY: *pcurves = suiteb_curves; pcurveslen = 2; break; case SSL_CERT_FLAG_SUITEB_192_LOS: *pcurves = suiteb_curves + 2; pcurveslen = 2; break; default: *pcurves = s->tlsext_ellipticcurvelist; pcurveslen = s->tlsext_ellipticcurvelist_length; } if (!*pcurves) { # ifdef OPENSSL_FIPS if (FIPS_mode()) { *pcurves = fips_curves_default; pcurveslen = sizeof(fips_curves_default); } else # endif { if (!s->server || s->cert->ecdh_tmp_auto) { *pcurves = eccurves_auto; pcurveslen = sizeof(eccurves_auto); } else { *pcurves = eccurves_all; pcurveslen = sizeof(eccurves_all); } } } } /* We do not allow odd length arrays to enter the system. */ if (pcurveslen & 1) { SSLerr(SSL_F_TLS1_GET_CURVELIST, ERR_R_INTERNAL_ERROR); *num_curves = 0; return 0; } else { *num_curves = pcurveslen / 2; return 1; } }
13,250
57,251
0
static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata) { struct nfs4_state *state = opendata->state; struct nfs_inode *nfsi = NFS_I(state->inode); struct nfs_delegation *delegation; int open_mode = opendata->o_arg.open_flags; fmode_t fmode = opendata->o_arg.fmode; nfs4_stateid stateid; int ret = -EAGAIN; for (;;) { spin_lock(&state->owner->so_lock); if (can_open_cached(state, fmode, open_mode)) { update_open_stateflags(state, fmode); spin_unlock(&state->owner->so_lock); goto out_return_state; } spin_unlock(&state->owner->so_lock); rcu_read_lock(); delegation = rcu_dereference(nfsi->delegation); if (!can_open_delegated(delegation, fmode)) { rcu_read_unlock(); break; } /* Save the delegation */ nfs4_stateid_copy(&stateid, &delegation->stateid); rcu_read_unlock(); nfs_release_seqid(opendata->o_arg.seqid); if (!opendata->is_recover) { ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode); if (ret != 0) goto out; } ret = -EAGAIN; /* Try to update the stateid using the delegation */ if (update_open_stateid(state, NULL, &stateid, fmode)) goto out_return_state; } out: return ERR_PTR(ret); out_return_state: atomic_inc(&state->count); return state; }
13,251
10,876
0
des_init_local(struct php_crypt_extended_data *data) { data->old_rawkey0 = data->old_rawkey1 = 0; data->saltbits = 0; data->old_salt = 0; data->initialized = 1; }
13,252
76,405
0
static void init_func_state(struct bpf_verifier_env *env, struct bpf_func_state *state, int callsite, int frameno, int subprogno) { state->callsite = callsite; state->frameno = frameno; state->subprogno = subprogno; init_reg_state(env, state); }
13,253
60,616
0
static int check_and_subscribe_port(struct snd_seq_client *client, struct snd_seq_client_port *port, struct snd_seq_subscribers *subs, bool is_src, bool exclusive, bool ack) { struct snd_seq_port_subs_info *grp; struct list_head *p; struct snd_seq_subscribers *s; int err; grp = is_src ? &port->c_src : &port->c_dest; err = -EBUSY; down_write(&grp->list_mutex); if (exclusive) { if (!list_empty(&grp->list_head)) goto __error; } else { if (grp->exclusive) goto __error; /* check whether already exists */ list_for_each(p, &grp->list_head) { s = get_subscriber(p, is_src); if (match_subs_info(&subs->info, &s->info)) goto __error; } } err = subscribe_port(client, port, grp, &subs->info, ack); if (err < 0) { grp->exclusive = 0; goto __error; } /* add to list */ write_lock_irq(&grp->list_lock); if (is_src) list_add_tail(&subs->src_list, &grp->list_head); else list_add_tail(&subs->dest_list, &grp->list_head); grp->exclusive = exclusive; atomic_inc(&subs->ref_count); write_unlock_irq(&grp->list_lock); err = 0; __error: up_write(&grp->list_mutex); return err; }
13,254
123,948
0
void RenderViewImpl::UpdateScrollState(WebFrame* frame) { WebSize offset = frame->scrollOffset(); WebSize minimum_offset = frame->minimumScrollOffset(); WebSize maximum_offset = frame->maximumScrollOffset(); bool is_pinned_to_left = offset.width <= minimum_offset.width; bool is_pinned_to_right = offset.width >= maximum_offset.width; if (is_pinned_to_left != cached_is_main_frame_pinned_to_left_ || is_pinned_to_right != cached_is_main_frame_pinned_to_right_) { Send(new ViewHostMsg_DidChangeScrollOffsetPinningForMainFrame( routing_id_, is_pinned_to_left, is_pinned_to_right)); cached_is_main_frame_pinned_to_left_ = is_pinned_to_left; cached_is_main_frame_pinned_to_right_ = is_pinned_to_right; } Send(new ViewHostMsg_DidChangeScrollOffset(routing_id_)); }
13,255
73,470
0
MagickExport MagickBooleanType GetOneVirtualMagickPixel(const Image *image, const ssize_t x,const ssize_t y,MagickPixelPacket *pixel, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict pixels; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickSignature); assert(id < (int) cache_info->number_threads); pixels=GetVirtualPixelsFromNexus(image,GetPixelCacheVirtualMethod(image),x,y, 1UL,1UL,cache_info->nexus_info[id],exception); GetMagickPixelPacket(image,pixel); if (pixels == (const PixelPacket *) NULL) return(MagickFalse); indexes=GetVirtualIndexesFromNexus(cache_info,cache_info->nexus_info[id]); SetMagickPixelPacket(image,pixels,indexes,pixel); return(MagickTrue); }
13,256
127,287
0
virtual void SetUp() { }
13,257
124,424
0
void WebRuntimeFeatures::enableExperimentalCanvasFeatures(bool enable) { RuntimeEnabledFeatures::setExperimentalCanvasFeaturesEnabled(enable); }
13,258
143,049
0
ScriptProcessorNode* BaseAudioContext::createScriptProcessor( uint32_t buffer_size, ExceptionState& exception_state) { DCHECK(IsMainThread()); return ScriptProcessorNode::Create(*this, buffer_size, exception_state); }
13,259
28,441
0
static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int *geom) { struct diskparm *param = (struct diskparm *)geom; unsigned char *buf; dprintk((KERN_DEBUG "aac_biosparm.\n")); /* * Assuming extended translation is enabled - #REVISIT# */ if (capacity >= 2 * 1024 * 1024) { /* 1 GB in 512 byte sectors */ if(capacity >= 4 * 1024 * 1024) { /* 2 GB in 512 byte sectors */ param->heads = 255; param->sectors = 63; } else { param->heads = 128; param->sectors = 32; } } else { param->heads = 64; param->sectors = 32; } param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors); /* * Read the first 1024 bytes from the disk device, if the boot * sector partition table is valid, search for a partition table * entry whose end_head matches one of the standard geometry * translations ( 64/32, 128/32, 255/63 ). */ buf = scsi_bios_ptable(bdev); if (!buf) return 0; if(*(__le16 *)(buf + 0x40) == cpu_to_le16(0xaa55)) { struct partition *first = (struct partition * )buf; struct partition *entry = first; int saved_cylinders = param->cylinders; int num; unsigned char end_head, end_sec; for(num = 0; num < 4; num++) { end_head = entry->end_head; end_sec = entry->end_sector & 0x3f; if(end_head == 63) { param->heads = 64; param->sectors = 32; break; } else if(end_head == 127) { param->heads = 128; param->sectors = 32; break; } else if(end_head == 254) { param->heads = 255; param->sectors = 63; break; } entry++; } if (num == 4) { end_head = first->end_head; end_sec = first->end_sector & 0x3f; } param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors); if (num < 4 && end_sec == param->sectors) { if (param->cylinders != saved_cylinders) dprintk((KERN_DEBUG "Adopting geometry: heads=%d, sectors=%d from partition table %d.\n", param->heads, param->sectors, num)); } else if (end_head > 0 || end_sec > 0) { dprintk((KERN_DEBUG "Strange geometry: heads=%d, sectors=%d in partition table %d.\n", end_head + 1, end_sec, num)); dprintk((KERN_DEBUG "Using geometry: heads=%d, sectors=%d.\n", param->heads, param->sectors)); } } kfree(buf); return 0; }
13,260
8,155
0
void Gfx::opSetRenderingIntent(Object args[], int numArgs) { }
13,261
78,392
0
epass2003_hook_path(struct sc_path *path, int inc) { u8 fid_h = path->value[path->len - 2]; u8 fid_l = path->value[path->len - 1]; switch (fid_h) { case 0x29: case 0x30: case 0x31: case 0x32: case 0x33: case 0x34: if (inc) fid_l = fid_l * FID_STEP; else fid_l = fid_l / FID_STEP; path->value[path->len - 1] = fid_l; return 1; default: break; } return 0; }
13,262
45,413
0
int ecryptfs_destroy_crypto(void) { struct ecryptfs_key_tfm *key_tfm, *key_tfm_tmp; mutex_lock(&key_tfm_list_mutex); list_for_each_entry_safe(key_tfm, key_tfm_tmp, &key_tfm_list, key_tfm_list) { list_del(&key_tfm->key_tfm_list); if (key_tfm->key_tfm) crypto_free_blkcipher(key_tfm->key_tfm); kmem_cache_free(ecryptfs_key_tfm_cache, key_tfm); } mutex_unlock(&key_tfm_list_mutex); return 0; }
13,263
1,203
0
void JBIG2Stream::readEndOfStripeSeg(Guint length) { Guint i; for (i = 0; i < length; ++i) { curStr->getChar(); } }
13,264
144,275
0
void UserSelectionScreen::SetUsersLoaded(bool loaded) { users_loaded_ = loaded; }
13,265
84,808
0
static int set_user_dscr(struct task_struct *task, unsigned long dscr) { return -EIO; }
13,266
66,616
0
static void remove_vqs(struct ports_device *portdev) { portdev->vdev->config->del_vqs(portdev->vdev); kfree(portdev->in_vqs); kfree(portdev->out_vqs); }
13,267
150,124
0
LayerTreeHostTestSetNeedsCommit1() : num_commits_(0), num_draws_(0) {}
13,268
123,257
0
void UnlockCompositor() { defer_compositor_lock_ = false; compositor_lock_ = NULL; }
13,269
59,643
0
uriPathTest(const char *filename ATTRIBUTE_UNUSED, const char *result ATTRIBUTE_UNUSED, const char *err ATTRIBUTE_UNUSED, int options ATTRIBUTE_UNUSED) { int parsed; int failures = 0; /* * register the new I/O handlers */ if (xmlRegisterInputCallbacks(uripMatch, uripOpen, uripRead, uripClose) < 0) { fprintf(stderr, "failed to register HTTP handler\n"); return(-1); } for (urip_current = 0;urip_testURLs[urip_current] != NULL;urip_current++) { urip_success = 1; parsed = urip_checkURL(urip_testURLs[urip_current]); if (urip_success != 1) { fprintf(stderr, "failed the URL passing test for %s", urip_testURLs[urip_current]); failures++; } else if (parsed != 1) { fprintf(stderr, "failed the parsing test for %s", urip_testURLs[urip_current]); failures++; } nb_tests++; } xmlPopInputCallbacks(); return(failures); }
13,270
99,788
0
void TestRunner::Init(JobLevel job_level, TokenLevel startup_token, TokenLevel main_token) { broker_ = NULL; policy_ = NULL; timeout_ = kDefaultTimeout; state_ = AFTER_REVERT; broker_ = GetBroker(); if (!broker_) return; policy_ = broker_->CreatePolicy(); if (!policy_) return; policy_->SetJobLevel(job_level, 0); policy_->SetTokenLevel(startup_token, main_token); is_init_ = true; }
13,271
143,460
0
HTMLPreloadScanner::~HTMLPreloadScanner() { }
13,272
67,679
0
void *napi_alloc_frag(unsigned int fragsz) { return __napi_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD); }
13,273
58,629
0
BOOL rdp_read_header(rdpRdp* rdp, STREAM* s, UINT16* length, UINT16* channel_id) { UINT16 initiator; enum DomainMCSPDU MCSPDU; MCSPDU = (rdp->settings->ServerMode) ? DomainMCSPDU_SendDataRequest : DomainMCSPDU_SendDataIndication; if (!mcs_read_domain_mcspdu_header(s, &MCSPDU, length)) { if (MCSPDU != DomainMCSPDU_DisconnectProviderUltimatum) return FALSE; } if (*length - 8 > stream_get_left(s)) return FALSE; if (MCSPDU == DomainMCSPDU_DisconnectProviderUltimatum) { BYTE reason; (void) per_read_enumerated(s, &reason, 0); DEBUG_RDP("DisconnectProviderUltimatum from server, reason code 0x%02x\n", reason); rdp->disconnect = TRUE; return TRUE; } if(stream_get_left(s) < 5) return FALSE; per_read_integer16(s, &initiator, MCS_BASE_CHANNEL_ID); /* initiator (UserId) */ per_read_integer16(s, channel_id, 0); /* channelId */ stream_seek(s, 1); /* dataPriority + Segmentation (0x70) */ if(!per_read_length(s, length)) /* userData (OCTET_STRING) */ return FALSE; if (*length > stream_get_left(s)) return FALSE; return TRUE; }
13,274
166,244
0
AudioServiceListener* MediaStreamManager::audio_service_listener() { DCHECK_CURRENTLY_ON(BrowserThread::UI); return audio_service_listener_.get(); }
13,275
167,197
0
void GlobalConfirmInfoBar::Close() { delete this; }
13,276
88,832
0
static void floppy_rb0_cb(struct bio *bio) { struct rb0_cbdata *cbdata = (struct rb0_cbdata *)bio->bi_private; int drive = cbdata->drive; if (bio->bi_status) { pr_info("floppy: error %d while reading block 0\n", bio->bi_status); set_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags); } complete(&cbdata->complete); }
13,277
23,581
0
static void dispatch_queued_ios(struct multipath *m) { int r; unsigned long flags; struct dm_mpath_io *mpio; union map_info *info; struct request *clone, *n; LIST_HEAD(cl); spin_lock_irqsave(&m->lock, flags); list_splice_init(&m->queued_ios, &cl); spin_unlock_irqrestore(&m->lock, flags); list_for_each_entry_safe(clone, n, &cl, queuelist) { list_del_init(&clone->queuelist); info = dm_get_rq_mapinfo(clone); mpio = info->ptr; r = map_io(m, clone, mpio, 1); if (r < 0) { mempool_free(mpio, m->mpio_pool); dm_kill_unmapped_request(clone, r); } else if (r == DM_MAPIO_REMAPPED) dm_dispatch_request(clone); else if (r == DM_MAPIO_REQUEUE) { mempool_free(mpio, m->mpio_pool); dm_requeue_unmapped_request(clone); } } }
13,278
77,519
0
ofputil_decode_ofp11_port(struct ofputil_phy_port *pp, const struct ofp11_port *op) { enum ofperr error; error = ofputil_port_from_ofp11(op->port_no, &pp->port_no); if (error) { return error; } pp->hw_addr = op->hw_addr; ovs_strlcpy(pp->name, op->name, OFP_MAX_PORT_NAME_LEN); pp->config = ntohl(op->config) & OFPPC11_ALL; pp->state = ntohl(op->state) & OFPPS11_ALL; pp->curr = netdev_port_features_from_ofp11(op->curr); pp->advertised = netdev_port_features_from_ofp11(op->advertised); pp->supported = netdev_port_features_from_ofp11(op->supported); pp->peer = netdev_port_features_from_ofp11(op->peer); pp->curr_speed = ntohl(op->curr_speed); pp->max_speed = ntohl(op->max_speed); return 0; }
13,279
100,800
0
void HttpResponseHeaders::GetNormalizedHeaders(std::string* output) const { output->assign(raw_headers_.c_str()); typedef base::hash_map<std::string, size_t> HeadersMap; HeadersMap headers_map; HeadersMap::iterator iter = headers_map.end(); std::vector<std::string> headers; for (size_t i = 0; i < parsed_.size(); ++i) { DCHECK(!parsed_[i].is_continuation()); std::string name(parsed_[i].name_begin, parsed_[i].name_end); std::string lower_name = StringToLowerASCII(name); iter = headers_map.find(lower_name); if (iter == headers_map.end()) { iter = headers_map.insert( HeadersMap::value_type(lower_name, headers.size())).first; headers.push_back(name + ": "); } else { headers[iter->second].append(", "); } std::string::const_iterator value_begin = parsed_[i].value_begin; std::string::const_iterator value_end = parsed_[i].value_end; while (++i < parsed_.size() && parsed_[i].is_continuation()) value_end = parsed_[i].value_end; --i; headers[iter->second].append(value_begin, value_end); } for (size_t i = 0; i < headers.size(); ++i) { output->push_back('\n'); output->append(headers[i]); } output->push_back('\n'); }
13,280
76,672
0
static void cc_init(void) { int i; if (is_cc_init) return; for (i = 0; i < CS_MAX; i++) cc_tab[i].valid = false; set_cc(CS_HSTEM, true, 2, true); set_cc(CS_VSTEM, true, 2, true); set_cc(CS_VMOVETO, true, 1, true); set_cc(CS_RLINETO, true, 2, true); set_cc(CS_HLINETO, true, 1, true); set_cc(CS_VLINETO, true, 1, true); set_cc(CS_RRCURVETO, true, 6, true); set_cc(CS_CLOSEPATH, false, 0, true); set_cc(CS_CALLSUBR, false, 1, false); set_cc(CS_RETURN, false, 0, false); set_cc(CS_HSBW, true, 2, true); set_cc(CS_ENDCHAR, false, 0, true); set_cc(CS_RMOVETO, true, 2, true); set_cc(CS_HMOVETO, true, 1, true); set_cc(CS_VHCURVETO, true, 4, true); set_cc(CS_HVCURVETO, true, 4, true); set_cc(CS_DOTSECTION, false, 0, true); set_cc(CS_VSTEM3, true, 6, true); set_cc(CS_HSTEM3, true, 6, true); set_cc(CS_SEAC, true, 5, true); set_cc(CS_SBW, true, 4, true); set_cc(CS_DIV, false, 2, false); set_cc(CS_CALLOTHERSUBR, false, 0, false); set_cc(CS_POP, false, 0, false); set_cc(CS_SETCURRENTPOINT, true, 2, true); is_cc_init = true; }
13,281
126,482
0
bool TabStripGtk::CanPaintOnlyFavicons(const GdkRectangle* rects, int num_rects, std::vector<int>* tabs_to_paint) { int t = 0; for (int r = 0; r < num_rects; ++r) { while (t < GetTabCount()) { TabGtk* tab = GetTabAt(t); if (GdkRectMatchesTabFaviconBounds(rects[r], tab) && tab->ShouldShowIcon()) { tabs_to_paint->push_back(t); ++t; break; } ++t; } } return static_cast<int>(tabs_to_paint->size()) == num_rects; }
13,282
89,883
0
upnp_event_notify_connect(struct upnp_event_notify * obj) { unsigned int i; const char * p; unsigned short port; #ifdef ENABLE_IPV6 struct sockaddr_storage addr; socklen_t addrlen; #else struct sockaddr_in addr; socklen_t addrlen; #endif if(!obj) return; memset(&addr, 0, sizeof(addr)); i = 0; if(obj->sub == NULL) { obj->state = EError; return; } p = obj->sub->callback; p += 7; /* http:// */ #ifdef ENABLE_IPV6 if(*p == '[') { /* ip v6 */ obj->addrstr[i++] = '['; p++; obj->ipv6 = 1; while(*p != ']' && i < (sizeof(obj->addrstr)-1)) obj->addrstr[i++] = *(p++); if(*p == ']') p++; if(i < (sizeof(obj->addrstr)-1)) obj->addrstr[i++] = ']'; } else { #endif while(*p != '/' && *p != ':' && i < (sizeof(obj->addrstr)-1)) obj->addrstr[i++] = *(p++); #ifdef ENABLE_IPV6 } #endif obj->addrstr[i] = '\0'; if(*p == ':') { obj->portstr[0] = *p; i = 1; p++; port = (unsigned short)atoi(p); while(*p != '/') { if(i<7) obj->portstr[i++] = *p; p++; } obj->portstr[i] = 0; } else { port = 80; obj->portstr[0] = '\0'; } obj->path = p; #ifdef ENABLE_IPV6 if(obj->ipv6) { char addrstr_tmp[48]; struct sockaddr_in6 * sa = (struct sockaddr_in6 *)&addr; sa->sin6_family = AF_INET6; i = (int)strlen(obj->addrstr); if(i > 2) { i -= 2; memcpy(addrstr_tmp, obj->addrstr + 1, i); addrstr_tmp[i] = '\0'; inet_pton(AF_INET6, addrstr_tmp, &(sa->sin6_addr)); } sa->sin6_port = htons(port); addrlen = sizeof(struct sockaddr_in6); } else { struct sockaddr_in * sa = (struct sockaddr_in *)&addr; sa->sin_family = AF_INET; inet_pton(AF_INET, obj->addrstr, &(sa->sin_addr)); sa->sin_port = htons(port); addrlen = sizeof(struct sockaddr_in); } #else addr.sin_family = AF_INET; inet_aton(obj->addrstr, &addr.sin_addr); addr.sin_port = htons(port); addrlen = sizeof(struct sockaddr_in); #endif syslog(LOG_DEBUG, "%s: '%s' %hu '%s'", "upnp_event_notify_connect", obj->addrstr, port, obj->path); obj->state = EConnecting; if(connect(obj->s, (struct sockaddr *)&addr, addrlen) < 0) { if(errno != EINPROGRESS && errno != EWOULDBLOCK) { syslog(LOG_ERR, "%s: connect(%d, %s, %u): %m", "upnp_event_notify_connect", obj->s, obj->addrstr, addrlen); obj->state = EError; } } }
13,283
41,246
0
static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf, int page_control) { BlockDriverState *bdrv = s->bs; int cylinders, heads, secs; uint8_t *p = *p_outbuf; /* * If Changeable Values are requested, a mask denoting those mode parameters * that are changeable shall be returned. As we currently don't support * parameter changes via MODE_SELECT all bits are returned set to zero. * The buffer was already menset to zero by the caller of this function. */ switch (page) { case 4: /* Rigid disk device geometry page. */ if (s->qdev.type == TYPE_ROM) { return -1; } p[0] = 4; p[1] = 0x16; if (page_control == 1) { /* Changeable Values */ break; } /* if a geometry hint is available, use it */ bdrv_get_geometry_hint(bdrv, &cylinders, &heads, &secs); p[2] = (cylinders >> 16) & 0xff; p[3] = (cylinders >> 8) & 0xff; p[4] = cylinders & 0xff; p[5] = heads & 0xff; /* Write precomp start cylinder, disabled */ p[6] = (cylinders >> 16) & 0xff; p[7] = (cylinders >> 8) & 0xff; p[8] = cylinders & 0xff; /* Reduced current start cylinder, disabled */ p[9] = (cylinders >> 16) & 0xff; p[10] = (cylinders >> 8) & 0xff; p[11] = cylinders & 0xff; /* Device step rate [ns], 200ns */ p[12] = 0; p[13] = 200; /* Landing zone cylinder */ p[14] = 0xff; p[15] = 0xff; p[16] = 0xff; /* Medium rotation rate [rpm], 5400 rpm */ p[20] = (5400 >> 8) & 0xff; p[21] = 5400 & 0xff; break; case 5: /* Flexible disk device geometry page. */ if (s->qdev.type == TYPE_ROM) { return -1; } p[0] = 5; p[1] = 0x1e; if (page_control == 1) { /* Changeable Values */ break; } /* Transfer rate [kbit/s], 5Mbit/s */ p[2] = 5000 >> 8; p[3] = 5000 & 0xff; /* if a geometry hint is available, use it */ bdrv_get_geometry_hint(bdrv, &cylinders, &heads, &secs); p[4] = heads & 0xff; p[5] = secs & 0xff; p[6] = s->cluster_size * 2; p[8] = (cylinders >> 8) & 0xff; p[9] = cylinders & 0xff; /* Write precomp start cylinder, disabled */ p[10] = (cylinders >> 8) & 0xff; p[11] = cylinders & 0xff; /* Reduced current start cylinder, disabled */ p[12] = (cylinders >> 8) & 0xff; p[13] = cylinders & 0xff; /* Device step rate [100us], 100us */ p[14] = 0; p[15] = 1; /* Device step pulse width [us], 1us */ p[16] = 1; /* Device head settle delay [100us], 100us */ p[17] = 0; p[18] = 1; /* Motor on delay [0.1s], 0.1s */ p[19] = 1; /* Motor off delay [0.1s], 0.1s */ p[20] = 1; /* Medium rotation rate [rpm], 5400 rpm */ p[28] = (5400 >> 8) & 0xff; p[29] = 5400 & 0xff; break; case 8: /* Caching page. */ p[0] = 8; p[1] = 0x12; if (page_control == 1) { /* Changeable Values */ break; } if (bdrv_enable_write_cache(s->bs)) { p[2] = 4; /* WCE */ } break; case 0x2a: /* CD Capabilities and Mechanical Status page. */ if (s->qdev.type != TYPE_ROM) { return -1; } p[0] = 0x2a; p[1] = 0x14; if (page_control == 1) { /* Changeable Values */ break; } p[2] = 3; // CD-R & CD-RW read p[3] = 0; // Writing not supported p[4] = 0x7f; /* Audio, composite, digital out, mode 2 form 1&2, multi session */ p[5] = 0xff; /* CD DA, DA accurate, RW supported, RW corrected, C2 errors, ISRC, UPC, Bar code */ p[6] = 0x2d | (s->tray_locked ? 2 : 0); /* Locking supported, jumper present, eject, tray */ p[7] = 0; /* no volume & mute control, no changer */ p[8] = (50 * 176) >> 8; // 50x read speed p[9] = (50 * 176) & 0xff; p[10] = 0 >> 8; // No volume p[11] = 0 & 0xff; p[12] = 2048 >> 8; // 2M buffer p[13] = 2048 & 0xff; p[14] = (16 * 176) >> 8; // 16x read speed current p[15] = (16 * 176) & 0xff; p[18] = (16 * 176) >> 8; // 16x write speed p[19] = (16 * 176) & 0xff; p[20] = (16 * 176) >> 8; // 16x write speed current p[21] = (16 * 176) & 0xff; break; default: return -1; } *p_outbuf += p[1] + 2; return p[1] + 2; }
13,284
172,702
0
void MediaRecorder::notify(int msg, int ext1, int ext2) { ALOGV("message received msg=%d, ext1=%d, ext2=%d", msg, ext1, ext2); sp<MediaRecorderListener> listener; mLock.lock(); listener = mListener; mLock.unlock(); if (listener != NULL) { Mutex::Autolock _l(mNotifyLock); ALOGV("callback application"); listener->notify(msg, ext1, ext2); ALOGV("back from callback"); } }
13,285
125,811
0
bool ParamTraits<base::TimeDelta>::Read(const Message* m, PickleIterator* iter, param_type* r) { int64 value; bool ret = ParamTraits<int64>::Read(m, iter, &value); if (ret) *r = base::TimeDelta::FromInternalValue(value); return ret; }
13,286
50,960
0
struct vfsmount *mnt_clone_internal(struct path *path) { struct mount *p; p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE); if (IS_ERR(p)) return ERR_CAST(p); p->mnt.mnt_flags |= MNT_INTERNAL; return &p->mnt; }
13,287
28,757
0
void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) { struct kvm_lapic *apic = vcpu->arch.apic; kvm_lapic_set_base(vcpu, vcpu->arch.apic_base); /* set SPIV separately to get count of SW disabled APICs right */ apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV))); memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s); /* call kvm_apic_set_id() to put apic into apic_map */ kvm_apic_set_id(apic, kvm_apic_id(apic)); kvm_apic_set_version(vcpu); apic_update_ppr(apic); hrtimer_cancel(&apic->lapic_timer.timer); update_divide_count(apic); start_apic_timer(apic); apic->irr_pending = true; apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm) ? 1 : count_vectors(apic->regs + APIC_ISR); apic->highest_isr_cache = -1; kvm_x86_ops->hwapic_isr_update(vcpu->kvm, apic_find_highest_isr(apic)); kvm_make_request(KVM_REQ_EVENT, vcpu); kvm_rtc_eoi_tracking_restore_one(vcpu); }
13,288
78,126
0
sc_asn1_decode_object_id(const u8 *inbuf, size_t inlen, struct sc_object_id *id) { int a; const u8 *p = inbuf; int *octet; if (inlen == 0 || inbuf == NULL || id == NULL) return SC_ERROR_INVALID_ARGUMENTS; sc_init_oid(id); octet = id->value; a = *p; *octet++ = a / 40; *octet++ = a % 40; inlen--; while (inlen) { p++; a = *p & 0x7F; inlen--; while (inlen && *p & 0x80) { p++; a <<= 7; a |= *p & 0x7F; inlen--; } *octet++ = a; if (octet - id->value >= SC_MAX_OBJECT_ID_OCTETS) { sc_init_oid(id); return SC_ERROR_INVALID_ASN1_OBJECT; } }; return 0; }
13,289
156,438
0
void RenderFrameDevToolsAgentHost::DisconnectWebContents() { navigation_handles_.clear(); SetFrameTreeNode(nullptr); scoped_refptr<RenderFrameDevToolsAgentHost> protect(this); UpdateFrameHost(nullptr); for (DevToolsSession* session : sessions()) session->ResumeSendingMessagesToAgent(); }
13,290
31,266
0
static int hash_walk_next(struct crypto_hash_walk *walk) { unsigned int alignmask = walk->alignmask; unsigned int offset = walk->offset; unsigned int nbytes = min(walk->entrylen, ((unsigned int)(PAGE_SIZE)) - offset); walk->data = kmap_atomic(walk->pg); walk->data += offset; if (offset & alignmask) { unsigned int unaligned = alignmask + 1 - (offset & alignmask); if (nbytes > unaligned) nbytes = unaligned; } walk->entrylen -= nbytes; return nbytes; }
13,291
52,555
0
void h2o_http2_conn_register_stream(h2o_http2_conn_t *conn, h2o_http2_stream_t *stream) { khiter_t iter; int r; if (!h2o_http2_stream_is_push(stream->stream_id) && conn->pull_stream_ids.max_open < stream->stream_id) conn->pull_stream_ids.max_open = stream->stream_id; iter = kh_put(h2o_http2_stream_t, conn->streams, stream->stream_id, &r); assert(iter != kh_end(conn->streams)); kh_val(conn->streams, iter) = stream; }
13,292
49,763
0
static int arcmsr_hbaA_handle_isr(struct AdapterControlBlock *acb) { uint32_t outbound_intstatus; struct MessageUnit_A __iomem *reg = acb->pmuA; outbound_intstatus = readl(&reg->outbound_intstatus) & acb->outbound_int_enable; if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT)) return IRQ_NONE; do { writel(outbound_intstatus, &reg->outbound_intstatus); if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) arcmsr_hbaA_doorbell_isr(acb); if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) arcmsr_hbaA_postqueue_isr(acb); if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) arcmsr_hbaA_message_isr(acb); outbound_intstatus = readl(&reg->outbound_intstatus) & acb->outbound_int_enable; } while (outbound_intstatus & (ARCMSR_MU_OUTBOUND_DOORBELL_INT | ARCMSR_MU_OUTBOUND_POSTQUEUE_INT | ARCMSR_MU_OUTBOUND_MESSAGE0_INT)); return IRQ_HANDLED; }
13,293
33,892
0
cib_remote_register_notification(cib_t * cib, const char *callback, int enabled) { xmlNode *notify_msg = create_xml_node(NULL, "cib_command"); cib_remote_opaque_t *private = cib->variant_opaque; crm_xml_add(notify_msg, F_CIB_OPERATION, T_CIB_NOTIFY); crm_xml_add(notify_msg, F_CIB_NOTIFY_TYPE, callback); crm_xml_add_int(notify_msg, F_CIB_NOTIFY_ACTIVATE, enabled); crm_send_remote_msg(private->callback.session, notify_msg, private->callback.encrypted); free_xml(notify_msg); return pcmk_ok; }
13,294
63,294
0
static int midi_setup_trackname(struct _mdi *mdi, char * text) { MIDI_EVENT_SDEBUG(__FUNCTION__,0, text); strip_text(text); _WM_CheckEventMemoryPool(mdi); mdi->events[mdi->event_count].do_event = *_WM_do_meta_trackname; mdi->events[mdi->event_count].event_data.channel = 0; mdi->events[mdi->event_count].event_data.data.string = text; mdi->events[mdi->event_count].samples_to_next = 0; mdi->event_count++; return (0); }
13,295
126,053
0
TabAppendedNotificationObserver::TabAppendedNotificationObserver( Browser* parent, AutomationProvider* automation, IPC::Message* reply_message, bool use_json_interface) : TabStripNotificationObserver(chrome::NOTIFICATION_TAB_PARENTED, automation), parent_(parent), reply_message_(reply_message), use_json_interface_(use_json_interface) { }
13,296
35,043
0
static int sctp_setsockopt_context(struct sock *sk, char __user *optval, int optlen) { struct sctp_assoc_value params; struct sctp_sock *sp; struct sctp_association *asoc; if (optlen != sizeof(struct sctp_assoc_value)) return -EINVAL; if (copy_from_user(&params, optval, optlen)) return -EFAULT; sp = sctp_sk(sk); if (params.assoc_id != 0) { asoc = sctp_id2assoc(sk, params.assoc_id); if (!asoc) return -EINVAL; asoc->default_rcv_context = params.assoc_value; } else { sp->default_rcv_context = params.assoc_value; } return 0; }
13,297
169,562
0
virtual ~NodeIDWrapper() {}
13,298
86,495
0
current_getpwuid(void) { uid_t ruid; /* GNU Hurd implementation has an extension where a process can exist in a * non-conforming environment, and thus be outside the realms of POSIX * process identifiers; on this platform, getuid() fails with a status of * (uid_t)(-1) and sets errno if a program is run from a non-conforming * environment. * * http://austingroupbugs.net/view.php?id=511 */ errno = 0; ruid = getuid (); return errno == 0 ? getpwuid (ruid) : NULL; }
13,299