Column schema (one record per function):

  func        string    length 0 – 484k
  target      int64     0 – 1
  cwe         list      length 0 – 4
  project     string    799 classes
  commit_id   string    length 40 – 40
  hash        float64   1,215,700,430,453,689,100,000,000B – 340,281,914,521,452,260,000,000,000,000B
  size        int64     1 – 24k
  message     string    length 0 – 13.3k
int smb_vfs_call_stat(struct vfs_handle_struct *handle, struct smb_filename *smb_fname) { VFS_FIND(stat); return handle->fns->stat(handle, smb_fname); }
target: 0 | cwe: ["CWE-22"] | project: samba | size: 6
commit_id: bd269443e311d96ef495a9db47d1b95eb83bb8f4 | hash: 106,968,866,453,353,680,000,000,000,000,000,000,000
message:
Fix bug 7104 - "wide links" and "unix extensions" are incompatible. Change parameter "wide links" to default to "no". Ensure "wide links = no" if "unix extensions = yes" on a share. Fix man pages to refect this. Remove "within share" checks for a UNIX symlink set - even if widelinks = no. The server will not follow that link anyway. Correct DEBUG message in check_reduced_name() to add missing "\n" so it's really clear when a path is being denied as it's outside the enclosing share path. Jeremy.
static int tcp_try_undo_recovery(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); if (tcp_may_undo(tp)) { int mib_idx; /* Happy end! We did not retransmit anything * or our original transmission succeeded. */ DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans"); tcp_undo_cwr(sk, true); if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss) mib_idx = LINUX_MIB_TCPLOSSUNDO; else mib_idx = LINUX_MIB_TCPFULLUNDO; NET_INC_STATS_BH(sock_net(sk), mib_idx); tp->undo_marker = 0; } if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) { /* Hold old state until something *above* high_seq * is ACKed. For Reno it is MUST to prevent false * fast retransmits (RFC2582). SACK TCP is safe. */ tcp_moderate_cwnd(tp); return 1; } tcp_set_ca_state(sk, TCP_CA_Open); return 0; }
target: 0 | cwe: [] | project: net-next | size: 30
commit_id: fdf5af0daf8019cec2396cdef8fb042d80fe71fa | hash: 269,114,545,429,052,130,000,000,000,000,000,000,000
message:
tcp: drop SYN+FIN messages Denys Fedoryshchenko reported that SYN+FIN attacks were bringing his linux machines to their limits. Dont call conn_request() if the TCP flags includes SYN flag Reported-by: Denys Fedoryshchenko <[email protected]> Signed-off-by: Eric Dumazet <[email protected]> Signed-off-by: David S. Miller <[email protected]>
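A side note on the fix this message describes: the check itself is tiny. A minimal, self-contained sketch of the predicate follows (the real patch applies it in the kernel's LISTEN-state handling before conn_request() is invoked; the struct here is a simplified stand-in for struct tcphdr):

```c
/* Simplified stand-in for the SYN/FIN bits of struct tcphdr. */
struct tcp_flags {
    unsigned int syn : 1;
    unsigned int fin : 1;
};

/* A LISTEN-state segment carrying both SYN and FIN is dropped, so
 * conn_request() is never called for it. */
static int drop_syn_fin(const struct tcp_flags *th)
{
    return th->syn && th->fin;
}
```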
bool FunctionContext::is_arg_constant(int i) const { if (i < 0 || i >= _impl->_constant_args.size()) { return false; } return _impl->_constant_args[i] != nullptr; }
target: 0 | cwe: ["CWE-200"] | project: incubator-doris | size: 6
commit_id: 246ac4e37aa4da6836b7850cb990f02d1c3725a3 | hash: 316,942,794,714,456,570,000,000,000,000,000,000,000
message:
[fix] fix a bug of encryption function with iv may return wrong result (#8277)
static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i) { struct usb_usbvision *usbvision = video_drvdata(file); usbvision->streaming = stream_on; call_all(usbvision, video, s_stream, 1); return 0; }
target: 0 | cwe: ["CWE-17"] | project: media_tree | size: 9
commit_id: fa52bd506f274b7619955917abfde355e3d19ffe | hash: 324,219,805,634,973,500,000,000,000,000,000,000,000
message:
[media] usbvision: fix crash on detecting device with invalid configuration The usbvision driver crashes when a specially crafted usb device with invalid number of interfaces or endpoints is detected. This fix adds checks that the device has proper configuration expected by the driver. Reported-by: Ralf Spenneberg <[email protected]> Signed-off-by: Vladis Dronov <[email protected]> Signed-off-by: Mauro Carvalho Chehab <[email protected]>
static s32 gf_media_vvc_read_sps_bs_internal(GF_BitStream *bs, VVCState *vvc, u8 layer_id, u32 *vui_flag_pos) { s32 vps_id, sps_id; u32 i, CtbSizeY; VVC_SPS *sps; u8 sps_ptl_dpb_hrd_params_present_flag; if (vui_flag_pos) *vui_flag_pos = 0; sps_id = gf_bs_read_int_log(bs, 4, "sps_id"); if (sps_id >= 16) { return -1; } vps_id = gf_bs_read_int_log(bs, 4, "vps_id"); if (vps_id >= 16) { return -1; } if (!vps_id && !vvc->vps[0].state) { vvc->vps[0].state = 1; vvc->vps[0].num_ptl = 1; vvc->vps[0].max_layers = 1; vvc->vps[0].all_layers_independent = 1; } sps = &vvc->sps[sps_id]; if (!sps->state) { sps->state = 1; sps->id = sps_id; sps->vps_id = vps_id; } sps->max_sublayers = 1 + gf_bs_read_int_log(bs, 3, "max_sublayers_minus1"); sps->chroma_format_idc = gf_bs_read_int_log(bs, 2, "chroma_format_idc"); sps->log2_ctu_size = 5 + gf_bs_read_int_log(bs, 2, "log2_ctu_size_minus5"); CtbSizeY = 1<<sps->log2_ctu_size; sps_ptl_dpb_hrd_params_present_flag = gf_bs_read_int_log(bs, 1, "sps_ptl_dpb_hrd_params_present_flag"); if (sps_ptl_dpb_hrd_params_present_flag) { VVC_ProfileTierLevel ptl, *p_ptl; if (sps->vps_id) { p_ptl = &ptl; } else { p_ptl = &vvc->vps[0].ptl[0]; } memset(p_ptl, 0, sizeof(VVC_ProfileTierLevel)); p_ptl->pt_present = 1; p_ptl->ptl_max_tid = sps->max_sublayers; vvc_profile_tier_level(bs, p_ptl, 0); } sps->gdr_enabled = gf_bs_read_int_log(bs, 1, "gdr_enabled"); sps->ref_pic_resampling = gf_bs_read_int_log(bs, 1, "ref_pic_resampling"); if (sps->ref_pic_resampling) sps->res_change_in_clvs = gf_bs_read_int_log(bs, 1, "res_change_in_clvs"); sps->width = gf_bs_read_ue_log(bs, "width"); sps->height = gf_bs_read_ue_log(bs, "height"); sps->conf_window = gf_bs_read_int_log(bs, 1, "conformance_window_present_flag"); if (sps->conf_window) { sps->cw_left = gf_bs_read_ue_log(bs, "conformance_window_left"); sps->cw_right = gf_bs_read_ue_log(bs, "conformance_window_right"); sps->cw_top = gf_bs_read_ue_log(bs, "conformance_window_top"); sps->cw_bottom = gf_bs_read_ue_log(bs, "conformance_window_bottom"); } sps->subpic_info_present = gf_bs_read_int_log(bs, 1, "subpic_info_present"); if (sps->subpic_info_present) { sps->nb_subpics = 1 + gf_bs_read_ue_log(bs, "nb_subpics_minus1"); if (sps->nb_subpics>1) { u32 tmpWidthVal, tmpHeightVal; sps->independent_subpic_flags = gf_bs_read_int_log(bs, 1, "independent_subpic_flags"); sps->subpic_same_size = gf_bs_read_int_log(bs, 1, "subpic_same_size"); tmpWidthVal = (sps->width + CtbSizeY-1) / CtbSizeY; tmpWidthVal = gf_get_bit_size(tmpWidthVal); tmpHeightVal = (sps->height + CtbSizeY-1) / CtbSizeY; tmpHeightVal = gf_get_bit_size(tmpHeightVal); for (i=0; i<sps->nb_subpics; i++) { if( !sps->subpic_same_size || !i) { if (i && (sps->width > CtbSizeY)) gf_bs_read_int_log(bs, tmpWidthVal, "subpic_ctu_top_left_x"); if (i && (sps->height > CtbSizeY)) gf_bs_read_int_log(bs, tmpHeightVal, "subpic_ctu_top_left_y"); if ((i+1 < sps->nb_subpics) && (sps->width > CtbSizeY)) gf_bs_read_int_log(bs, tmpWidthVal, "subpic_width_minus1"); if ((i+1 < sps->nb_subpics) && (sps->height > CtbSizeY)) gf_bs_read_int_log(bs, tmpHeightVal, "subpic_height_minus1"); } if (!sps->independent_subpic_flags) { gf_bs_read_int_log(bs, 1, "subpic_treated_as_pic_flag"); gf_bs_read_int_log(bs, 1, "loop_filter_across_subpic_enabled_flag"); } } sps->subpicid_len = gf_bs_read_ue_log(bs, "subpic_id_len_minus1") + 1; sps->subpicid_mapping_explicit = gf_bs_read_int_log(bs, 1, "subpic_id_mapping_explicitly_signalled_flag"); if (sps->subpicid_mapping_explicit) { sps->subpicid_mapping_present = gf_bs_read_int_log(bs, 1, 
"subpic_id_mapping_present_flag"); if (sps->subpicid_mapping_present) { for (i=0; i<sps->nb_subpics; i++) { gf_bs_read_ue_log(bs, "subpic_id"); } } } } } sps->bitdepth = gf_bs_read_ue_log(bs, "bitdepth_minus8") + 8; gf_bs_read_int_log(bs, 1, "entropy_coding_sync_enabled_flag"); gf_bs_read_int_log(bs, 1, "entry_point_offsets_present_flag"); sps->log2_max_poc_lsb = 4 + gf_bs_read_int_log(bs, 4, "log2_max_poc_lsb_minus4"); if ((sps->poc_msb_cycle_flag = gf_bs_read_int_log(bs, 1, "poc_msb_cycle_flag"))) sps->poc_msb_cycle_len = 1 + gf_bs_read_ue_log(bs, "poc_msb_cycle_len_minus1"); u8 sps_num_extra_ph_bits = 8 * gf_bs_read_int_log(bs, 2, "sps_num_extra_ph_bytes"); for (i=0; i<sps_num_extra_ph_bits; i++) { if (gf_bs_read_int_log_idx(bs, 1, "extra_ph_bit_present_flag", 1)) sps->ph_num_extra_bits++; } u8 sps_num_extra_sh_bits = 8 * gf_bs_read_int_log(bs, 2, "num_extra_sh_bytes"); for (i=0; i<sps_num_extra_sh_bits; i++) { if (gf_bs_read_int_log_idx(bs, 1, "extra_sh_bit_present_flag", i)) sps->sh_num_extra_bits++; } if (sps_ptl_dpb_hrd_params_present_flag) { u8 sps_sublayer_dpb_params_flag = 0; if (sps->max_sublayers>1) { sps_sublayer_dpb_params_flag = gf_bs_read_int_log(bs, 1, "sps_sublayer_dpb_params_flag"); } for (i=(sps_sublayer_dpb_params_flag ? 0 : sps->max_sublayers-1); i < sps->max_sublayers; i++ ) { gf_bs_read_ue_log_idx(bs, "dpb_max_dec_pic_buffering_minus1", i); gf_bs_read_ue_log_idx(bs, "dpb_max_num_reorder_pics", i); gf_bs_read_ue_log_idx(bs, "dpb_max_latency_increase_plus1", i); } } gf_bs_read_ue_log(bs, "sps_log2_min_luma_coding_block_size_minus2"); gf_bs_read_int_log(bs, 1, "sps_partition_constraints_override_enabled_flag"); gf_bs_read_ue_log(bs, "sps_log2_min_luma_coding_block_size_minus2"); u8 sps_max_mtt_hierarchy_depth_intra_slice_luma = gf_bs_read_ue_log(bs, "sps_max_mtt_hierarchy_depth_intra_slice_luma"); if (sps_max_mtt_hierarchy_depth_intra_slice_luma != 0) { gf_bs_read_ue_log(bs, "sps_log2_diff_max_bt_min_qt_intra_slice_luma"); gf_bs_read_ue_log(bs, "sps_log2_diff_max_tt_min_qt_intra_slice_luma"); } u8 sps_qtbtt_dual_tree_intra_flag = 0; if (sps->chroma_format_idc) { sps_qtbtt_dual_tree_intra_flag = gf_bs_read_int_log(bs, 1, "sps_qtbtt_dual_tree_intra_flag"); } if (sps_qtbtt_dual_tree_intra_flag) { gf_bs_read_ue_log(bs, "sps_log2_diff_min_qt_min_cb_intra_slice_chroma"); u8 sps_max_mtt_hierarchy_depth_intra_slice_chroma = gf_bs_read_ue_log(bs, "sps_max_mtt_hierarchy_depth_intra_slice_chroma"); if( sps_max_mtt_hierarchy_depth_intra_slice_chroma != 0) { gf_bs_read_ue_log(bs, "sps_log2_diff_max_bt_min_qt_intra_slice_chroma"); gf_bs_read_ue_log(bs, "sps_log2_diff_max_tt_min_qt_intra_slice_chroma"); } } gf_bs_read_ue_log(bs, "sps_log2_diff_min_qt_min_cb_inter_slice"); u8 sps_max_mtt_hierarchy_depth_inter_slice = gf_bs_read_ue_log(bs, "sps_max_mtt_hierarchy_depth_inter_slice"); if (sps_max_mtt_hierarchy_depth_inter_slice != 0) { gf_bs_read_ue_log(bs, "sps_log2_diff_max_bt_min_qt_inter_slice"); gf_bs_read_ue_log(bs, "sps_log2_diff_max_tt_min_qt_inter_slice"); } //u8 sps_max_luma_transform_size_64_flag = 0; if (CtbSizeY > 32) { /*sps_max_luma_transform_size_64_flag = */gf_bs_read_int_log(bs, 1, "sps_max_luma_transform_size_64_flag"); } u8 sps_transform_skip_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_transform_skip_enabled_flag"); if (sps_transform_skip_enabled_flag) { gf_bs_read_ue_log(bs, "sps_log2_transform_skip_max_size_minus2"); gf_bs_read_int_log(bs, 1, "sps_bdpcm_enabled_flag"); } if (gf_bs_read_int_log(bs, 1, "sps_mts_enabled_flag")) { gf_bs_read_int_log(bs, 1, 
"sps_explicit_mts_intra_enabled_flag"); gf_bs_read_int_log(bs, 1, "sps_explicit_mts_inter_enabled_flag"); } gf_bs_read_int_log(bs, 1, "sps_lfnst_enabled_flag"); if (sps->chroma_format_idc) { u8 sps_joint_cbcr_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_joint_cbcr_enabled_flag"); u8 sps_same_qp_table_for_chroma_flag = gf_bs_read_int_log(bs, 1, "sps_same_qp_table_for_chroma_flag"); u32 numQpTables = sps_same_qp_table_for_chroma_flag ? 1 : (sps_joint_cbcr_enabled_flag ? 3 : 2); for (i=0; i<numQpTables; i++) { gf_bs_read_se_log_idx(bs, "sps_qp_table_start_minus26", i); u32 j, sps_num_points_in_qp_table = 1 + gf_bs_read_ue_log_idx(bs, "sps_num_points_in_qp_table_minus1", i); for (j=0; j<sps_num_points_in_qp_table; j++) { gf_bs_read_ue_log_idx2(bs, "sps_delta_qp_in_val_minus1", i, j); gf_bs_read_ue_log_idx2(bs, "sps_delta_qp_diff_val", i, j); } } } gf_bs_read_int_log(bs, 1, "sps_sao_enabled_flag"); sps->alf_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_alf_enabled_flag"); if (sps->alf_enabled_flag && sps->chroma_format_idc) { gf_bs_read_int_log(bs, 1, "sps_ccalf_enabled_flag"); } /*! TODO parse the rest !*/ return sps_id; }
target: 1 | cwe: ["CWE-190", "CWE-787"] | project: gpac | size: 195
commit_id: 51cdb67ff7c5f1242ac58c5aa603ceaf1793b788 | hash: 148,825,474,867,684,870,000,000,000,000,000,000,000
message:
add safety in avc/hevc/vvc sps/pps/vps ID check - cf #1720 #1721 #1722
get_variable_value (var) SHELL_VAR *var; { if (var == 0) return ((char *)NULL); #if defined (ARRAY_VARS) else if (array_p (var)) return (array_reference (array_cell (var), 0)); else if (assoc_p (var)) return (assoc_reference (assoc_cell (var), "0")); #endif else return (value_cell (var)); }
target: 0 | cwe: [] | project: bash | size: 14
commit_id: 863d31ae775d56b785dc5b0105b6d251515d81d5 | hash: 80,794,563,729,707,090,000,000,000,000,000,000,000
message:
commit bash-20120224 snapshot
OFB crypt/decrypt data using key key with cipher cipher starting with iv */ PHP_FUNCTION(mcrypt_ofb) { zval **mode; char *cipher, *key, *data, *iv = NULL; int cipher_len, key_len, data_len, iv_len = 0; MCRYPT_GET_CRYPT_ARGS convert_to_long_ex(mode); php_mcrypt_do_crypt(cipher, key, key_len, data, data_len, "ofb", iv, iv_len, ZEND_NUM_ARGS(), Z_LVAL_PP(mode), return_value TSRMLS_CC);
target: 1 | cwe: ["CWE-190"] | project: php-src | size: 12
commit_id: 6c5211a0cef0cc2854eaa387e0eb036e012904d0 | hash: 271,487,334,764,332,950,000,000,000,000,000,000,000
message:
Fix bug #72455: Heap Overflow due to integer overflows
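The message points at integer overflows feeding a heap allocation. A hedged sketch of the kind of guard involved, with illustrative names (not PHP's actual internals): round a data length up to the cipher block size only after checking that the arithmetic cannot wrap.

```c
#include <stddef.h>
#include <stdint.h>

/* Illustrative guard: compute the block-padded length of data_len,
 * failing instead of wrapping around SIZE_MAX. */
static int padded_len(size_t data_len, size_t block_size, size_t *out)
{
    if (block_size == 0 || data_len > SIZE_MAX - (block_size - 1))
        return -1;                /* would overflow: refuse */
    *out = (data_len + block_size - 1) / block_size * block_size;
    return 0;
}
```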
static int get_qcc(Jpeg2000DecoderContext *s, int n, Jpeg2000QuantStyle *q, uint8_t *properties) { int compno; if (bytestream2_get_bytes_left(&s->g) < 1) return AVERROR_INVALIDDATA; compno = bytestream2_get_byteu(&s->g); if (compno >= s->ncomponents) { av_log(s->avctx, AV_LOG_ERROR, "Invalid compno %d. There are %d components in the image.\n", compno, s->ncomponents); return AVERROR_INVALIDDATA; } properties[compno] |= HAD_QCC; return get_qcx(s, n - 1, q + compno); }
target: 0 | cwe: ["CWE-119", "CWE-787"] | project: FFmpeg | size: 20
commit_id: 9a271a9368eaabf99e6c2046103acb33957e63b7 | hash: 140,860,919,764,587,540,000,000,000,000,000,000,000
message:
jpeg2000: check log2_cblk dimensions Fixes out of array access Fixes Ticket2895 Found-by: Piotr Bandurski <[email protected]> Signed-off-by: Michael Niedermayer <[email protected]>
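The JPEG 2000 spec bounds the code-block exponents (roughly: width and height exponents at most 10, and their sum at most 12), so a fix of this kind amounts to rejecting out-of-range values before they size any array. A hedged sketch of such a check (the return convention is illustrative):

```c
/* Reject JPEG 2000 code-block dimension exponents outside spec limits. */
static int check_cblk_dims(int log2_cblk_width, int log2_cblk_height)
{
    if (log2_cblk_width > 10 || log2_cblk_height > 10 ||
        log2_cblk_width + log2_cblk_height > 12)
        return -1;   /* invalid bitstream: refuse before indexing arrays */
    return 0;
}
```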
bar_value(int size, uint64_t val) { uint64_t mask; assert(size == 1 || size == 2 || size == 4 || size == 8); mask = (size < 8 ? 1UL << (size * 8) : 0UL) - 1; return val & mask; }
target: 0 | cwe: ["CWE-617", "CWE-703"] | project: acrn-hypervisor | size: 9
commit_id: 6199e653418eda58cd698d8769820904453e2535 | hash: 124,833,491,220,296,200,000,000,000,000,000,000,000
message:
dm: validate the input in 'pci_emul_mem_handler()' checking the inputs explicitly instead of using Assert. Tracked-On: #4003 Signed-off-by: Yonghua Huang <[email protected]> Reviewed-by: Shuo Liu <[email protected]> Acked-by: Yu Wang <[email protected]>
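The function stored in this record still relies on assert(), which disappears in release builds; the message says the fix validates explicitly instead. A sketch of what that looks like (the _checked name and return convention are assumptions):

```c
#include <stdint.h>

/* Validate size explicitly and fail soft instead of asserting. */
static int bar_value_checked(int size, uint64_t val, uint64_t *out)
{
    if (size != 1 && size != 2 && size != 4 && size != 8)
        return -1;                              /* reject bad input */
    *out = (size < 8) ? (val & ((1ULL << (size * 8)) - 1)) : val;
    return 0;
}
```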
gif_fill_in_pixels (GifContext *context, guchar *dest, gint offset, guchar v) { guchar *pixel = NULL; guchar (*cmap)[MAXCOLORMAPSIZE]; if (context->frame_cmap_active) cmap = context->frame_color_map; else cmap = context->global_color_map; if (context->gif89.transparent != -1) { pixel = dest + (context->draw_ypos + offset) * gdk_pixbuf_get_rowstride (context->frame->pixbuf) + context->draw_xpos * 4; *pixel = cmap [0][(guchar) v]; *(pixel+1) = cmap [1][(guchar) v]; *(pixel+2) = cmap [2][(guchar) v]; *(pixel+3) = (guchar) ((v == context->gif89.transparent) ? 0 : 255); } else { pixel = dest + (context->draw_ypos + offset) * gdk_pixbuf_get_rowstride (context->frame->pixbuf) + context->draw_xpos * 3; *pixel = cmap [0][(guchar) v]; *(pixel+1) = cmap [1][(guchar) v]; *(pixel+2) = cmap [2][(guchar) v]; } }
target: 0 | cwe: [] | project: gdk-pixbuf | size: 23
commit_id: f8569bb13e2aa1584dde61ca545144750f7a7c98 | hash: 128,640,831,877,421,290,000,000,000,000,000,000,000
message:
GIF: Don't return a partially initialized pixbuf structure It was found that gdk-pixbuf GIF image loader gdk_pixbuf__gif_image_load() routine did not properly handle certain return values from their subroutines. A remote attacker could provide a specially-crafted GIF image, which once opened in an application, linked against gdk-pixbuf would lead to gdk-pixbuf to return partially initialized pixbuf structure, possibly having huge width and height, leading to that particular application termination due excessive memory use. The CVE identifier of CVE-2011-2485 has been assigned to this issue.
ZEND_API int ZEND_FASTCALL is_not_identical_function(zval *result, zval *op1, zval *op2) /* {{{ */ { ZVAL_BOOL(result, !zend_is_identical(op1, op2)); return SUCCESS; }
target: 0 | cwe: ["CWE-787"] | project: php-src | size: 5
commit_id: f1ce8d5f5839cb2069ea37ff424fb96b8cd6932d | hash: 247,275,466,253,147,500,000,000,000,000,000,000,000
message:
Fix #73122: Integer Overflow when concatenating strings We must avoid integer overflows in memory allocations, so we introduce an additional check in the VM, and bail out in the rare case of an overflow. Since the recent fix for bug #74960 still doesn't catch all possible overflows, we fix that right away.
static void uc_fw_unbind_ggtt(struct intel_uc_fw *uc_fw) { struct drm_i915_gem_object *obj = uc_fw->obj; struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt; u64 start = uc_fw_ggtt_offset(uc_fw); ggtt->vm.clear_range(&ggtt->vm, start, obj->base.size); }
target: 0 | cwe: ["CWE-20", "CWE-190"] | project: linux | size: 8
commit_id: c784e5249e773689e38d2bc1749f08b986621a26 | hash: 44,006,834,057,447,320,000,000,000,000,000,000,000
message:
drm/i915/guc: Update to use firmware v49.0.1 The latest GuC firmware includes a number of interface changes that require driver updates to match. * Starting from Gen11, the ID to be provided to GuC needs to contain the engine class in bits [0..2] and the instance in bits [3..6]. NOTE: this patch breaks pointer dereferences in some existing GuC functions that use the guc_id to dereference arrays but these functions are not used for now as we have GuC submission disabled and we will update these functions in follow up patch which requires new IDs. * The new GuC requires the additional data structure (ADS) and associated 'private_data' pointer to be setup. This is basically a scratch area of memory that the GuC owns. The size is read from the CSS header. * There is now a physical to logical engine mapping table in the ADS which needs to be configured in order for the firmware to load. For now, the table is initialised with a 1 to 1 mapping. * GUC_CTL_CTXINFO has been removed from the initialization params. * reg_state_buffer is maintained internally by the GuC as part of the private data. * The ADS layout has changed significantly. This patch updates the shared structure and also adds better documentation of the layout. * While i915 does not use GuC doorbells, the firmware now requires that some initialisation is done. * The number of engine classes and instances supported in the ADS has been increased. Signed-off-by: John Harrison <[email protected]> Signed-off-by: Matthew Brost <[email protected]> Signed-off-by: Daniele Ceraolo Spurio <[email protected]> Signed-off-by: Oscar Mateo <[email protected]> Signed-off-by: Michel Thierry <[email protected]> Signed-off-by: Rodrigo Vivi <[email protected]> Signed-off-by: Michal Wajdeczko <[email protected]> Cc: Michal Winiarski <[email protected]> Cc: Tomasz Lis <[email protected]> Cc: Joonas Lahtinen <[email protected]> Reviewed-by: Daniele Ceraolo Spurio <[email protected]> Signed-off-by: Joonas Lahtinen <[email protected]> Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac) { struct zoneref *z; struct zone *zone; pg_data_t *last_pgdat = NULL; for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, ac->nodemask) { if (last_pgdat != zone->zone_pgdat) wakeup_kswapd(zone, order, ac->high_zoneidx); last_pgdat = zone->zone_pgdat; } }
target: 0 | cwe: [] | project: linux | size: 13
commit_id: 400e22499dd92613821374c8c6c88c7225359980 | hash: 339,617,283,994,488,660,000,000,000,000,000,000,000
message:
mm: don't warn about allocations which stall for too long Commit 63f53dea0c98 ("mm: warn about allocations which stall for too long") was a great step for reducing possibility of silent hang up problem caused by memory allocation stalls. But this commit reverts it, for it is possible to trigger OOM lockup and/or soft lockups when many threads concurrently called warn_alloc() (in order to warn about memory allocation stalls) due to current implementation of printk(), and it is difficult to obtain useful information due to limitation of synchronous warning approach. Current printk() implementation flushes all pending logs using the context of a thread which called console_unlock(). printk() should be able to flush all pending logs eventually unless somebody continues appending to printk() buffer. Since warn_alloc() started appending to printk() buffer while waiting for oom_kill_process() to make forward progress when oom_kill_process() is processing pending logs, it became possible for warn_alloc() to force oom_kill_process() loop inside printk(). As a result, warn_alloc() significantly increased possibility of preventing oom_kill_process() from making forward progress. ---------- Pseudo code start ---------- Before warn_alloc() was introduced: retry: if (mutex_trylock(&oom_lock)) { while (atomic_read(&printk_pending_logs) > 0) { atomic_dec(&printk_pending_logs); print_one_log(); } // Send SIGKILL here. mutex_unlock(&oom_lock) } goto retry; After warn_alloc() was introduced: retry: if (mutex_trylock(&oom_lock)) { while (atomic_read(&printk_pending_logs) > 0) { atomic_dec(&printk_pending_logs); print_one_log(); } // Send SIGKILL here. mutex_unlock(&oom_lock) } else if (waited_for_10seconds()) { atomic_inc(&printk_pending_logs); } goto retry; ---------- Pseudo code end ---------- Although waited_for_10seconds() becomes true once per 10 seconds, unbounded number of threads can call waited_for_10seconds() at the same time. Also, since threads doing waited_for_10seconds() keep doing almost busy loop, the thread doing print_one_log() can use little CPU resource. Therefore, this situation can be simplified like ---------- Pseudo code start ---------- retry: if (mutex_trylock(&oom_lock)) { while (atomic_read(&printk_pending_logs) > 0) { atomic_dec(&printk_pending_logs); print_one_log(); } // Send SIGKILL here. mutex_unlock(&oom_lock) } else { atomic_inc(&printk_pending_logs); } goto retry; ---------- Pseudo code end ---------- when printk() is called faster than print_one_log() can process a log. One of possible mitigation would be to introduce a new lock in order to make sure that no other series of printk() (either oom_kill_process() or warn_alloc()) can append to printk() buffer when one series of printk() (either oom_kill_process() or warn_alloc()) is already in progress. Such serialization will also help obtaining kernel messages in readable form. ---------- Pseudo code start ---------- retry: if (mutex_trylock(&oom_lock)) { mutex_lock(&oom_printk_lock); while (atomic_read(&printk_pending_logs) > 0) { atomic_dec(&printk_pending_logs); print_one_log(); } // Send SIGKILL here. 
mutex_unlock(&oom_printk_lock); mutex_unlock(&oom_lock) } else { if (mutex_trylock(&oom_printk_lock)) { atomic_inc(&printk_pending_logs); mutex_unlock(&oom_printk_lock); } } goto retry; ---------- Pseudo code end ---------- But this commit does not go that direction, for we don't want to introduce a new lock dependency, and we unlikely be able to obtain useful information even if we serialized oom_kill_process() and warn_alloc(). Synchronous approach is prone to unexpected results (e.g. too late [1], too frequent [2], overlooked [3]). As far as I know, warn_alloc() never helped with providing information other than "something is going wrong". I want to consider asynchronous approach which can obtain information during stalls with possibly relevant threads (e.g. the owner of oom_lock and kswapd-like threads) and serve as a trigger for actions (e.g. turn on/off tracepoints, ask libvirt daemon to take a memory dump of stalling KVM guest for diagnostic purpose). This commit temporarily loses ability to report e.g. OOM lockup due to unable to invoke the OOM killer due to !__GFP_FS allocation request. But asynchronous approach will be able to detect such situation and emit warning. Thus, let's remove warn_alloc(). [1] https://bugzilla.kernel.org/show_bug.cgi?id=192981 [2] http://lkml.kernel.org/r/CAM_iQpWuPVGc2ky8M-9yukECtS+zKjiDasNymX7rMcBjBFyM_A@mail.gmail.com [3] commit db73ee0d46379922 ("mm, vmscan: do not loop on too_many_isolated for ever")) Link: http://lkml.kernel.org/r/1509017339-4802-1-git-send-email-penguin-kernel@I-love.SAKURA.ne.jp Signed-off-by: Tetsuo Handa <[email protected]> Reported-by: Cong Wang <[email protected]> Reported-by: yuwang.yuwang <[email protected]> Reported-by: Johannes Weiner <[email protected]> Acked-by: Michal Hocko <[email protected]> Acked-by: Johannes Weiner <[email protected]> Cc: Vlastimil Babka <[email protected]> Cc: Mel Gorman <[email protected]> Cc: Dave Hansen <[email protected]> Cc: Sergey Senozhatsky <[email protected]> Cc: Petr Mladek <[email protected]> Cc: Steven Rostedt <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
static void put_arg_page(struct page *page) { }
target: 0 | cwe: ["CWE-200"] | project: linux-2.6 | size: 3
commit_id: b66c5984017533316fd1951770302649baf1aa33 | hash: 89,690,995,748,855,960,000,000,000,000,000,000,000
message:
exec: do not leave bprm->interp on stack If a series of scripts are executed, each triggering module loading via unprintable bytes in the script header, kernel stack contents can leak into the command line. Normally execution of binfmt_script and binfmt_misc happens recursively. However, when modules are enabled, and unprintable bytes exist in the bprm->buf, execution will restart after attempting to load matching binfmt modules. Unfortunately, the logic in binfmt_script and binfmt_misc does not expect to get restarted. They leave bprm->interp pointing to their local stack. This means on restart bprm->interp is left pointing into unused stack memory which can then be copied into the userspace argv areas. After additional study, it seems that both recursion and restart remains the desirable way to handle exec with scripts, misc, and modules. As such, we need to protect the changes to interp. This changes the logic to require allocation for any changes to the bprm->interp. To avoid adding a new kmalloc to every exec, the default value is left as-is. Only when passing through binfmt_script or binfmt_misc does an allocation take place. For a proof of concept, see DoTest.sh from: http://www.halfdog.net/Security/2012/LinuxKernelBinfmtScriptStackDataDisclosure/ Signed-off-by: Kees Cook <[email protected]> Cc: halfdog <[email protected]> Cc: P J P <[email protected]> Cc: Alexander Viro <[email protected]> Cc: <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
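The kernel fix this message describes adds an allocation helper (bprm_change_interp()) so interp never points into a caller's stack frame. A user-space sketch of the same idea, with illustrative struct and function names:

```c
#include <stdlib.h>
#include <string.h>

struct binprm_like {
    char *interp;         /* what the loaders read */
    char *interp_alloc;   /* heap copy owned by this struct, if any */
};

/* Replace interp with a heap copy that outlives the caller's stack. */
static int change_interp(struct binprm_like *b, const char *interp)
{
    char *copy = strdup(interp);
    if (!copy)
        return -1;
    free(b->interp_alloc);      /* drop any previous copy */
    b->interp_alloc = copy;
    b->interp = copy;
    return 0;
}
```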
static int php_array_element_export(zval **zv TSRMLS_DC, int num_args, va_list args, zend_hash_key *hash_key) /* {{{ */ { int level; smart_str *buf; level = va_arg(args, int); buf = va_arg(args, smart_str *); if (hash_key->nKeyLength == 0) { /* numeric key */ buffer_append_spaces(buf, level+1); smart_str_append_long(buf, (long) hash_key->h); smart_str_appendl(buf, " => ", 4); } else { /* string key */ char *key, *tmp_str; int key_len, tmp_len; key = php_addcslashes(hash_key->arKey, hash_key->nKeyLength - 1, &key_len, 0, "'\\", 2 TSRMLS_CC); tmp_str = php_str_to_str_ex(key, key_len, "\0", 1, "' . \"\\0\" . '", 12, &tmp_len, 0, NULL); buffer_append_spaces(buf, level + 1); smart_str_appendc(buf, '\''); smart_str_appendl(buf, tmp_str, tmp_len); smart_str_appendl(buf, "' => ", 5); efree(key); efree(tmp_str); } php_var_export_ex(zv, level + 2, buf TSRMLS_CC); smart_str_appendc(buf, ','); smart_str_appendc(buf, '\n'); return 0; }
target: 0 | cwe: [] | project: php-src | size: 35
commit_id: e8429400d40e3c3aa4b22ba701991d698a2f3b2f | hash: 276,789,099,281,452,000,000,000,000,000,000,000,000
message:
Fix bug #70172 - Use After Free Vulnerability in unserialize()
void subselect_union_engine::exclude() { unit->exclude_level(); }
target: 0 | cwe: ["CWE-89"] | project: server | size: 4
commit_id: 3c209bfc040ddfc41ece8357d772547432353fd2 | hash: 246,455,366,774,503,420,000,000,000,000,000,000,000
message:
MDEV-25994: Crash with union of my_decimal type in ORDER BY clause When single-row subquery fails with "Subquery reutrns more than 1 row" error, it will raise an error and return NULL. On the other hand, Item_singlerow_subselect sets item->maybe_null=0 for table-less subqueries like "(SELECT not_null_value)" (*) This discrepancy (item with maybe_null=0 returning NULL) causes the code in Type_handler_decimal_result::make_sort_key_part() to crash. Fixed this by allowing inference (*) only when the subquery is NOT a UNION.
autoar_extractor_do_sanitize_pathname (AutoarExtractor *self, const char *pathname_bytes) { GFile *extracted_filename; gboolean valid_filename; g_autofree char *sanitized_pathname = NULL; g_autofree char *utf8_pathname; utf8_pathname = autoar_common_get_utf8_pathname (pathname_bytes); extracted_filename = g_file_get_child (self->destination_dir, utf8_pathname ? utf8_pathname : pathname_bytes); valid_filename = is_valid_filename (extracted_filename, self->destination_dir); if (!valid_filename) { g_warning ("autoar_extractor_do_sanitize_pathname: %s is outside of the destination dir", g_file_peek_path (extracted_filename)); g_object_unref (extracted_filename); return NULL; } if (self->prefix != NULL && self->new_prefix != NULL) { g_autofree char *relative_path; /* Replace the old prefix with the new one */ relative_path = g_file_get_relative_path (self->prefix, extracted_filename); relative_path = relative_path != NULL ? relative_path : g_strdup (""); g_object_unref (extracted_filename); extracted_filename = g_file_get_child (self->new_prefix, relative_path); } sanitized_pathname = g_file_get_path (extracted_filename); g_debug ("autoar_extractor_do_sanitize_pathname: %s", sanitized_pathname); return extracted_filename; }
target: 0 | cwe: ["CWE-22"] | project: gnome-autoar | size: 42
commit_id: adb067e645732fdbe7103516e506d09eb6a54429 | hash: 81,271,880,351,242,485,000,000,000,000,000,000,000
message:
AutoarExtractor: Do not extract files outside the destination dir Currently, a malicious archive can cause that the files are extracted outside of the destination dir. This can happen if the archive contains a file whose parent is a symbolic link, which points outside of the destination dir. This is potentially a security threat similar to CVE-2020-11736. Let's skip such problematic files when extracting. Fixes: https://gitlab.gnome.org/GNOME/gnome-autoar/-/issues/7
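gnome-autoar implements this with GLib/GIO, but the underlying containment test is easy to state. A POSIX sketch, purely illustrative: resolve the would-be path (which follows any symlinks) and require that it still lies inside the destination directory.

```c
#include <limits.h>
#include <stdlib.h>
#include <string.h>

/* Return 1 iff `path` resolves to a location inside `dest`. */
static int is_inside(const char *dest, const char *path)
{
    char rdest[PATH_MAX], rpath[PATH_MAX];
    size_t n;

    if (!realpath(dest, rdest) || !realpath(path, rpath))
        return 0;                     /* unresolvable: treat as outside */
    n = strlen(rdest);
    return strncmp(rdest, rpath, n) == 0 &&
           (rpath[n] == '\0' || rpath[n] == '/');
}
```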
bfad_im_num_of_ports_show(struct device *dev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; return snprintf(buf, PAGE_SIZE, "%d\n", bfa_get_nports(&bfad->bfa)); }
target: 0 | cwe: ["CWE-400", "CWE-401"] | project: linux | size: 11
commit_id: 0e62395da2bd5166d7c9e14cbc7503b256a34cb0 | hash: 150,625,694,796,674,940,000,000,000,000,000,000,000
message:
scsi: bfa: release allocated memory in case of error In bfad_im_get_stats if bfa_port_get_stats fails, allocated memory needs to be released. Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Navid Emamdoost <[email protected]> Signed-off-by: Martin K. Petersen <[email protected]>
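The leak pattern this message fixes is generic: an allocation followed by an early error return that forgets to release it. A stand-in sketch of the corrected shape (not the bfa driver code):

```c
#include <stdlib.h>

/* Every error path after the allocation must free the buffer. */
static int get_stats(int (*query)(void *buf), size_t len)
{
    void *buf = malloc(len);
    if (!buf)
        return -1;
    if (query(buf) != 0) {
        free(buf);     /* the release that was missing on failure */
        return -1;
    }
    /* ... consume buf ... */
    free(buf);
    return 0;
}
```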
xmlCheckFilename (const char *path) { #ifdef HAVE_STAT struct stat stat_buffer; #endif if (path == NULL) return(0); #ifdef HAVE_STAT #if defined(_WIN32) || defined (__DJGPP__) && !defined (__CYGWIN__) /* * On Windows stat and wstat do not work with long pathname, * which start with '\\?\' */ if ((path[0] == '\\') && (path[1] == '\\') && (path[2] == '?') && (path[3] == '\\') ) return 1; if (xmlWrapStat(path, &stat_buffer) == -1) return 0; #else if (stat(path, &stat_buffer) == -1) return 0; #endif #ifdef S_ISDIR if (S_ISDIR(stat_buffer.st_mode)) return 2; #endif #endif /* HAVE_STAT */ return 1; }
target: 0 | cwe: ["CWE-134"] | project: libxml2 | size: 31
commit_id: 4472c3a5a5b516aaf59b89be602fbce52756c3e9 | hash: 110,335,554,364,515,400,000,000,000,000,000,000,000
message:
Fix some format string warnings with possible format string vulnerability For https://bugzilla.gnome.org/show_bug.cgi?id=761029 Decorate every method in libxml2 with the appropriate LIBXML_ATTR_FORMAT(fmt,args) macro and add some cleanups following the reports.
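The rule behind this class of fix: text that might contain '%' goes in as an argument, never as the format string. A minimal sketch against libxml2's generic error channel:

```c
#include <libxml/xmlerror.h>

/* Safe: the untrusted text is a "%s" argument, not the format. */
static void report(const char *untrusted_msg)
{
    xmlGenericError(xmlGenericErrorContext, "%s\n", untrusted_msg);
}
```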
authority_certs_fetch_missing(networkstatus_t *status, time_t now, const char *dir_hint) { /* * The pending_id digestmap tracks pending certificate downloads by * identity digest; the pending_cert digestmap tracks pending downloads * by (identity digest, signing key digest) pairs. */ digestmap_t *pending_id; fp_pair_map_t *pending_cert; /* * The missing_id_digests smartlist will hold a list of id digests * we want to fetch the newest cert for; the missing_cert_digests * smartlist will hold a list of fp_pair_t with an identity and * signing key digest. */ smartlist_t *missing_cert_digests, *missing_id_digests; char *resource = NULL; cert_list_t *cl; const or_options_t *options = get_options(); const int cache = directory_caches_unknown_auth_certs(options); fp_pair_t *fp_tmp = NULL; char id_digest_str[2*DIGEST_LEN+1]; char sk_digest_str[2*DIGEST_LEN+1]; if (should_delay_dir_fetches(options, NULL)) return; pending_cert = fp_pair_map_new(); pending_id = digestmap_new(); missing_cert_digests = smartlist_new(); missing_id_digests = smartlist_new(); /* * First, we get the lists of already pending downloads so we don't * duplicate effort. */ list_pending_downloads(pending_id, NULL, DIR_PURPOSE_FETCH_CERTIFICATE, "fp/"); list_pending_fpsk_downloads(pending_cert); /* * Now, we download any trusted authority certs we don't have by * identity digest only. This gets the latest cert for that authority. */ SMARTLIST_FOREACH_BEGIN(trusted_dir_servers, dir_server_t *, ds) { int found = 0; if (!(ds->type & V3_DIRINFO)) continue; if (smartlist_contains_digest(missing_id_digests, ds->v3_identity_digest)) continue; cl = get_cert_list(ds->v3_identity_digest); SMARTLIST_FOREACH_BEGIN(cl->certs, authority_cert_t *, cert) { if (now < cert->expires) { /* It's not expired, and we weren't looking for something to * verify a consensus with. Call it done. */ download_status_reset(&(cl->dl_status_by_id)); /* No sense trying to download it specifically by signing key hash */ download_status_reset_by_sk_in_cl(cl, cert->signing_key_digest); found = 1; break; } } SMARTLIST_FOREACH_END(cert); if (!found && download_status_is_ready(&(cl->dl_status_by_id), now, options->TestingCertMaxDownloadTries) && !digestmap_get(pending_id, ds->v3_identity_digest)) { log_info(LD_DIR, "No current certificate known for authority %s " "(ID digest %s); launching request.", ds->nickname, hex_str(ds->v3_identity_digest, DIGEST_LEN)); smartlist_add(missing_id_digests, ds->v3_identity_digest); } } SMARTLIST_FOREACH_END(ds); /* * Next, if we have a consensus, scan through it and look for anything * signed with a key from a cert we don't have. Those get downloaded * by (fp,sk) pair, but if we don't know any certs at all for the fp * (identity digest), and it's one of the trusted dir server certs * we started off above or a pending download in pending_id, don't * try to get it yet. Most likely, the one we'll get for that will * have the right signing key too, and we'd just be downloading * redundantly. */ if (status) { SMARTLIST_FOREACH_BEGIN(status->voters, networkstatus_voter_info_t *, voter) { if (!smartlist_len(voter->sigs)) continue; /* This authority never signed this consensus, so don't * go looking for a cert with key digest 0000000000. 
*/ if (!cache && !trusteddirserver_get_by_v3_auth_digest(voter->identity_digest)) continue; /* We are not a cache, and we don't know this authority.*/ /* * If we don't know *any* cert for this authority, and a download by ID * is pending or we added it to missing_id_digests above, skip this * one for now to avoid duplicate downloads. */ cl = get_cert_list(voter->identity_digest); if (smartlist_len(cl->certs) == 0) { /* We have no certs at all for this one */ /* Do we have a download of one pending? */ if (digestmap_get(pending_id, voter->identity_digest)) continue; /* * Are we about to launch a download of one due to the trusted * dir server check above? */ if (smartlist_contains_digest(missing_id_digests, voter->identity_digest)) continue; } SMARTLIST_FOREACH_BEGIN(voter->sigs, document_signature_t *, sig) { authority_cert_t *cert = authority_cert_get_by_digests(voter->identity_digest, sig->signing_key_digest); if (cert) { if (now < cert->expires) download_status_reset_by_sk_in_cl(cl, sig->signing_key_digest); continue; } if (download_status_is_ready_by_sk_in_cl( cl, sig->signing_key_digest, now, options->TestingCertMaxDownloadTries) && !fp_pair_map_get_by_digests(pending_cert, voter->identity_digest, sig->signing_key_digest)) { /* * Do this rather than hex_str(), since hex_str clobbers * old results and we call twice in the param list. */ base16_encode(id_digest_str, sizeof(id_digest_str), voter->identity_digest, DIGEST_LEN); base16_encode(sk_digest_str, sizeof(sk_digest_str), sig->signing_key_digest, DIGEST_LEN); if (voter->nickname) { log_info(LD_DIR, "We're missing a certificate from authority %s " "(ID digest %s) with signing key %s: " "launching request.", voter->nickname, id_digest_str, sk_digest_str); } else { log_info(LD_DIR, "We're missing a certificate from authority ID digest " "%s with signing key %s: launching request.", id_digest_str, sk_digest_str); } /* Allocate a new fp_pair_t to append */ fp_tmp = tor_malloc(sizeof(*fp_tmp)); memcpy(fp_tmp->first, voter->identity_digest, sizeof(fp_tmp->first)); memcpy(fp_tmp->second, sig->signing_key_digest, sizeof(fp_tmp->second)); smartlist_add(missing_cert_digests, fp_tmp); } } SMARTLIST_FOREACH_END(sig); } SMARTLIST_FOREACH_END(voter); } /* Bridge clients look up the node for the dir_hint */ const node_t *node = NULL; /* All clients, including bridge clients, look up the routerstatus for the * dir_hint */ const routerstatus_t *rs = NULL; /* If we still need certificates, try the directory that just successfully * served us a consensus or certificates. * As soon as the directory fails to provide additional certificates, we try * another, randomly selected directory. This avoids continual retries. * (We only ever have one outstanding request per certificate.) */ if (dir_hint) { if (options->UseBridges) { /* Bridge clients try the nodelist. If the dir_hint is from an authority, * or something else fetched over tor, we won't find the node here, but * we will find the rs. */ node = node_get_by_id(dir_hint); } /* All clients try the consensus routerstatus, then the fallback * routerstatus */ rs = router_get_consensus_status_by_id(dir_hint); if (!rs) { /* This will also find authorities */ const dir_server_t *ds = router_get_fallback_dirserver_by_digest( dir_hint); if (ds) { rs = &ds->fake_status; } } if (!node && !rs) { log_warn(LD_BUG, "Directory %s delivered a consensus, but %s" "no routerstatus could be found for it.", options->UseBridges ? 
"no node and " : "", hex_str(dir_hint, DIGEST_LEN)); } } /* Do downloads by identity digest */ if (smartlist_len(missing_id_digests) > 0) { int need_plus = 0; smartlist_t *fps = smartlist_new(); smartlist_add(fps, tor_strdup("fp/")); SMARTLIST_FOREACH_BEGIN(missing_id_digests, const char *, d) { char *fp = NULL; if (digestmap_get(pending_id, d)) continue; base16_encode(id_digest_str, sizeof(id_digest_str), d, DIGEST_LEN); if (need_plus) { tor_asprintf(&fp, "+%s", id_digest_str); } else { /* No need for tor_asprintf() in this case; first one gets no '+' */ fp = tor_strdup(id_digest_str); need_plus = 1; } smartlist_add(fps, fp); } SMARTLIST_FOREACH_END(d); if (smartlist_len(fps) > 1) { resource = smartlist_join_strings(fps, "", 0, NULL); /* node and rs are directories that just gave us a consensus or * certificates */ authority_certs_fetch_resource_impl(resource, dir_hint, node, rs); tor_free(resource); } /* else we didn't add any: they were all pending */ SMARTLIST_FOREACH(fps, char *, cp, tor_free(cp)); smartlist_free(fps); } /* Do downloads by identity digest/signing key pair */ if (smartlist_len(missing_cert_digests) > 0) { int need_plus = 0; smartlist_t *fp_pairs = smartlist_new(); smartlist_add(fp_pairs, tor_strdup("fp-sk/")); SMARTLIST_FOREACH_BEGIN(missing_cert_digests, const fp_pair_t *, d) { char *fp_pair = NULL; if (fp_pair_map_get(pending_cert, d)) continue; /* Construct string encodings of the digests */ base16_encode(id_digest_str, sizeof(id_digest_str), d->first, DIGEST_LEN); base16_encode(sk_digest_str, sizeof(sk_digest_str), d->second, DIGEST_LEN); /* Now tor_asprintf() */ if (need_plus) { tor_asprintf(&fp_pair, "+%s-%s", id_digest_str, sk_digest_str); } else { /* First one in the list doesn't get a '+' */ tor_asprintf(&fp_pair, "%s-%s", id_digest_str, sk_digest_str); need_plus = 1; } /* Add it to the list of pairs to request */ smartlist_add(fp_pairs, fp_pair); } SMARTLIST_FOREACH_END(d); if (smartlist_len(fp_pairs) > 1) { resource = smartlist_join_strings(fp_pairs, "", 0, NULL); /* node and rs are directories that just gave us a consensus or * certificates */ authority_certs_fetch_resource_impl(resource, dir_hint, node, rs); tor_free(resource); } /* else they were all pending */ SMARTLIST_FOREACH(fp_pairs, char *, p, tor_free(p)); smartlist_free(fp_pairs); } smartlist_free(missing_id_digests); SMARTLIST_FOREACH(missing_cert_digests, fp_pair_t *, p, tor_free(p)); smartlist_free(missing_cert_digests); digestmap_free(pending_id, NULL); fp_pair_map_free(pending_cert, NULL); }
target: 0 | cwe: [] | project: tor | size: 297
commit_id: 1afc2ed956a35b40dfd1d207652af5b50c295da7 | hash: 192,819,042,197,701,040,000,000,000,000,000,000,000
message:
Fix policies.c instance of the "if (r=(a-b)) return r" pattern I think this one probably can't underflow, since the input ranges are small. But let's not tempt fate. This patch also replaces the "cmp" functions here with just "eq" functions, since nothing actually checked for anything besides 0 and nonzero. Related to 21278.
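The pattern named in the message, "if (r = (a - b)) return r", breaks when the subtraction overflows. The replacement is branchy but safe; a minimal sketch (not Tor's exact code):

```c
/* Overflow-safe three-way comparison; never computes a - b. */
static int safe_cmp(long a, long b)
{
    if (a < b)
        return -1;
    if (a > b)
        return 1;
    return 0;
}
```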
static void rlvl_destroy(jpc_enc_rlvl_t *rlvl) { jpc_enc_band_t *band; uint_fast16_t bandno; if (rlvl->bands) { for (bandno = 0, band = rlvl->bands; bandno < rlvl->numbands; ++bandno, ++band) { band_destroy(band); } jas_free(rlvl->bands); } }
target: 1 | cwe: ["CWE-416"] | project: jasper | size: 13
commit_id: 03fe49ab96bf65fea784cdc256507ea88267fc7c | hash: 163,368,022,022,545,140,000,000,000,000,000,000,000
message:
Fixed some potential double-free problems in the JPC codec.
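One common hardening for the double-free class this message names is to clear a pointer the moment it is freed, so a repeated destroy becomes a no-op. A sketch of that idea only; not necessarily the exact JasPer patch:

```c
#include <stdlib.h>

/* Free once, then clear, so a second destroy call does nothing. */
static void destroy_bands(void **bands)
{
    if (*bands) {
        free(*bands);
        *bands = NULL;
    }
}
```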
static int v4l_g_frequency(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { struct video_device *vfd = video_devdata(file); struct v4l2_frequency *p = arg; if (vfd->vfl_type == VFL_TYPE_SDR) p->type = V4L2_TUNER_SDR; else p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; return ops->vidioc_g_frequency(file, fh, p); }
target: 0 | cwe: ["CWE-401"] | project: linux | size: 13
commit_id: fb18802a338b36f675a388fc03d2aa504a0d0899 | hash: 194,622,904,617,774,420,000,000,000,000,000,000,000
message:
media: v4l: ioctl: Fix memory leak in video_usercopy When an IOCTL with argument size larger than 128 that also used array arguments were handled, two memory allocations were made but alas, only the latter one of them was released. This happened because there was only a single local variable to hold such a temporary allocation. Fix this by adding separate variables to hold the pointers to the temporary allocations. Reported-by: Arnd Bergmann <[email protected]> Reported-by: [email protected] Fixes: d14e6d76ebf7 ("[media] v4l: Add multi-planar ioctl handling code") Cc: [email protected] Signed-off-by: Sakari Ailus <[email protected]> Acked-by: Arnd Bergmann <[email protected]> Acked-by: Hans Verkuil <[email protected]> Reviewed-by: Laurent Pinchart <[email protected]> Signed-off-by: Mauro Carvalho Chehab <[email protected]>
static void pl022_register_types(void) { type_register_static(&pl022_info); }
target: 0 | cwe: ["CWE-119"] | project: qemu | size: 4
commit_id: d8d0a0bc7e194300e53a346d25fe5724fd588387 | hash: 199,977,727,212,406,400,000,000,000,000,000,000,000
message:
pl022: fix buffer overun on invalid state load CVE-2013-4530 pl022.c did not bounds check tx_fifo_head and rx_fifo_head after loading them from file and before they are used to dereference array. Reported-by: Michael S. Tsirkin <[email protected] Reported-by: Anthony Liguori <[email protected]> Signed-off-by: Michael S. Tsirkin <[email protected]> Signed-off-by: Juan Quintela <[email protected]>
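The shape of the fix, per the message: after loading tx_fifo_head and rx_fifo_head from the migration stream, bounds-check them before they index the FIFO arrays. A hedged sketch with illustrative names:

```c
#include <stddef.h>
#include <stdint.h>

/* Reject migrated state whose FIFO indices fall outside the arrays. */
static int fifo_state_valid(uint32_t tx_head, uint32_t rx_head,
                            size_t fifo_len)
{
    return tx_head < fifo_len && rx_head < fifo_len;
}
```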
flatpak_proxy_new (const char *dbus_address, const char *socket_path) { FlatpakProxy *proxy; proxy = g_object_new (FLATPAK_TYPE_PROXY, "dbus-address", dbus_address, "socket-path", socket_path, NULL); return proxy; }
target: 0 | cwe: ["CWE-284", "CWE-436"] | project: flatpak | size: 8
commit_id: 52346bf187b5a7f1c0fe9075b328b7ad6abe78f6 | hash: 241,925,563,075,194,400,000,000,000,000,000,000,000
message:
Fix vulnerability in dbus proxy During the authentication all client data is directly forwarded to the dbus daemon as is, until we detect the BEGIN command after which we start filtering the binary dbus protocol. Unfortunately the detection of the BEGIN command in the proxy did not exactly match the detection in the dbus daemon. A BEGIN followed by a space or tab was considered ok in the daemon but not by the proxy. This could be exploited to send arbitrary dbus messages to the host, which can be used to break out of the sandbox. This was noticed by Gabriel Campana of The Google Security Team. This fix makes the detection of the authentication phase end match the dbus code. In addition we duplicate the authentication line validation from dbus, which includes ensuring all data is ASCII, and limiting the size of a line to 16k. In fact, we add some extra stringent checks, disallowing ASCII control chars and requiring that auth lines start with a capital letter.
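A sketch of the auth-line validation the message enumerates (printable ASCII only, at most 16k per line, initial capital letter), modeled on the description rather than copied from flatpak:

```c
#include <stddef.h>

/* Return 1 iff the line passes the checks described above. */
static int auth_line_is_valid(const unsigned char *line, size_t len)
{
    size_t i;

    if (len == 0 || len > 16 * 1024)
        return 0;
    if (line[0] < 'A' || line[0] > 'Z')
        return 0;                        /* commands start uppercase */
    for (i = 0; i < len; i++)
        if (line[i] < 0x20 || line[i] > 0x7e)
            return 0;                    /* control char or non-ASCII */
    return 1;
}
```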
key2_print (const struct key2* k, const struct key_type *kt, const char* prefix0, const char* prefix1) { struct gc_arena gc = gc_new (); ASSERT (k->n == 2); dmsg (D_SHOW_KEY_SOURCE, "%s (cipher): %s", prefix0, format_hex (k->keys[0].cipher, kt->cipher_length, 0, &gc)); dmsg (D_SHOW_KEY_SOURCE, "%s (hmac): %s", prefix0, format_hex (k->keys[0].hmac, kt->hmac_length, 0, &gc)); dmsg (D_SHOW_KEY_SOURCE, "%s (cipher): %s", prefix1, format_hex (k->keys[1].cipher, kt->cipher_length, 0, &gc)); dmsg (D_SHOW_KEY_SOURCE, "%s (hmac): %s", prefix1, format_hex (k->keys[1].hmac, kt->hmac_length, 0, &gc)); gc_free (&gc); }
target: 0 | cwe: ["CWE-200"] | project: openvpn | size: 21
commit_id: 11d21349a4e7e38a025849479b36ace7c2eec2ee | hash: 34,603,677,447,389,210,000,000,000,000,000,000,000
message:
Use constant time memcmp when comparing HMACs in openvpn_decrypt. Signed-off-by: Steffan Karger <[email protected]> Acked-by: Gert Doering <[email protected]> Signed-off-by: Gert Doering <[email protected]>
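The point of the fix is that comparison time must not depend on where the buffers first differ, or an attacker can time their way through an HMAC byte by byte. A self-contained sketch of a constant-time comparison in that spirit:

```c
#include <stddef.h>

/* Accumulate differences with XOR/OR; never branch on the data. */
static int memcmp_constant_time(const void *a, const void *b, size_t n)
{
    const unsigned char *x = a;
    const unsigned char *y = b;
    unsigned char diff = 0;
    size_t i;

    for (i = 0; i < n; i++)
        diff |= x[i] ^ y[i];
    return diff;     /* 0 iff the buffers are equal */
}
```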
httpLocalRequest(ObjectPtr object, int method, int from, int to, HTTPRequestPtr requestor, void *closure) { if(object->requestor == NULL) object->requestor = requestor; if(!disableLocalInterface && urlIsSpecial(object->key, object->key_size)) return httpSpecialRequest(object, method, from, to, requestor, closure); if(method >= METHOD_POST) { httpClientError(requestor, 405, internAtom("Method not allowed")); requestor->connection->flags &= ~CONN_READER; return 1; } /* objectFillFromDisk already did the real work but we have to make sure we don't get into an infinite loop. */ if(object->flags & OBJECT_INITIAL) { abortObject(object, 404, internAtom("Not found")); } object->age = current_time.tv_sec; object->date = current_time.tv_sec; object->flags &= ~OBJECT_VALIDATING; notifyObject(object); return 1; }
target: 1 | cwe: ["CWE-617"] | project: polipo | size: 28
commit_id: 0e2b44af619e46e365971ea52b97457bc0778cd3 | hash: 109,054,380,054,124,660,000,000,000,000,000,000,000
message:
Try to read POST requests to local configuration interface correctly.
static int dissect_mswsp_smb2(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void* data) { smb2_info_t *si = (smb2_info_t*)data; gboolean in; char* fid_name = NULL; guint32 open_frame = 0, close_frame = 0; if (!si) { return 0; } if (si->saved) { dcerpc_fetch_polhnd_data(&si->saved->policy_hnd, &fid_name, NULL, &open_frame, &close_frame, pinfo->num); } if (!fid_name || g_ascii_strcasecmp(fid_name, "File: MsFteWds") != 0) { return 0; } in = !(si->flags & SMB2_FLAGS_RESPONSE); p_add_proto_data(wmem_file_scope(), pinfo, proto_mswsp, 0, (void*)&SMB2); return dissect_mswsp(tvb, pinfo, tree, in, data); }
target: 0 | cwe: ["CWE-770"] | project: wireshark | size: 23
commit_id: b7a0650e061b5418ab4a8f72c6e4b00317aff623 | hash: 133,719,507,854,516,980,000,000,000,000,000,000,000
message:
MS-WSP: Don't allocate huge amounts of memory. Add a couple of memory allocation sanity checks, one of which fixes #17331.
static int pid_numa_maps_open(struct inode *inode, struct file *file) { return numa_maps_open(inode, file, &proc_pid_numa_maps_op); }
target: 0 | cwe: ["CWE-200"] | project: linux | size: 4
commit_id: ab676b7d6fbf4b294bf198fb27ade5b0e865c7ce | hash: 139,585,993,376,640,640,000,000,000,000,000,000,000
message:
pagemap: do not leak physical addresses to non-privileged userspace As pointed by recent post[1] on exploiting DRAM physical imperfection, /proc/PID/pagemap exposes sensitive information which can be used to do attacks. This disallows anybody without CAP_SYS_ADMIN to read the pagemap. [1] http://googleprojectzero.blogspot.com/2015/03/exploiting-dram-rowhammer-bug-to-gain.html [ Eventually we might want to do anything more finegrained, but for now this is the simple model. - Linus ] Signed-off-by: Kirill A. Shutemov <[email protected]> Acked-by: Konstantin Khlebnikov <[email protected]> Acked-by: Andy Lutomirski <[email protected]> Cc: Pavel Emelyanov <[email protected]> Cc: Andrew Morton <[email protected]> Cc: Mark Seaborn <[email protected]> Cc: [email protected] Signed-off-by: Linus Torvalds <[email protected]>
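The access check the message describes is a one-liner at open time; a kernel-style sketch (not self-contained — capable() and CAP_SYS_ADMIN are kernel APIs — and the exact placement in the real patch may differ):

```c
/* Refuse to expose physical addresses without CAP_SYS_ADMIN. */
static int pagemap_open(struct inode *inode, struct file *file)
{
    if (!capable(CAP_SYS_ADMIN))
        return -EPERM;
    return 0;
}
```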
void l2cap_chan_close(struct l2cap_chan *chan, int reason) { struct l2cap_conn *conn = chan->conn; BT_DBG("chan %p state %s", chan, state_to_string(chan->state)); switch (chan->state) { case BT_LISTEN: chan->ops->teardown(chan, 0); break; case BT_CONNECTED: case BT_CONFIG: if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) { __set_chan_timer(chan, chan->ops->get_sndtimeo(chan)); l2cap_send_disconn_req(chan, reason); } else l2cap_chan_del(chan, reason); break; case BT_CONNECT2: if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) { if (conn->hcon->type == ACL_LINK) l2cap_chan_connect_reject(chan); else if (conn->hcon->type == LE_LINK) l2cap_chan_le_connect_reject(chan); } l2cap_chan_del(chan, reason); break; case BT_CONNECT: case BT_DISCONN: l2cap_chan_del(chan, reason); break; default: chan->ops->teardown(chan, 0); break; } }
target: 0 | cwe: ["CWE-787"] | project: linux | size: 41
commit_id: e860d2c904d1a9f38a24eb44c9f34b8f915a6ea3 | hash: 308,974,243,942,839,600,000,000,000,000,000,000,000
message:
Bluetooth: Properly check L2CAP config option output buffer length Validate the output buffer length for L2CAP config requests and responses to avoid overflowing the stack buffer used for building the option blocks. Cc: [email protected] Signed-off-by: Ben Seri <[email protected]> Signed-off-by: Marcel Holtmann <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
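The bound the message describes: a config option is a small header plus its payload bytes, and both must fit in what remains of the output buffer before anything is appended. A hedged sketch with illustrative names (the kernel patch threads a size argument through l2cap_add_conf_opt()):

```c
#include <stddef.h>

/* Return 1 iff a (header + payload) option fits in the remaining space. */
static int conf_opt_fits(size_t remaining, size_t opt_payload_len)
{
    const size_t opt_hdr = 2;        /* option type + length bytes */
    return opt_payload_len <= remaining &&
           remaining - opt_payload_len >= opt_hdr;
}
```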
static void t1_getline(void) { int c, l, eexec_scan; char *p; static const char eexec_str[] = "currentfile eexec"; static int eexec_len = 17; restart: if (t1_eof()) normal_error("type 1","unexpected end of file"); t1_line_ptr = t1_line_array; alloc_array(t1_line, 1, T1_BUF_SIZE); t1_cslen = 0; eexec_scan = 0; c = t1_getbyte(); if (c == EOF) goto exit; while (!t1_eof()) { if (t1_in_eexec == 1) c = edecrypt((byte) c); alloc_array(t1_line, 1, T1_BUF_SIZE); { char cc = (char) c; append_char_to_buf(cc, t1_line_ptr, t1_line_array, t1_line_limit); } if (t1_in_eexec == 0 && eexec_scan >= 0 && eexec_scan < eexec_len) { if (t1_line_array[eexec_scan] == eexec_str[eexec_scan]) eexec_scan++; else eexec_scan = -1; } if (c == 10 || c == 13 || (t1_pfa && eexec_scan == eexec_len && c == 32)) { break; } if (t1_cs && t1_cslen == 0 && (t1_line_ptr - t1_line_array > 4) && (t1_suffix(" RD ") || t1_suffix(" -| "))) { p = t1_line_ptr - 5; while (*p != ' ') p--; l = (int) t1_scan_num(p + 1, 0); t1_cslen = (unsigned short) l; /*tex |cs_start| is an index now */ cs_start = (int) (t1_line_ptr - t1_line_array); alloc_array(t1_line, l, T1_BUF_SIZE); while (l-- > 0) *t1_line_ptr++ = (t1_line_entry) edecrypt((byte) t1_getbyte()); } c = t1_getbyte(); } /*tex |append_eol| can append 2 chars */ alloc_array(t1_line, 2, T1_BUF_SIZE); append_eol(t1_line_ptr, t1_line_array, t1_line_limit); if (t1_line_ptr - t1_line_array < 2) goto restart; if (eexec_scan == eexec_len) t1_in_eexec = 1; exit: /*tex Ensure that |t1_buf_array| has as much room as |t1_line_array|. */ t1_buf_ptr = t1_buf_array; alloc_array(t1_buf, t1_line_limit, t1_line_limit); }
target: 0 | cwe: ["CWE-119"] | project: texlive-source | size: 61
commit_id: 6ed0077520e2b0da1fd060c7f88db7b2e6068e4c | hash: 20,353,898,437,599,907,000,000,000,000,000,000,000
message:
writet1 protection against buffer overflow git-svn-id: svn://tug.org/texlive/trunk/Build/source@48697 c570f23f-e606-0410-a88d-b1316a301751
system_call_script(thread_master_t *m, int (*func) (thread_t *), void * arg, unsigned long timer, notify_script_t* script) { pid_t pid; /* Daemonization to not degrade our scheduling timer */ if (log_file_name) flush_log_file(); pid = local_fork(); if (pid < 0) { /* fork error */ log_message(LOG_INFO, "Failed fork process"); return -1; } if (pid) { /* parent process */ thread_add_child(m, func, arg, pid, timer); return 0; } /* Child process */ #ifdef _MEM_CHECK_ skip_mem_dump(); #endif system_call(script); exit(0); /* Script errors aren't server errors */ }
target: 1 | cwe: ["CWE-200"] | project: keepalived | size: 31
commit_id: 26c8d6374db33bcfcdcd758b1282f12ceef4b94f | hash: 25,082,536,199,682,144,000,000,000,000,000,000,000
message:
Disable fopen_safe() append mode by default If a non privileged user creates /tmp/keepalived.log and has it open for read (e.g. tail -f), then even though keepalived will change the owner to root and remove all read/write permissions from non owners, the application which already has the file open will be able to read the added log entries. Accordingly, opening a file in append mode is disabled by default, and only enabled if --enable-smtp-alert-debug or --enable-log-file (which are debugging options and unset by default) are enabled. This should further alleviate security concerns related to CVE-2018-19046. Signed-off-by: Quentin Armitage <[email protected]>
static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) { struct dwc3 *dwc = dep->dwc; if (!dep->endpoint.desc) { dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n", dep->name); return -ESHUTDOWN; } if (WARN(req->dep != dep, "request %pK belongs to '%s'\n", &req->request, req->dep->name)) return -EINVAL; pm_runtime_get(dwc->dev); req->request.actual = 0; req->request.status = -EINPROGRESS; req->direction = dep->direction; req->epnum = dep->number; trace_dwc3_ep_queue(req); list_add_tail(&req->list, &dep->pending_list); /* * NOTICE: Isochronous endpoints should NEVER be prestarted. We must * wait for a XferNotReady event so we will know what's the current * (micro-)frame number. * * Without this trick, we are very, very likely gonna get Bus Expiry * errors which will force us issue EndTransfer command. */ if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { if ((dep->flags & DWC3_EP_PENDING_REQUEST)) { if (dep->flags & DWC3_EP_TRANSFER_STARTED) { dwc3_stop_active_transfer(dwc, dep->number, true); dep->flags = DWC3_EP_ENABLED; } else { u32 cur_uf; cur_uf = __dwc3_gadget_get_frame(dwc); __dwc3_gadget_start_isoc(dwc, dep, cur_uf); dep->flags &= ~DWC3_EP_PENDING_REQUEST; } return 0; } if ((dep->flags & DWC3_EP_BUSY) && !(dep->flags & DWC3_EP_MISSED_ISOC)) goto out; return 0; } out: return __dwc3_gadget_kick_transfer(dep); }
target: 0 | cwe: ["CWE-703", "CWE-667", "CWE-189"] | project: linux | size: 58
commit_id: c91815b596245fd7da349ecc43c8def670d2269e | hash: 288,647,596,053,653,540,000,000,000,000,000,000,000
message:
usb: dwc3: gadget: never call ->complete() from ->ep_queue() This is a requirement which has always existed but, somehow, wasn't reflected in the documentation and problems weren't found until now when Tuba Yavuz found a possible deadlock happening between dwc3 and f_hid. She described the situation as follows: spin_lock_irqsave(&hidg->write_spinlock, flags); // first acquire /* we our function has been disabled by host */ if (!hidg->req) { free_ep_req(hidg->in_ep, hidg->req); goto try_again; } [...] status = usb_ep_queue(hidg->in_ep, hidg->req, GFP_ATOMIC); => [...] => usb_gadget_giveback_request => f_hidg_req_complete => spin_lock_irqsave(&hidg->write_spinlock, flags); // second acquire Note that this happens because dwc3 would call ->complete() on a failed usb_ep_queue() due to failed Start Transfer command. This is, anyway, a theoretical situation because dwc3 currently uses "No Response Update Transfer" command for Bulk and Interrupt endpoints. It's still good to make this case impossible to happen even if the "No Reponse Update Transfer" command is changed. Reported-by: Tuba Yavuz <[email protected]> Signed-off-by: Felipe Balbi <[email protected]> Cc: stable <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
ProcessContextOptRef ListenerFactoryContextBaseImpl::processContext() { return server_.processContext(); }
target: 0 | cwe: ["CWE-400"] | project: envoy | size: 3
commit_id: dfddb529e914d794ac552e906b13d71233609bf7 | hash: 70,890,656,767,217,270,000,000,000,000,000,000,000
message:
listener: Add configurable accepted connection limits (#153) Add support for per-listener limits on accepted connections. Signed-off-by: Tony Allen <[email protected]>
request_counter_add_request (RequestCounter counter, Request request) { guint i; for (i = 0; i < REQUEST_TYPE_LAST; i++) { if (REQUEST_WANTS_TYPE (request, i)) { counter[i]++; } } }
target: 0 | cwe: [] | project: nautilus | size: 11
commit_id: 7632a3e13874a2c5e8988428ca913620a25df983 | hash: 78,250,140,564,129,480,000,000,000,000,000,000,000
message:
Check for trusted desktop file launchers. 2009-02-24 Alexander Larsson <[email protected]> * libnautilus-private/nautilus-directory-async.c: Check for trusted desktop file launchers. * libnautilus-private/nautilus-file-private.h: * libnautilus-private/nautilus-file.c: * libnautilus-private/nautilus-file.h: Add nautilus_file_is_trusted_link. Allow unsetting of custom display name. * libnautilus-private/nautilus-mime-actions.c: Display dialog when trying to launch a non-trusted desktop file. svn path=/trunk/; revision=15003
gx_page_device_get_page_device(gx_device * dev) { return dev; }
0
[]
ghostpdl
c9b362ba908ca4b1d7c72663a33229588012d7d9
24,702,694,664,538,045,000,000,000,000,000,000,000
4
Bug 699670: disallow copying of the epo device The erasepage optimisation (epo) subclass device shouldn't be allowed to be copied because the subclass private data, child and parent pointers end up being shared between the original device and the copy. Add an epo_finish_copydevice which NULLs the three offending pointers, and then communicates to the caller that copying is not allowed. This also exposed a separate issue with the stype for subclasses devices. Devices are, I think, unique in having two stype objects associated with them: the usual one in the memory manager header, and the other stored in the device structere directly. In order for the stype to be correct, we have to use the stype for the incoming device, with the ssize of the original device (ssize should reflect the size of the memory allocation). We correctly did so with the stype in the device structure, but then used the prototype device's stype to patch the memory manager stype - meaning the ssize potentially no longer matched the allocated memory. This caused problems in the garbager where there is an implicit assumption that the size of a single object clump (c_alone == 1) is also the size (+ memory manager overheads) of the single object it contains. The solution is to use the same stype instance to patch the memory manager data as we do in the device structure (with the correct ssize).
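A hedged sketch of the shape of the fix described above; the epo_device struct, the function signature, and the return convention are all assumptions for illustration and do not match Ghostscript's actual gx_device API:

/* Sketch only: simplified types, not the real copydevice hook. */
struct epo_device {
    void *subclass_data;
    void *child;
    void *parent;
};

/* Nulls the three pointers that must never be shared between the
 * original device and a copy, then reports that copying is refused. */
static int epo_finish_copydevice(struct epo_device *copy)
{
    copy->subclass_data = NULL;
    copy->child = NULL;
    copy->parent = NULL;
    return -1; /* tell the caller copying is not allowed */
}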
CreateForeignDataWrapper(ParseState *pstate, CreateFdwStmt *stmt) { Relation rel; Datum values[Natts_pg_foreign_data_wrapper]; bool nulls[Natts_pg_foreign_data_wrapper]; HeapTuple tuple; Oid fdwId; bool handler_given; bool validator_given; Oid fdwhandler; Oid fdwvalidator; Datum fdwoptions; Oid ownerId; ObjectAddress myself; ObjectAddress referenced; rel = table_open(ForeignDataWrapperRelationId, RowExclusiveLock); /* Must be superuser */ if (!superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("permission denied to create foreign-data wrapper \"%s\"", stmt->fdwname), errhint("Must be superuser to create a foreign-data wrapper."))); /* For now the owner cannot be specified on create. Use effective user ID. */ ownerId = GetUserId(); /* * Check that there is no other foreign-data wrapper by this name. */ if (GetForeignDataWrapperByName(stmt->fdwname, true) != NULL) ereport(ERROR, (errcode(ERRCODE_DUPLICATE_OBJECT), errmsg("foreign-data wrapper \"%s\" already exists", stmt->fdwname))); /* * Insert tuple into pg_foreign_data_wrapper. */ memset(values, 0, sizeof(values)); memset(nulls, false, sizeof(nulls)); fdwId = GetNewOidWithIndex(rel, ForeignDataWrapperOidIndexId, Anum_pg_foreign_data_wrapper_oid); values[Anum_pg_foreign_data_wrapper_oid - 1] = ObjectIdGetDatum(fdwId); values[Anum_pg_foreign_data_wrapper_fdwname - 1] = DirectFunctionCall1(namein, CStringGetDatum(stmt->fdwname)); values[Anum_pg_foreign_data_wrapper_fdwowner - 1] = ObjectIdGetDatum(ownerId); /* Lookup handler and validator functions, if given */ parse_func_options(pstate, stmt->func_options, &handler_given, &fdwhandler, &validator_given, &fdwvalidator); values[Anum_pg_foreign_data_wrapper_fdwhandler - 1] = ObjectIdGetDatum(fdwhandler); values[Anum_pg_foreign_data_wrapper_fdwvalidator - 1] = ObjectIdGetDatum(fdwvalidator); nulls[Anum_pg_foreign_data_wrapper_fdwacl - 1] = true; fdwoptions = transformGenericOptions(ForeignDataWrapperRelationId, PointerGetDatum(NULL), stmt->options, fdwvalidator); if (PointerIsValid(DatumGetPointer(fdwoptions))) values[Anum_pg_foreign_data_wrapper_fdwoptions - 1] = fdwoptions; else nulls[Anum_pg_foreign_data_wrapper_fdwoptions - 1] = true; tuple = heap_form_tuple(rel->rd_att, values, nulls); CatalogTupleInsert(rel, tuple); heap_freetuple(tuple); /* record dependencies */ myself.classId = ForeignDataWrapperRelationId; myself.objectId = fdwId; myself.objectSubId = 0; if (OidIsValid(fdwhandler)) { referenced.classId = ProcedureRelationId; referenced.objectId = fdwhandler; referenced.objectSubId = 0; recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL); } if (OidIsValid(fdwvalidator)) { referenced.classId = ProcedureRelationId; referenced.objectId = fdwvalidator; referenced.objectSubId = 0; recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL); } recordDependencyOnOwner(ForeignDataWrapperRelationId, fdwId, ownerId); /* dependency on extension */ recordDependencyOnCurrentExtension(&myself, false); /* Post creation hook for new foreign data wrapper */ InvokeObjectPostCreateHook(ForeignDataWrapperRelationId, fdwId, 0); table_close(rel, RowExclusiveLock); return myself; }
0
[ "CWE-94" ]
postgres
b9b21acc766db54d8c337d508d0fe2f5bf2daab0
153,821,150,289,648,030,000,000,000,000,000,000,000
110
In extensions, don't replace objects not belonging to the extension. Previously, if an extension script did CREATE OR REPLACE and there was an existing object not belonging to the extension, it would overwrite the object and adopt it into the extension. This is problematic, first because the overwrite is probably unintentional, and second because we didn't change the object's ownership. Thus a hostile user could create an object in advance of an expected CREATE EXTENSION command, and would then have ownership rights on an extension object, which could be modified for trojan-horse-type attacks. Hence, forbid CREATE OR REPLACE of an existing object unless it already belongs to the extension. (Note that we've always forbidden replacing an object that belongs to some other extension; only the behavior for previously-free-standing objects changes here.) For the same reason, also fail CREATE IF NOT EXISTS when there is an existing object that doesn't belong to the extension. Our thanks to Sven Klemm for reporting this problem. Security: CVE-2022-2625
int MonClient::ping_monitor(const string &mon_id, string *result_reply) { ldout(cct, 10) << __func__ << dendl; string new_mon_id; if (monmap.contains("noname-"+mon_id)) { new_mon_id = "noname-"+mon_id; } else { new_mon_id = mon_id; } if (new_mon_id.empty()) { ldout(cct, 10) << __func__ << " specified mon id is empty!" << dendl; return -EINVAL; } else if (!monmap.contains(new_mon_id)) { ldout(cct, 10) << __func__ << " no such monitor 'mon." << new_mon_id << "'" << dendl; return -ENOENT; } // N.B. monc isn't initialized auth_registry.refresh_config(); KeyRing keyring; keyring.from_ceph_context(cct); RotatingKeyRing rkeyring(cct, cct->get_module_type(), &keyring); MonClientPinger *pinger = new MonClientPinger(cct, &rkeyring, result_reply); Messenger *smsgr = Messenger::create_client_messenger(cct, "temp_ping_client"); smsgr->add_dispatcher_head(pinger); smsgr->set_auth_client(pinger); smsgr->start(); ConnectionRef con = smsgr->connect_to_mon(monmap.get_addrs(new_mon_id)); ldout(cct, 10) << __func__ << " ping mon." << new_mon_id << " " << con->get_peer_addr() << dendl; pinger->mc.reset(new MonConnection(cct, con, 0, &auth_registry)); pinger->mc->start(monmap.get_epoch(), entity_name); con->send_message(new MPing); int ret = pinger->wait_for_reply(cct->_conf->mon_client_ping_timeout); if (ret == 0) { ldout(cct,10) << __func__ << " got ping reply" << dendl; } else { ret = -ret; } con->mark_down(); pinger->mc.reset(); smsgr->shutdown(); smsgr->wait(); delete smsgr; delete pinger; return ret; }
0
[ "CWE-294" ]
ceph
6c14c2fb5650426285428dfe6ca1597e5ea1d07d
13,048,491,179,196,644,000,000,000,000,000,000,000
60
mon/MonClient: bring back CEPHX_V2 authorizer challenges Commit c58c5754dfd2 ("msg/async/ProtocolV1: use AuthServer and AuthClient") introduced a backwards compatibility issue into msgr1. To fix it, commit 321548010578 ("mon/MonClient: skip CEPHX_V2 challenge if client doesn't support it") set out to skip authorizer challenges for peers that don't support CEPHX_V2. However, it made it so that authorizer challenges are skipped for all peers in both msgr1 and msgr2 cases, effectively disabling the protection against replay attacks that was put in place in commit f80b848d3f83 ("auth/cephx: add authorizer challenge", CVE-2018-1128). This is because con->get_features() always returns 0 at that point. In msgr1 case, the peer shares its features along with the authorizer, but while they are available in connect_msg.features they aren't assigned to con until ProtocolV1::open(). In msgr2 case, the peer doesn't share its features until much later (in CLIENT_IDENT frame, i.e. after the authentication phase). The result is that !CEPHX_V2 branch is taken in all cases and replay attack protection is lost. Only clusters with cephx_service_require_version set to 2 on the service daemons would not be silently downgraded. But, since the default is 1 and there are no reports of looping on BADAUTHORIZER faults, I'm pretty sure that no one has ever done that. Note that cephx_require_version set to 2 would have no effect even though it is supposed to be stronger than cephx_service_require_version because MonClient::handle_auth_request() didn't check it. To fix: - for msgr1, check connect_msg.features (as was done before commit c58c5754dfd2) and challenge if CEPHX_V2 is supported. Together with two preceding patches that resurrect proper cephx_* option handling in msgr1, this covers both "I want old clients to work" and "I wish to require better authentication" use cases. - for msgr2, don't check anything and always challenge. CEPHX_V2 predates msgr2, anyone speaking msgr2 must support it. Signed-off-by: Ilya Dryomov <[email protected]> (cherry picked from commit 4a82c72e3bdddcb625933e83af8b50a444b961f1)
static void handle_skipped_hlink(struct file_struct *file, int itemizing, enum logcode code, int f_out) { char fbuf[MAXPATHLEN]; int new_last_ndx; struct file_list *save_flist = cur_flist; /* If we skip the last item in a chain of links and there was a * prior non-skipped hard-link waiting to finish, finish it now. */ if ((new_last_ndx = skip_hard_link(file, &cur_flist)) < 0) return; file = cur_flist->files[new_last_ndx - cur_flist->ndx_start]; cur_flist->in_progress--; /* undo prior increment */ f_name(file, fbuf); recv_generator(fbuf, file, new_last_ndx, itemizing, code, f_out); cur_flist = save_flist; }
0
[ "CWE-59" ]
rsync
e12a6c087ca1eecdb8eae5977be239c24f4dd3d9
70,226,983,275,703,050,000,000,000,000,000,000,000
19
Add parent-dir validation for --no-inc-recurse too.
static int dcbnl_getstate(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { /* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */ if (!netdev->dcbnl_ops->getstate) return -EOPNOTSUPP; return nla_put_u8(skb, DCB_ATTR_STATE, netdev->dcbnl_ops->getstate(netdev)); }
0
[ "CWE-399" ]
linux-2.6
29cd8ae0e1a39e239a3a7b67da1986add1199fc0
286,277,531,011,482,500,000,000,000,000,000,000,000
10
dcbnl: fix various netlink info leaks The dcb netlink interface leaks stack memory in various places: * perm_addr[] buffer is only filled at max with 12 of the 32 bytes but copied completely, * no in-kernel driver fills all fields of an IEEE 802.1Qaz subcommand, so we're leaking up to 58 bytes for ieee_ets structs, up to 136 bytes for ieee_pfc structs, etc., * the same is true for CEE -- no in-kernel driver fills the whole struct, Prevent all of the above stack info leaks by properly initializing the buffers/structures involved. Signed-off-by: Mathias Krause <[email protected]> Signed-off-by: David S. Miller <[email protected]>
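The leak pattern and its fix reduce to one idiom: zero the whole struct before partially filling it, because every untouched byte is copied out verbatim into the netlink reply. The struct below only approximates struct ieee_ets; its layout is an assumption for the sketch:

#include <string.h>

struct ieee_ets_sketch {
    unsigned char willing;
    unsigned char ets_cap;
    unsigned char cbs;
    unsigned char tc_tx_bw[8];
    unsigned char pad[48];   /* fields no driver ever fills */
};

void fill_ets(struct ieee_ets_sketch *ets)
{
    /* Without this memset the untouched bytes keep whatever was on
     * the stack and leak to userspace when the struct is copied. */
    memset(ets, 0, sizeof(*ets));
    ets->willing = 1;
    ets->ets_cap = 8;
}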
static ssize_t proc_write( struct file *file, const char __user *buffer, size_t len, loff_t *offset ) { ssize_t ret; struct proc_data *priv = file->private_data; if (!priv->wbuffer) return -EINVAL; ret = simple_write_to_buffer(priv->wbuffer, priv->maxwritelen, offset, buffer, len); if (ret > 0) priv->writelen = max_t(int, priv->writelen, *offset); return ret; }
0
[ "CWE-703", "CWE-264" ]
linux
550fd08c2cebad61c548def135f67aba284c6162
154,878,532,882,260,830,000,000,000,000,000,000,000
18
net: Audit drivers to identify those needing IFF_TX_SKB_SHARING cleared After the last patch, We are left in a state in which only drivers calling ether_setup have IFF_TX_SKB_SHARING set (we assume that drivers touching real hardware call ether_setup for their net_devices and don't hold any state in their skbs. There are a handful of drivers that violate this assumption of course, and need to be fixed up. This patch identifies those drivers, and marks them as not being able to support the safe transmission of skbs by clearning the IFF_TX_SKB_SHARING flag in priv_flags Signed-off-by: Neil Horman <[email protected]> CC: Karsten Keil <[email protected]> CC: "David S. Miller" <[email protected]> CC: Jay Vosburgh <[email protected]> CC: Andy Gospodarek <[email protected]> CC: Patrick McHardy <[email protected]> CC: Krzysztof Halasa <[email protected]> CC: "John W. Linville" <[email protected]> CC: Greg Kroah-Hartman <[email protected]> CC: Marcel Holtmann <[email protected]> CC: Johannes Berg <[email protected]> Signed-off-by: David S. Miller <[email protected]>
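A minimal sketch of the per-driver cleanup this commit applies; the flag value and both struct/function names are placeholders, not the real <linux/netdevice.h> definitions:

#define IFF_TX_SKB_SHARING_SKETCH 0x10000u  /* illustrative value only */

struct net_device_sketch {
    unsigned int priv_flags;
};

static void ether_setup_sketch(struct net_device_sketch *dev)
{
    dev->priv_flags |= IFF_TX_SKB_SHARING_SKETCH; /* ether_setup() default */
}

static void driver_setup(struct net_device_sketch *dev)
{
    ether_setup_sketch(dev);
    /* This driver keeps state in its skbs, so shared skbs are unsafe:
     * clear the flag right after ether_setup(). */
    dev->priv_flags &= ~IFF_TX_SKB_SHARING_SKETCH;
}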
COMPS_HSList* comps_mrtree_keys(COMPS_MRTree * rt) { COMPS_HSList *tmplist, *tmp_subnodes, *ret; COMPS_HSListItem *it; struct Pair { COMPS_HSList * subnodes; char * key; char added; } *pair, *parent_pair; pair = malloc(sizeof(struct Pair)); pair->subnodes = rt->subnodes; pair->key = NULL; pair->added = 0; tmplist = comps_hslist_create(); comps_hslist_init(tmplist, NULL, NULL, &free); ret = comps_hslist_create(); comps_hslist_init(ret, NULL, NULL, &free); comps_hslist_append(tmplist, pair, 0); while (tmplist->first != NULL) { it = tmplist->first; comps_hslist_remove(tmplist, tmplist->first); tmp_subnodes = ((struct Pair*)it->data)->subnodes; parent_pair = (struct Pair*) it->data; free(it); for (it = tmp_subnodes->first; it != NULL; it=it->next) { pair = malloc(sizeof(struct Pair)); pair->subnodes = ((COMPS_MRTreeData*)it->data)->subnodes; pair->added = 0; if (parent_pair->key != NULL) { pair->key = malloc(sizeof(char) * (strlen(((COMPS_MRTreeData*)it->data)->key) + strlen(parent_pair->key) + 1)); memcpy(pair->key, parent_pair->key, sizeof(char) * strlen(parent_pair->key)); memcpy(pair->key+strlen(parent_pair->key), ((COMPS_MRTreeData*)it->data)->key, sizeof(char)*(strlen(((COMPS_MRTreeData*)it->data)->key)+1)); } else { pair->key = malloc(sizeof(char)* (strlen(((COMPS_MRTreeData*)it->data)->key) + 1)); memcpy(pair->key, ((COMPS_MRTreeData*)it->data)->key, sizeof(char)*(strlen(((COMPS_MRTreeData*)it->data)->key)+1)); } /* current node has data */ if (((COMPS_MRTreeData*)it->data)->data->first != NULL) { //printf("data not null for |%s|\n", pair->key); comps_hslist_append(ret, pair->key, 0); pair->added = 1; if (((COMPS_MRTreeData*)it->data)->subnodes->first != NULL) { // printf("subnodes found\b"); comps_hslist_append(tmplist, pair, 0); } else { free(pair); } /* current node hasn't data */ } else { if (((COMPS_MRTreeData*)it->data)->subnodes->first) { comps_hslist_append(tmplist, pair, 0); } else { free(pair->key); free(pair); } } } if (parent_pair->added == 0) free(parent_pair->key); free(parent_pair); } comps_hslist_destroy(&tmplist); return ret; }
0
[ "CWE-416", "CWE-862" ]
libcomps
e3a5d056633677959ad924a51758876d415e7046
300,176,989,397,351,050,000,000,000,000,000,000,000
77
Fix UAF in comps_objmrtree_unite function The added field is not used at all in many places and it is probably the left-over of some copy-paste.
_hb_ot_layout_set_glyph_class (hb_face_t *face, hb_codepoint_t glyph, hb_ot_layout_glyph_class_t klass) { if (HB_OBJECT_IS_INERT (face)) return; /* TODO optimize this? similar to old harfbuzz code for example */ hb_ot_layout_t *layout = &face->ot_layout; hb_ot_layout_class_t gdef_klass; unsigned int len = layout->new_gdef.len; if (HB_UNLIKELY (glyph > 65535)) return; /* XXX this is not threadsafe */ if (glyph >= len) { unsigned int new_len; unsigned char *new_klasses; new_len = len == 0 ? 120 : 2 * len; while (new_len <= glyph) new_len *= 2; if (new_len > 65536) new_len = 65536; new_klasses = (unsigned char *) realloc (layout->new_gdef.klasses, new_len * sizeof (unsigned char)); if (HB_UNLIKELY (!new_klasses)) return; memset (new_klasses + len, 0, new_len - len); layout->new_gdef.klasses = new_klasses; layout->new_gdef.len = new_len; } switch (klass) { default: case HB_OT_LAYOUT_GLYPH_CLASS_UNCLASSIFIED: gdef_klass = GDEF::UnclassifiedGlyph; break; case HB_OT_LAYOUT_GLYPH_CLASS_BASE_GLYPH: gdef_klass = GDEF::BaseGlyph; break; case HB_OT_LAYOUT_GLYPH_CLASS_LIGATURE: gdef_klass = GDEF::LigatureGlyph; break; case HB_OT_LAYOUT_GLYPH_CLASS_MARK: gdef_klass = GDEF::MarkGlyph; break; case HB_OT_LAYOUT_GLYPH_CLASS_COMPONENT: gdef_klass = GDEF::ComponentGlyph; break; } layout->new_gdef.klasses[glyph] = gdef_klass; return; }
0
[ "CWE-119" ]
pango
797d46714d27f147277fdd5346648d838c68fb8c
192,450,385,780,777,170,000,000,000,000,000,000,000
50
[HB/GDEF] Fix bug in building synthetic GDEF table
VOID CALLBACK tq_timer_cb(PVOID arg, BOOLEAN timed_out) { zend_bool *php_timed_out; /* The doc states it'll be always true, however it theoretically could be FALSE when the thread was signaled. */ if (!timed_out) { return; } php_timed_out = (zend_bool *)arg; *php_timed_out = 1; }
0
[ "CWE-134" ]
php-src
b101a6bbd4f2181c360bd38e7683df4a03cba83e
243,797,306,371,248,700,000,000,000,000,000,000,000
13
Use format string
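The one-line commit title corresponds to the classic CWE-134 idiom, shown here as a self-contained sketch (the function name is invented for the example):

#include <stdio.h>

void log_line(const char *user_supplied)
{
    /* Vulnerable form (CWE-134): conversion specifiers in the input
     * are interpreted, so "%n" can write to memory:
     *
     *     printf(user_supplied);
     */

    /* Fixed form, matching the commit title: */
    printf("%s", user_supplied);
}

int main(void)
{
    log_line("%x %x %n");
    return 0;
}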
static int addXMLCommand(XMLRPCCmd * xml) { if (XMLRPCCMD == NULL) XMLRPCCMD = mowgli_patricia_create(strcasecanon); mowgli_patricia_add(XMLRPCCMD, xml->name, xml); return XMLRPC_ERR_OK; }
0
[ "CWE-119", "CWE-787" ]
atheme
87580d767868360d2fed503980129504da84b63e
55,140,507,230,794,980,000,000,000,000,000,000,000
9
Do not copy more bytes than were allocated
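A generic sketch of the rule in the commit title, with an invented helper name: the copy length is clamped to the destination allocation rather than taken from the source string:

#include <stdlib.h>
#include <string.h>

/* Copy is bounded by the destination allocation, never by the source
 * length, and is always NUL-terminated. */
char *dup_bounded(const char *src, size_t alloc)
{
    char *dst;
    size_t n;

    if (alloc == 0 || (dst = malloc(alloc)) == NULL)
        return NULL;
    n = strlen(src);
    if (n >= alloc)
        n = alloc - 1;      /* truncate instead of overflowing */
    memcpy(dst, src, n);
    dst[n] = '\0';
    return dst;
}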
bool LEX::sp_pop_loop_label(THD *thd, const LEX_CSTRING *label_name) { sp_label *lab= spcont->pop_label(); sphead->backpatch(lab); if (label_name->str && lex_string_cmp(system_charset_info, label_name, &lab->name) != 0) { my_error(ER_SP_LABEL_MISMATCH, MYF(0), label_name->str); return true; } return false; }
0
[ "CWE-703" ]
server
39feab3cd31b5414aa9b428eaba915c251ac34a2
115,992,714,816,811,590,000,000,000,000,000,000,000
13
MDEV-26412 Server crash in Item_field::fix_outer_field for INSERT SELECT IF an INSERT/REPLACE SELECT statement contained an ON expression in the top level select and this expression used a subquery with a column reference that could not be resolved then an attempt to resolve this reference as an outer reference caused a crash of the server. This happened because the outer context field in the Name_resolution_context structure was not set to NULL for such references. Rather it pointed to the first element in the select_stack. Note that starting from 10.4 we cannot use the SELECT_LEX::outer_select() method when parsing a SELECT construct. Approved by Oleksandr Byelkin <[email protected]>
TEST_F(RouterTest, HashKeyNoHashPolicy) { ON_CALL(callbacks_.route_->route_entry_, hashPolicy()).WillByDefault(Return(nullptr)); EXPECT_FALSE(router_.computeHashKey().has_value()); }
0
[ "CWE-703" ]
envoy
18871dbfb168d3512a10c78dd267ff7c03f564c6
161,173,970,478,412,280,000,000,000,000,000,000,000
4
[1.18] CVE-2022-21655 Crash with direct_response Signed-off-by: Otto van der Schaaf <[email protected]>
long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp) { struct file *eventfp, *filep = NULL; struct eventfd_ctx *ctx = NULL; u64 p; long r; int i, fd; /* If you are not the owner, you can become one */ if (ioctl == VHOST_SET_OWNER) { r = vhost_dev_set_owner(d); goto done; } /* You must be the owner to do anything else */ r = vhost_dev_check_owner(d); if (r) goto done; switch (ioctl) { case VHOST_SET_MEM_TABLE: r = vhost_set_memory(d, argp); break; case VHOST_SET_LOG_BASE: if (copy_from_user(&p, argp, sizeof p)) { r = -EFAULT; break; } if ((u64)(unsigned long)p != p) { r = -EFAULT; break; } for (i = 0; i < d->nvqs; ++i) { struct vhost_virtqueue *vq; void __user *base = (void __user *)(unsigned long)p; vq = d->vqs[i]; mutex_lock(&vq->mutex); /* If ring is inactive, will check when it's enabled. */ if (vq->private_data && !vq_log_access_ok(vq, base)) r = -EFAULT; else vq->log_base = base; mutex_unlock(&vq->mutex); } break; case VHOST_SET_LOG_FD: r = get_user(fd, (int __user *)argp); if (r < 0) break; eventfp = fd == -1 ? NULL : eventfd_fget(fd); if (IS_ERR(eventfp)) { r = PTR_ERR(eventfp); break; } if (eventfp != d->log_file) { filep = d->log_file; ctx = d->log_ctx; d->log_ctx = eventfp ? eventfd_ctx_fileget(eventfp) : NULL; } else filep = eventfp; for (i = 0; i < d->nvqs; ++i) { mutex_lock(&d->vqs[i]->mutex); d->vqs[i]->log_ctx = d->log_ctx; mutex_unlock(&d->vqs[i]->mutex); } if (ctx) eventfd_ctx_put(ctx); if (filep) fput(filep); break; default: r = -ENOIOCTLCMD; break; } done: return r; }
1
[ "CWE-399" ]
linux
7932c0bd7740f4cd2aa168d3ce0199e7af7d72d5
219,968,646,784,447,100,000,000,000,000,000,000,000
78
vhost: actually track log eventfd file While reviewing vhost log code, I found out that log_file is never set. Note: I haven't tested the change (QEMU doesn't use LOG_FD yet). Cc: [email protected] Signed-off-by: Marc-André Lureau <[email protected]> Signed-off-by: Michael S. Tsirkin <[email protected]>
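The function above (the vulnerable revision) swaps d->log_ctx in the VHOST_SET_LOG_FD branch but never stores the new file, so d->log_file stays NULL forever. A minimal sketch of the missing bookkeeping, with simplified types and no reference counting, might look like this:

/* log_dev_sketch mirrors only the two fields involved; the real
 * driver's error paths and eventfd handling are omitted. */
struct log_dev_sketch {
    void *log_file;
    void *log_ctx;
};

void set_log_fd_sketch(struct log_dev_sketch *d, void *eventfp, void *ctx)
{
    if (eventfp != d->log_file) {
        d->log_file = eventfp;  /* the assignment the old code lacked */
        d->log_ctx = ctx;
    }
}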
static void generate_json_object(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj) { char *object_nl = state->object_nl; long object_nl_len = state->object_nl_len; char *indent = state->indent; long indent_len = state->indent_len; long max_nesting = state->max_nesting; char *delim = FBUFFER_PTR(state->object_delim); long delim_len = FBUFFER_LEN(state->object_delim); char *delim2 = FBUFFER_PTR(state->object_delim2); long delim2_len = FBUFFER_LEN(state->object_delim2); long depth = ++state->depth; int i, j; VALUE key, key_to_s, keys; if (max_nesting != 0 && depth > max_nesting) { fbuffer_free(buffer); rb_raise(eNestingError, "nesting of %ld is too deep", --state->depth); } fbuffer_append_char(buffer, '{'); keys = rb_funcall(obj, i_keys, 0); for(i = 0; i < RARRAY_LEN(keys); i++) { if (i > 0) fbuffer_append(buffer, delim, delim_len); if (object_nl) { fbuffer_append(buffer, object_nl, object_nl_len); } if (indent) { for (j = 0; j < depth; j++) { fbuffer_append(buffer, indent, indent_len); } } key = rb_ary_entry(keys, i); key_to_s = rb_funcall(key, i_to_s, 0); Check_Type(key_to_s, T_STRING); generate_json(buffer, Vstate, state, key_to_s); fbuffer_append(buffer, delim2, delim2_len); generate_json(buffer, Vstate, state, rb_hash_aref(obj, key)); } depth = --state->depth; if (object_nl) { fbuffer_append(buffer, object_nl, object_nl_len); if (indent) { for (j = 0; j < depth; j++) { fbuffer_append(buffer, indent, indent_len); } } } fbuffer_append_char(buffer, '}'); }
0
[ "CWE-119", "CWE-787" ]
json
8f782fd8e181d9cfe9387ded43a5ca9692266b85
233,130,728,992,771,500,000,000,000,000,000,000,000
48
Fix arbitrary heap exposure problem
static void cma_save_ib_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id, struct ib_sa_path_rec *path) { struct sockaddr_ib *listen_ib, *ib; listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr; ib = (struct sockaddr_ib *) &id->route.addr.src_addr; ib->sib_family = listen_ib->sib_family; ib->sib_pkey = path->pkey; ib->sib_flowinfo = path->flow_label; memcpy(&ib->sib_addr, &path->sgid, 16); ib->sib_sid = listen_ib->sib_sid; ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL); ib->sib_scope_id = listen_ib->sib_scope_id; ib = (struct sockaddr_ib *) &id->route.addr.dst_addr; ib->sib_family = listen_ib->sib_family; ib->sib_pkey = path->pkey; ib->sib_flowinfo = path->flow_label; memcpy(&ib->sib_addr, &path->dgid, 16); }
0
[ "CWE-20" ]
linux
b2853fd6c2d0f383dbdf7427e263eb576a633867
135,345,327,618,853,800,000,000,000,000,000,000,000
21
IB/core: Don't resolve passive side RoCE L2 address in CMA REQ handler The code that resolves the passive side source MAC within the rdma_cm connection request handler was both redundant and buggy, so remove it. It was redundant since later, when an RC QP is modified to RTR state, the resolution will take place in the ib_core module. It was buggy because this callback also deals with UD SIDR exchange, for which we incorrectly looked at the REQ member of the CM event and dereferenced a random value. Fixes: dd5f03beb4f7 ("IB/core: Ethernet L2 attributes in verbs/cm structures") Signed-off-by: Moni Shoua <[email protected]> Signed-off-by: Or Gerlitz <[email protected]> Signed-off-by: Roland Dreier <[email protected]>
v9fs_vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev) { struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dir); int retval; char name[2 + U32_MAX_DIGITS + 1 + U32_MAX_DIGITS + 1]; u32 perm; p9_debug(P9_DEBUG_VFS, " %lu,%pd mode: %hx MAJOR: %u MINOR: %u\n", dir->i_ino, dentry, mode, MAJOR(rdev), MINOR(rdev)); /* build extension */ if (S_ISBLK(mode)) sprintf(name, "b %u %u", MAJOR(rdev), MINOR(rdev)); else if (S_ISCHR(mode)) sprintf(name, "c %u %u", MAJOR(rdev), MINOR(rdev)); else *name = 0; perm = unixmode2p9mode(v9ses, mode); retval = v9fs_vfs_mkspecial(dir, dentry, perm, name); return retval; }
0
[ "CWE-835" ]
linux
5e3cc1ee1405a7eb3487ed24f786dec01b4cbe1f
177,708,942,770,801,660,000,000,000,000,000,000,000
24
9p: use inode->i_lock to protect i_size_write() under 32-bit Use inode->i_lock to protect i_size_write(), else i_size_read() in generic_fillattr() may loop infinitely in read_seqcount_begin() when multiple processes invoke v9fs_vfs_getattr() or v9fs_vfs_getattr_dotl() simultaneously under 32-bit SMP environment, and a soft lockup will be triggered as show below: watchdog: BUG: soft lockup - CPU#5 stuck for 22s! [stat:2217] Modules linked in: CPU: 5 PID: 2217 Comm: stat Not tainted 5.0.0-rc1-00005-g7f702faf5a9e #4 Hardware name: Generic DT based system PC is at generic_fillattr+0x104/0x108 LR is at 0xec497f00 pc : [<802b8898>] lr : [<ec497f00>] psr: 200c0013 sp : ec497e20 ip : ed608030 fp : ec497e3c r10: 00000000 r9 : ec497f00 r8 : ed608030 r7 : ec497ebc r6 : ec497f00 r5 : ee5c1550 r4 : ee005780 r3 : 0000052d r2 : 00000000 r1 : ec497f00 r0 : ed608030 Flags: nzCv IRQs on FIQs on Mode SVC_32 ISA ARM Segment none Control: 10c5387d Table: ac48006a DAC: 00000051 CPU: 5 PID: 2217 Comm: stat Not tainted 5.0.0-rc1-00005-g7f702faf5a9e #4 Hardware name: Generic DT based system Backtrace: [<8010d974>] (dump_backtrace) from [<8010dc88>] (show_stack+0x20/0x24) [<8010dc68>] (show_stack) from [<80a1d194>] (dump_stack+0xb0/0xdc) [<80a1d0e4>] (dump_stack) from [<80109f34>] (show_regs+0x1c/0x20) [<80109f18>] (show_regs) from [<801d0a80>] (watchdog_timer_fn+0x280/0x2f8) [<801d0800>] (watchdog_timer_fn) from [<80198658>] (__hrtimer_run_queues+0x18c/0x380) [<801984cc>] (__hrtimer_run_queues) from [<80198e60>] (hrtimer_run_queues+0xb8/0xf0) [<80198da8>] (hrtimer_run_queues) from [<801973e8>] (run_local_timers+0x28/0x64) [<801973c0>] (run_local_timers) from [<80197460>] (update_process_times+0x3c/0x6c) [<80197424>] (update_process_times) from [<801ab2b8>] (tick_nohz_handler+0xe0/0x1bc) [<801ab1d8>] (tick_nohz_handler) from [<80843050>] (arch_timer_handler_virt+0x38/0x48) [<80843018>] (arch_timer_handler_virt) from [<80180a64>] (handle_percpu_devid_irq+0x8c/0x240) [<801809d8>] (handle_percpu_devid_irq) from [<8017ac20>] (generic_handle_irq+0x34/0x44) [<8017abec>] (generic_handle_irq) from [<8017b344>] (__handle_domain_irq+0x6c/0xc4) [<8017b2d8>] (__handle_domain_irq) from [<801022e0>] (gic_handle_irq+0x4c/0x88) [<80102294>] (gic_handle_irq) from [<80101a30>] (__irq_svc+0x70/0x98) [<802b8794>] (generic_fillattr) from [<8056b284>] (v9fs_vfs_getattr_dotl+0x74/0xa4) [<8056b210>] (v9fs_vfs_getattr_dotl) from [<802b8904>] (vfs_getattr_nosec+0x68/0x7c) [<802b889c>] (vfs_getattr_nosec) from [<802b895c>] (vfs_getattr+0x44/0x48) [<802b8918>] (vfs_getattr) from [<802b8a74>] (vfs_statx+0x9c/0xec) [<802b89d8>] (vfs_statx) from [<802b9428>] (sys_lstat64+0x48/0x78) [<802b93e0>] (sys_lstat64) from [<80101000>] (ret_fast_syscall+0x0/0x28) [[email protected]: updated comment to not refer to a function in another subsystem] Link: http://lkml.kernel.org/r/[email protected] Cc: [email protected] Fixes: 7549ae3e81cc ("9p: Use the i_size_[read, write]() macros instead of using inode->i_size directly.") Reported-by: Xing Gaopeng <[email protected]> Signed-off-by: Hou Tao <[email protected]> Signed-off-by: Dominique Martinet <[email protected]>
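The mechanism behind the lockup can be modeled in userspace: i_size is a 64-bit value guarded by a sequence counter, and on 32-bit SMP two unserialized writers can leave the counter odd, so readers spin forever in their retry loop. The struct and function below are a model rather than the kernel API; the mutex plays the role of inode->i_lock:

#include <pthread.h>
#include <stdint.h>

struct inode_sketch {
    pthread_mutex_t i_lock;
    unsigned seq;        /* sequence counter: odd while a write is open */
    uint64_t i_size;     /* cannot be stored atomically on 32-bit */
};

void i_size_write_sketch(struct inode_sketch *inode, uint64_t size)
{
    pthread_mutex_lock(&inode->i_lock);  /* the fix: serialize writers */
    inode->seq++;                        /* seq becomes odd: write open */
    inode->i_size = size;
    inode->seq++;                        /* seq even again: write done  */
    pthread_mutex_unlock(&inode->i_lock);
}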
static void dwc3_disconnect_gadget(struct dwc3 *dwc) { if (dwc->gadget_driver && dwc->gadget_driver->disconnect) { spin_unlock(&dwc->lock); dwc->gadget_driver->disconnect(&dwc->gadget); spin_lock(&dwc->lock); } }
0
[ "CWE-703", "CWE-667", "CWE-189" ]
linux
c91815b596245fd7da349ecc43c8def670d2269e
57,650,698,906,469,470,000,000,000,000,000,000,000
8
usb: dwc3: gadget: never call ->complete() from ->ep_queue() This is a requirement which has always existed but, somehow, wasn't reflected in the documentation and problems weren't found until now when Tuba Yavuz found a possible deadlock happening between dwc3 and f_hid. She described the situation as follows: spin_lock_irqsave(&hidg->write_spinlock, flags); // first acquire /* we our function has been disabled by host */ if (!hidg->req) { free_ep_req(hidg->in_ep, hidg->req); goto try_again; } [...] status = usb_ep_queue(hidg->in_ep, hidg->req, GFP_ATOMIC); => [...] => usb_gadget_giveback_request => f_hidg_req_complete => spin_lock_irqsave(&hidg->write_spinlock, flags); // second acquire Note that this happens because dwc3 would call ->complete() on a failed usb_ep_queue() due to failed Start Transfer command. This is, anyway, a theoretical situation because dwc3 currently uses "No Response Update Transfer" command for Bulk and Interrupt endpoints. It's still good to make this case impossible to happen even if the "No Response Update Transfer" command is changed. Reported-by: Tuba Yavuz <[email protected]> Signed-off-by: Felipe Balbi <[email protected]> Cc: stable <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
static int sctp_bind_add(struct sock *sk, struct sockaddr *addrs, int addrlen) { int err; lock_sock(sk); err = sctp_setsockopt_bindx(sk, addrs, addrlen, SCTP_BINDX_ADD_ADDR); release_sock(sk); return err; }
0
[ "CWE-362" ]
linux
b166a20b07382b8bc1dcee2a448715c9c2c81b5b
223,221,693,962,563,700,000,000,000,000,000,000,000
10
net/sctp: fix race condition in sctp_destroy_sock If sctp_destroy_sock is called without sock_net(sk)->sctp.addr_wq_lock held and sp->do_auto_asconf is true, then an element is removed from the auto_asconf_splist without any proper locking. This can happen in the following functions: 1. In sctp_accept, if sctp_sock_migrate fails. 2. In inet_create or inet6_create, if there is a bpf program attached to BPF_CGROUP_INET_SOCK_CREATE which denies creation of the sctp socket. The bug is fixed by acquiring addr_wq_lock in sctp_destroy_sock instead of sctp_close. This addresses CVE-2021-23133. Reported-by: Or Cohen <[email protected]> Reviewed-by: Xin Long <[email protected]> Fixes: 610236587600 ("bpf: Add new cgroup attach type to enable sock modifications") Signed-off-by: Or Cohen <[email protected]> Acked-by: Marcelo Ricardo Leitner <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static void bmpmask32toimage(const OPJ_UINT8* pData, OPJ_UINT32 stride, opj_image_t* image, OPJ_UINT32 redMask, OPJ_UINT32 greenMask, OPJ_UINT32 blueMask, OPJ_UINT32 alphaMask) { int index; OPJ_UINT32 width, height; OPJ_UINT32 x, y; const OPJ_UINT8 *pSrc = NULL; OPJ_BOOL hasAlpha; OPJ_UINT32 redShift, redPrec; OPJ_UINT32 greenShift, greenPrec; OPJ_UINT32 blueShift, bluePrec; OPJ_UINT32 alphaShift, alphaPrec; width = image->comps[0].w; height = image->comps[0].h; hasAlpha = image->numcomps > 3U; bmp_mask_get_shift_and_prec(redMask, &redShift, &redPrec); bmp_mask_get_shift_and_prec(greenMask, &greenShift, &greenPrec); bmp_mask_get_shift_and_prec(blueMask, &blueShift, &bluePrec); bmp_mask_get_shift_and_prec(alphaMask, &alphaShift, &alphaPrec); image->comps[0].bpp = redPrec; image->comps[0].prec = redPrec; image->comps[1].bpp = greenPrec; image->comps[1].prec = greenPrec; image->comps[2].bpp = bluePrec; image->comps[2].prec = bluePrec; if (hasAlpha) { image->comps[3].bpp = alphaPrec; image->comps[3].prec = alphaPrec; } index = 0; pSrc = pData + (height - 1U) * stride; for (y = 0; y < height; y++) { for (x = 0; x < width; x++) { OPJ_UINT32 value = 0U; value |= ((OPJ_UINT32)pSrc[4 * x + 0]) << 0; value |= ((OPJ_UINT32)pSrc[4 * x + 1]) << 8; value |= ((OPJ_UINT32)pSrc[4 * x + 2]) << 16; value |= ((OPJ_UINT32)pSrc[4 * x + 3]) << 24; image->comps[0].data[index] = (OPJ_INT32)((value & redMask) >> redShift); /* R */ image->comps[1].data[index] = (OPJ_INT32)((value & greenMask) >> greenShift); /* G */ image->comps[2].data[index] = (OPJ_INT32)((value & blueMask) >> blueShift); /* B */ if (hasAlpha) { image->comps[3].data[index] = (OPJ_INT32)((value & alphaMask) >> alphaShift); /* A */ } index++; } pSrc -= stride; } }
0
[ "CWE-119", "CWE-787" ]
openjpeg
baf0c1ad4572daa89caa3b12985bdd93530f0dd7
223,288,518,030,544,120,000,000,000,000,000,000,000
61
bmp_read_info_header(): reject bmp files with biBitCount == 0 (#983)
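A sketch of the added validation; the struct is a stand-in for the parsed BITMAPINFOHEADER rather than OpenJPEG's internal type:

struct bmp_info_sketch {
    unsigned short biBitCount;
};

/* Returns 0 for headers the fix rejects: biBitCount == 0 would later
 * drive per-pixel arithmetic and allocations with a zero bit depth. */
int bmp_header_ok(const struct bmp_info_sketch *h)
{
    return h->biBitCount != 0;
}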
Status ModularFrameDecoder::DecodeGroup(const Rect& rect, BitReader* reader, int minShift, int maxShift, const ModularStreamId& stream, bool zerofill, PassesDecoderState* dec_state, ImageBundle* output) { JXL_DASSERT(stream.kind == ModularStreamId::kModularDC || stream.kind == ModularStreamId::kModularAC); const size_t xsize = rect.xsize(); const size_t ysize = rect.ysize(); Image gi(xsize, ysize, full_image.bitdepth, 0); // start at the first bigger-than-groupsize non-metachannel size_t c = full_image.nb_meta_channels; for (; c < full_image.channel.size(); c++) { Channel& fc = full_image.channel[c]; if (fc.w > frame_dim.group_dim || fc.h > frame_dim.group_dim) break; } size_t beginc = c; for (; c < full_image.channel.size(); c++) { Channel& fc = full_image.channel[c]; int shift = std::min(fc.hshift, fc.vshift); if (shift > maxShift) continue; if (shift < minShift) continue; Rect r(rect.x0() >> fc.hshift, rect.y0() >> fc.vshift, rect.xsize() >> fc.hshift, rect.ysize() >> fc.vshift, fc.w, fc.h); if (r.xsize() == 0 || r.ysize() == 0) continue; if (zerofill && use_full_image) { for (size_t y = 0; y < r.ysize(); ++y) { pixel_type* const JXL_RESTRICT row_out = r.Row(&fc.plane, y); memset(row_out, 0, r.xsize() * sizeof(*row_out)); } } else { Channel gc(r.xsize(), r.ysize()); if (zerofill) ZeroFillImage(&gc.plane); gc.hshift = fc.hshift; gc.vshift = fc.vshift; gi.channel.emplace_back(std::move(gc)); } } if (zerofill && use_full_image) return true; ModularOptions options; if (!zerofill) { if (!ModularGenericDecompress( reader, gi, /*header=*/nullptr, stream.ID(frame_dim), &options, /*undo_transforms=*/-1, &tree, &code, &context_map)) { return JXL_FAILURE("Failed to decode modular group"); } } // Undo global transforms that have been pushed to the group level if (!use_full_image) { for (auto t : global_transform) { JXL_RETURN_IF_ERROR(t.Inverse(gi, global_header.wp_header)); } JXL_RETURN_IF_ERROR(ModularImageToDecodedRect( gi, dec_state, nullptr, output, rect.Crop(dec_state->decoded))); return true; } int gic = 0; for (c = beginc; c < full_image.channel.size(); c++) { Channel& fc = full_image.channel[c]; int shift = std::min(fc.hshift, fc.vshift); if (shift > maxShift) continue; if (shift < minShift) continue; Rect r(rect.x0() >> fc.hshift, rect.y0() >> fc.vshift, rect.xsize() >> fc.hshift, rect.ysize() >> fc.vshift, fc.w, fc.h); if (r.xsize() == 0 || r.ysize() == 0) continue; JXL_ASSERT(use_full_image); CopyImageTo(/*rect_from=*/Rect(0, 0, r.xsize(), r.ysize()), /*from=*/gi.channel[gic].plane, /*rect_to=*/r, /*to=*/&fc.plane); gic++; } return true; }
0
[ "CWE-787" ]
libjxl
1c05e110d69b457696366fb4e762057b6855349b
326,059,685,007,902,220,000,000,000,000,000,000,000
74
fix use_full_image==false case (#365) Some fixes to the case where the full modular image is skipped: - don't assume that everything happens at the modular AC group level (minShift==0), there can also be upsampling causing channels to have nonzero shift even when there's no Squeeze - for partial decodes (when zerofill is true), don't try to fill the full image when it's not used. Instead initialize the decoded image with zeroes and skip the decoding.
CBINDInstallDlg::UpdateService(CString StartName) { SC_HANDLE hSCManager; SC_HANDLE hService; if(m_toolsOnly) return; SetCurrent(IDS_OPEN_SCM); hSCManager= OpenSCManager(NULL, NULL, SC_MANAGER_ALL_ACCESS); if (!hSCManager) { MsgBox(IDS_ERR_OPEN_SCM, GetErrMessage()); return; } DWORD dwStart = SERVICE_DEMAND_START; if (m_autoStart) dwStart = SERVICE_AUTO_START; DWORD dwServiceType = SERVICE_WIN32_OWN_PROCESS; CString namedLoc; namedLoc.Format("%s\\bin\\named.exe", m_targetDir); CStringA namedLocA(namedLoc); const char *str = (const char *) namedLocA; char pathBuffer[2 * MAX_PATH]; strncpy(pathBuffer, str, sizeof(pathBuffer) - 1); pathBuffer[sizeof(pathBuffer) - 1] = 0; PathQuoteSpaces(pathBuffer); SetCurrent(IDS_OPEN_SERVICE); hService = OpenService(hSCManager, BIND_SERVICE_NAME, SERVICE_CHANGE_CONFIG); if (!hService) { MsgBox(IDS_ERR_OPEN_SERVICE, GetErrMessage()); if (hSCManager) CloseServiceHandle(hSCManager); return; } else { if (ChangeServiceConfig(hService, dwServiceType, dwStart, SERVICE_ERROR_NORMAL, pathBuffer, NULL, NULL, NULL, StartName, m_accountPassword, BIND_DISPLAY_NAME) != TRUE) { DWORD err = GetLastError(); MsgBox(IDS_ERR_UPDATE_SERVICE, GetErrMessage()); } } if (hService) CloseServiceHandle(hService); if (hSCManager) CloseServiceHandle(hSCManager); SetItemStatus(IDC_REG_SERVICE); }
0
[ "CWE-284" ]
bind9
967a3b9419a3c12b8c0870c86d1ee3840bcbbad7
321,177,848,808,084,200,000,000,000,000,000,000,000
57
[master] quote service registry paths 4532. [security] The BIND installer on Windows used an unquoted service path, which can enable privilege escalation. (CVE-2017-3141) [RT #45229]
GetStructFieldBufSize(matvar_t *matvar, size_t *size) { int err; size_t nBytes = 0, type_buf_size; size_t tag_size = 8, array_flags_size = 8; *size = 0; if ( matvar == NULL ) return GetEmptyMatrixMaxBufSize(NULL, 2, size); /* Add the Array Flags tag and space to the number of bytes */ nBytes += tag_size + array_flags_size; /* In a struct field, the name is just a tag with 0 bytes */ nBytes += tag_size; err = GetTypeBufSize(matvar, &type_buf_size); if ( err ) return err; err = Add(&nBytes, nBytes, type_buf_size); if ( err ) return err; *size = nBytes; return MATIO_E_NO_ERROR; }
0
[ "CWE-200", "CWE-401" ]
matio
b53b62b756920f4c1509f4ee06427f66c3b5c9c4
180,432,200,620,506,460,000,000,000,000,000,000,000
27
Fix memory leak As reported by https://github.com/tbeu/matio/issues/186
n_start_visual_mode(int c) { #ifdef FEAT_CONCEAL int cursor_line_was_concealed = curwin->w_p_cole > 0 && conceal_cursor_line(curwin); #endif VIsual_mode = c; VIsual_active = TRUE; VIsual_reselect = TRUE; trigger_modechanged(); // Corner case: the 0 position in a tab may change when going into // virtualedit. Recalculate curwin->w_cursor to avoid bad highlighting. if (c == Ctrl_V && (get_ve_flags() & VE_BLOCK) && gchar_cursor() == TAB) { validate_virtcol(); coladvance(curwin->w_virtcol); } VIsual = curwin->w_cursor; #ifdef FEAT_FOLDING foldAdjustVisual(); #endif setmouse(); #ifdef FEAT_CONCEAL // Check if redraw is needed after changing the state. conceal_check_cursor_line(cursor_line_was_concealed); #endif if (p_smd && msg_silent == 0) redraw_cmdline = TRUE; // show visual mode later #ifdef FEAT_CLIPBOARD // Make sure the clipboard gets updated. Needed because start and // end may still be the same, and the selection needs to be owned clip_star.vmode = NUL; #endif // Only need to redraw this line, unless still need to redraw an old // Visual area (when 'lazyredraw' is set). if (curwin->w_redr_type < INVERTED) { curwin->w_old_cursor_lnum = curwin->w_cursor.lnum; curwin->w_old_visual_lnum = curwin->w_cursor.lnum; } }
1
[ "CWE-122" ]
vim
a062006b9de0b2947ab5fb376c6e67ef92a8cd69
50,861,159,610,133,700,000,000,000,000,000,000,000
47
patch 8.2.3610: crash when ModeChanged triggered too early Problem: Crash when ModeChanged triggered too early. Solution: Trigger ModeChanged after setting VIsual.
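The fix is purely an ordering change: fire the autocommand only after the state it exposes is valid. The sketch below uses placeholder names for VIsual and for the ModeChanged trigger; it is not the actual Vim code:

struct nv_state {
    int visual_set;   /* stands in for VIsual/VIsual_active */
};

static void trigger_modechanged_sketch(struct nv_state *s)
{
    /* handlers may inspect the Visual area here; with the buggy
     * ordering it was still unset, leading to the reported crash */
    (void)s;
}

void start_visual_sketch(struct nv_state *s)
{
    s->visual_set = 1;              /* set VIsual first ...          */
    trigger_modechanged_sketch(s);  /* ... only then fire the event  */
}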
MagickExport Image *TransposeImage(const Image *image,ExceptionInfo *exception) { #define TransposeImageTag "Transpose/Image" CacheView *image_view, *transpose_view; Image *transpose_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo page; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); transpose_image=CloneImage(image,image->rows,image->columns,MagickTrue, exception); if (transpose_image == (Image *) NULL) return((Image *) NULL); /* Transpose image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); transpose_view=AcquireAuthenticCacheView(transpose_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,transpose_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-y-1, image->columns,1,exception); q=QueueCacheViewAuthenticPixels(transpose_view,(ssize_t) (image->rows-y-1), 0,1,transpose_image->rows,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait transpose_traits=GetPixelChannelTraits(transpose_image, channel); if ((traits == UndefinedPixelTrait) || (transpose_traits == UndefinedPixelTrait)) continue; SetPixelChannel(transpose_image,channel,p[i],q); } p+=GetPixelChannels(image); q+=GetPixelChannels(transpose_image); } if (SyncCacheViewAuthenticPixels(transpose_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,TransposeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } transpose_view=DestroyCacheView(transpose_view); image_view=DestroyCacheView(image_view); transpose_image->type=image->type; page=transpose_image->page; Swap(page.width,page.height); Swap(page.x,page.y); transpose_image->page=page; if (status == MagickFalse) transpose_image=DestroyImage(transpose_image); return(transpose_image); }
0
[ "CWE-190" ]
ImageMagick
64dc80b2e1907f7f20bf34d4df9483f938b0de71
126,077,873,436,594,700,000,000,000,000,000,000,000
112
https://github.com/ImageMagick/ImageMagick/issues/1731
check_secret_key (ECC_secret_key *sk, mpi_ec_t ec, int flags) { int rc = 1; mpi_point_struct Q; gcry_mpi_t x1, y1; gcry_mpi_t x2 = NULL; gcry_mpi_t y2 = NULL; point_init (&Q); x1 = mpi_new (0); if (ec->model == MPI_EC_MONTGOMERY) y1 = NULL; else y1 = mpi_new (0); /* G in E(F_p) */ if (!_gcry_mpi_ec_curve_point (&sk->E.G, ec)) { if (DBG_CIPHER) log_debug ("Bad check: Point 'G' does not belong to curve 'E'!\n"); goto leave; } /* G != PaI */ if (!mpi_cmp_ui (sk->E.G.z, 0)) { if (DBG_CIPHER) log_debug ("Bad check: 'G' cannot be Point at Infinity!\n"); goto leave; } /* Check order of curve. */ if (sk->E.dialect != ECC_DIALECT_ED25519 && !(flags & PUBKEY_FLAG_DJB_TWEAK)) { _gcry_mpi_ec_mul_point (&Q, sk->E.n, &sk->E.G, ec); if (mpi_cmp_ui (Q.z, 0)) { if (DBG_CIPHER) log_debug ("check_secret_key: E is not a curve of order n\n"); goto leave; } } /* Pubkey cannot be PaI */ if (!mpi_cmp_ui (sk->Q.z, 0)) { if (DBG_CIPHER) log_debug ("Bad check: Q can not be a Point at Infinity!\n"); goto leave; } /* pubkey = [d]G over E */ if (!_gcry_ecc_compute_public (&Q, ec, &sk->E.G, sk->d)) { if (DBG_CIPHER) log_debug ("Bad check: computation of dG failed\n"); goto leave; } if (_gcry_mpi_ec_get_affine (x1, y1, &Q, ec)) { if (DBG_CIPHER) log_debug ("Bad check: Q can not be a Point at Infinity!\n"); goto leave; } if ((flags & PUBKEY_FLAG_EDDSA)) ; /* Fixme: EdDSA is special. */ else if (!mpi_cmp_ui (sk->Q.z, 1)) { /* Fast path if Q is already in affine coordinates. */ if (mpi_cmp (x1, sk->Q.x) || (y1 && mpi_cmp (y1, sk->Q.y))) { if (DBG_CIPHER) log_debug ("Bad check: There is NO correspondence between 'd' and 'Q'!\n"); goto leave; } } else { x2 = mpi_new (0); y2 = mpi_new (0); if (_gcry_mpi_ec_get_affine (x2, y2, &sk->Q, ec)) { if (DBG_CIPHER) log_debug ("Bad check: Q can not be a Point at Infinity!\n"); goto leave; } if (mpi_cmp (x1, x2) || mpi_cmp (y1, y2)) { if (DBG_CIPHER) log_debug ("Bad check: There is NO correspondence between 'd' and 'Q'!\n"); goto leave; } } rc = 0; /* Okay. */ leave: mpi_free (x2); mpi_free (x1); mpi_free (y1); mpi_free (y2); point_free (&Q); return rc; }
0
[ "CWE-200" ]
libgcrypt
bf76acbf0da6b0f245e491bec12c0f0a1b5be7c9
196,953,019,922,368,270,000,000,000,000,000,000,000
107
ecc: Add input validation for X25519. * cipher/ecc.c (ecc_decrypt_raw): Add input validation. * mpi/ec.c (ec_p_init): Use scratch buffer for bad points. (_gcry_mpi_ec_bad_point): New. -- Following is the paper describing the attack: May the Fourth Be With You: A Microarchitectural Side Channel Attack on Real-World Applications of Curve25519 by Daniel Genkin, Luke Valenta, and Yuval Yarom In the current implementation, we do output checking and it results an error for those bad points. However, when attacked, the computation will done with leak of private key, even it will results errors. To mitigate leak, we added input validation. Note that we only list bad points with MSB=0. By X25519, MSB is always cleared. In future, we should implement constant-time field computation. Then, this input validation could be removed, if performance is important and we are sure for no leak. CVE-id: CVE-2017-0379 Signed-off-by: NIIBE Yutaka <[email protected]>
flags_from_portable(int pflags) { int flags = 0; if ((pflags & SSH2_FXF_READ) && (pflags & SSH2_FXF_WRITE)) { flags = O_RDWR; } else if (pflags & SSH2_FXF_READ) { flags = O_RDONLY; } else if (pflags & SSH2_FXF_WRITE) { flags = O_WRONLY; } if (pflags & SSH2_FXF_APPEND) flags |= O_APPEND; if (pflags & SSH2_FXF_CREAT) flags |= O_CREAT; if (pflags & SSH2_FXF_TRUNC) flags |= O_TRUNC; if (pflags & SSH2_FXF_EXCL) flags |= O_EXCL; return flags; }
0
[ "CWE-732", "CWE-703", "CWE-269" ]
src
a6981567e8e215acc1ef690c8dbb30f2d9b00a19
11,547,849,949,785,519,000,000,000,000,000,000,000
22
disallow creation (of empty files) in read-only mode; reported by Michal Zalewski, feedback & ok deraadt@
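Relating this to flags_from_portable() above: even SSH2_FXF_READ combined with SSH2_FXF_CREAT creates an (empty) file, which a read-only server must refuse. Below is a hedged sketch of such a policy check; the function name and the exact policy are assumptions and may differ from the actual sftp-server change:

#include <fcntl.h>

/* Returns -1 (deny) for any request a read-only server must refuse:
 * creating, truncating, or appending is a write to the filesystem. */
int flags_allowed_readonly(int flags)
{
    if ((flags & (O_CREAT | O_TRUNC | O_APPEND)) ||
        (flags & O_ACCMODE) != O_RDONLY)
        return -1;   /* caller maps this to a permission-denied status */
    return flags;
}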
static void mct_u232_break_ctl(struct tty_struct *tty, int break_state) { struct usb_serial_port *port = tty->driver_data; struct mct_u232_private *priv = usb_get_serial_port_data(port); unsigned char lcr; unsigned long flags; spin_lock_irqsave(&priv->lock, flags); lcr = priv->last_lcr; if (break_state) lcr |= MCT_U232_SET_BREAK; spin_unlock_irqrestore(&priv->lock, flags); mct_u232_set_line_ctrl(port, lcr); } /* mct_u232_break_ctl */
0
[ "CWE-703" ]
linux
4e9a0b05257f29cf4b75f3209243ed71614d062e
284,791,309,673,935,430,000,000,000,000,000,000,000
16
USB: mct_u232: add sanity checking in probe An attack using the lack of sanity checking in probe is known. This patch checks for the existence of a second port. CVE-2016-3136 Signed-off-by: Oliver Neukum <[email protected]> CC: [email protected] [johan: add error message ] Signed-off-by: Johan Hovold <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
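A sketch of the probe-time check the message describes; the structs are minimal stand-ins for usb_serial/usb_serial_port, the exact condition is an assumption, and -19 hard-codes -ENODEV so the snippet stays self-contained:

struct port_sketch { void *interrupt_in_urb; };
struct serial_sketch { struct port_sketch *port[2]; };

int mct_u232_probe_sketch(struct serial_sketch *serial)
{
    /* A crafted device may expose only one port; dereferencing
     * port[1] later would then be a NULL pointer dereference. */
    if (!serial->port[1] || !serial->port[1]->interrupt_in_urb)
        return -19; /* -ENODEV: reject the device during probe */
    return 0;
}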
static int monitor_interception(struct vcpu_svm *svm) { printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n"); return nop_interception(svm); }
0
[]
kvm
854e8bb1aa06c578c2c9145fa6bfe3680ef63b23
189,762,884,360,453,900,000,000,000,000,000,000,000
5
KVM: x86: Check non-canonical addresses upon WRMSR Upon WRMSR, the CPU should inject #GP if a non-canonical value (address) is written to certain MSRs. The behavior is "almost" identical for AMD and Intel (ignoring MSRs that are not implemented in either architecture since they would anyhow #GP). However, IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if a non-canonical address is written on Intel but not on AMD (which ignores the top 32-bits). Accordingly, this patch injects a #GP on the MSRs which behave identically on Intel and AMD. To eliminate the differences between the architectures, the value which is written to IA32_SYSENTER_ESP and IA32_SYSENTER_EIP is turned to canonical value before writing instead of injecting a #GP. Some references from Intel and AMD manuals: According to Intel SDM description of WRMSR instruction #GP is expected on WRMSR "If the source register contains a non-canonical address and ECX specifies one of the following MSRs: IA32_DS_AREA, IA32_FS_BASE, IA32_GS_BASE, IA32_KERNEL_GS_BASE, IA32_LSTAR, IA32_SYSENTER_EIP, IA32_SYSENTER_ESP." According to the AMD instruction manual: LSTAR/CSTAR (SYSCALL): "The WRMSR instruction loads the target RIP into the LSTAR and CSTAR registers. If an RIP written by WRMSR is not in canonical form, a general-protection exception (#GP) occurs." IA32_GS_BASE and IA32_FS_BASE (WRFSBASE/WRGSBASE): "The address written to the base field must be in canonical form or a #GP fault will occur." IA32_KERNEL_GS_BASE (SWAPGS): "The address stored in the KernelGSbase MSR must be in canonical form." This patch fixes CVE-2014-3610. Cc: [email protected] Signed-off-by: Nadav Amit <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
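The canonicality test itself is mechanical and can be shown standalone, assuming 48-bit virtual addresses (bits 63..47 must all equal bit 47); the function names are invented for the sketch:

#include <stdint.h>

static int is_noncanonical_sketch(uint64_t addr)
{
    /* sign-extend from bit 47 and compare with the original */
    return ((int64_t)(addr << 16) >> 16) != (int64_t)addr;
}

/* Models the WRMSR path for address-holding MSRs such as IA32_LSTAR:
 * a nonzero return means the value is acceptable, zero means the
 * guest should receive #GP. */
int wrmsr_addr_ok(uint64_t data)
{
    return !is_noncanonical_sketch(data);
}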
dfamust (struct dfa *d)
{
  must *musts;
  must *mp;
  char *result;
  size_t ri;
  size_t i;
  int exact;
  token t;
  static must must0;
  struct dfamust *dm;
  static char empty_string[] = "";

  result = empty_string;
  exact = 0;
  MALLOC (musts, d->tindex + 1);
  mp = musts;
  for (i = 0; i <= d->tindex; ++i)
    mp[i] = must0;
  for (i = 0; i <= d->tindex; ++i)
    {
      mp[i].in = xmalloc(sizeof *mp[i].in);
      mp[i].left = xmalloc(2);
      mp[i].right = xmalloc(2);
      mp[i].is = xmalloc(2);
      mp[i].left[0] = mp[i].right[0] = mp[i].is[0] = '\0';
      mp[i].in[0] = NULL;
    }
#ifdef DEBUG
  fprintf(stderr, "dfamust:\n");
  for (i = 0; i < d->tindex; ++i)
    {
      fprintf(stderr, " %zd:", i);
      prtok(d->tokens[i]);
    }
  putc('\n', stderr);
#endif
  for (ri = 0; ri < d->tindex; ++ri)
    {
      switch (t = d->tokens[ri])
        {
        case LPAREN:
        case RPAREN:
          assert (!"neither LPAREN nor RPAREN may appear here");
        case EMPTY:
        case BEGLINE:
        case ENDLINE:
        case BEGWORD:
        case ENDWORD:
        case LIMWORD:
        case NOTLIMWORD:
        case BACKREF:
          resetmust(mp);
          break;
        case STAR:
        case QMARK:
          assert (musts < mp);
          --mp;
          resetmust(mp);
          break;
        case OR:
          assert (&musts[2] <= mp);
          {
            char **new;
            must *lmp;
            must *rmp;
            size_t j, ln, rn, n;

            rmp = --mp;
            lmp = --mp;
            /* Guaranteed to be. Unlikely, but. . . */
            if (!STREQ (lmp->is, rmp->is))
              lmp->is[0] = '\0';
            /* Left side--easy */
            i = 0;
            while (lmp->left[i] != '\0' && lmp->left[i] == rmp->left[i])
              ++i;
            lmp->left[i] = '\0';
            /* Right side */
            ln = strlen(lmp->right);
            rn = strlen(rmp->right);
            n = ln;
            if (n > rn)
              n = rn;
            for (i = 0; i < n; ++i)
              if (lmp->right[ln - i - 1] != rmp->right[rn - i - 1])
                break;
            for (j = 0; j < i; ++j)
              lmp->right[j] = lmp->right[(ln - i) + j];
            lmp->right[j] = '\0';
            new = inboth(lmp->in, rmp->in);
            if (new == NULL)
              goto done;
            freelist(lmp->in);
            free(lmp->in);
            lmp->in = new;
          }
          break;
        case PLUS:
          assert (musts < mp);
          --mp;
          mp->is[0] = '\0';
          break;
        case END:
          assert (mp == &musts[1]);
          for (i = 0; musts[0].in[i] != NULL; ++i)
            if (strlen(musts[0].in[i]) > strlen(result))
              result = musts[0].in[i];
          if (STREQ (result, musts[0].is))
            exact = 1;
          goto done;
        case CAT:
          assert (&musts[2] <= mp);
          {
            must *lmp;
            must *rmp;

            rmp = --mp;
            lmp = --mp;
            /* In. Everything in left, plus everything in right, plus catenation of left's right and right's left. */
            lmp->in = addlists(lmp->in, rmp->in);
            if (lmp->in == NULL)
              goto done;
            if (lmp->right[0] != '\0' && rmp->left[0] != '\0')
              {
                char *tp;

                tp = icpyalloc(lmp->right);
                tp = icatalloc(tp, rmp->left);
                lmp->in = enlist(lmp->in, tp, strlen(tp));
                free(tp);
                if (lmp->in == NULL)
                  goto done;
              }
            /* Left-hand */
            if (lmp->is[0] != '\0')
              {
                lmp->left = icatalloc(lmp->left, rmp->left);
                if (lmp->left == NULL)
                  goto done;
              }
            /* Right-hand */
            if (rmp->is[0] == '\0')
              lmp->right[0] = '\0';
            lmp->right = icatalloc(lmp->right, rmp->right);
            if (lmp->right == NULL)
              goto done;
            /* Guaranteed to be */
            if (lmp->is[0] != '\0' && rmp->is[0] != '\0')
              {
                lmp->is = icatalloc(lmp->is, rmp->is);
                if (lmp->is == NULL)
                  goto done;
              }
            else
              lmp->is[0] = '\0';
          }
          break;
        default:
          if (t < END)
            {
              assert (!"oops! t >= END");
            }
          else if (t == '\0')
            {
              /* not on *my* shift */
              goto done;
            }
          else if (t >= CSET || !MBS_SUPPORT || t == ANYCHAR || t == MBCSET)
            {
              /* easy enough */
              resetmust(mp);
            }
          else
            {
              /* plain character */
              resetmust(mp);
              mp->is[0] = mp->left[0] = mp->right[0] = t;
              mp->is[1] = mp->left[1] = mp->right[1] = '\0';
              mp->in = enlist(mp->in, mp->is, (size_t)1);
              if (mp->in == NULL)
                goto done;
            }
          break;
        }
#ifdef DEBUG
      fprintf(stderr, " node: %zd:", ri);
      prtok(d->tokens[ri]);
      fprintf(stderr, "\n  in:");
      for (i = 0; mp->in[i]; ++i)
        fprintf(stderr, " \"%s\"", mp->in[i]);
      fprintf(stderr, "\n  is: \"%s\"\n", mp->is);
      fprintf(stderr, "  left: \"%s\"\n", mp->left);
      fprintf(stderr, " right: \"%s\"\n", mp->right);
#endif
      ++mp;
    }
 done:
  if (strlen(result))
    {
      MALLOC(dm, 1);
      dm->exact = exact;
      MALLOC(dm->must, strlen(result) + 1);
      strcpy(dm->must, result);
      dm->next = d->musts;
      d->musts = dm;
    }
  mp = musts;
  for (i = 0; i <= d->tindex; ++i)
    {
      freelist(mp[i].in);
      free(mp[i].in);
      free(mp[i].left);
      free(mp[i].right);
      free(mp[i].is);
    }
  free(mp);
}
0
[ "CWE-189" ]
grep
cbbc1a45b9f843c811905c97c90a5d31f8e6c189
33,238,087,462,623,470,000,000,000,000,000,000,000
226
grep: fix some core dumps with long lines etc. These problems mostly occur because the code attempts to stuff sizes into int or into unsigned int; this doesn't work on most 64-bit hosts and the errors can lead to core dumps. * NEWS: Document this. * src/dfa.c (token): Typedef to ptrdiff_t, since the enum's range could be as small as -128 .. 127 on practical hosts. (position.index): Now size_t, not unsigned int. (leaf_set.elems): Now size_t *, not unsigned int *. (dfa_state.hash, struct mb_char_classes.nchars, .nch_classes) (.nranges, .nequivs, .ncoll_elems, struct dfa.cindex, .calloc, .tindex) (.talloc, .depth, .nleaves, .nregexps, .nmultibyte_prop, .nmbcsets): (.mbcsets_alloc): Now size_t, not int. (dfa_state.first_end): Now token, not int. (state_num): New type. (struct mb_char_classes.cset): Now ptrdiff_t, not int. (struct dfa.utf8_anychar_classes): Now token[5], not int[5]. (struct dfa.sindex, .salloc, .tralloc): Now state_num, not int. (struct dfa.trans, .realtrans, .fails): Now state_num **, not int **. (struct dfa.newlines): Now state_num *, not int *. (prtok): Don't assume 'token' is no wider than int. (lexleft, parens, depth): Now size_t, not int. (charclass_index, nsubtoks) (parse_bracket_exp, addtok, copytoks, closure, insert, merge, delete) (state_index, epsclosure, state_separate_contexts) (dfaanalyze, dfastate, build_state, realloc_trans_if_necessary) (transit_state_singlebyte, match_anychar, match_mb_charset) (check_matching_with_multibyte_ops, transit_state_consume_1char) (transit_state, dfaexec, free_mbdata, dfaoptimize, dfafree) (freelist, enlist, addlists, inboth, dfamust): Don't assume indexes fit in 'int'. (lex): Avoid overflow in string-to-{hi,lo} conversions. (dfaanalyze): Redo indexing so that it works with size_t values, which cannot go negative. * src/dfa.h (dfaexec): Count argument is now size_t *, not int *. (dfastate): State numbers are now ptrdiff_t, not int. * src/dfasearch.c: Include "intprops.h", for TYPE_MAXIMUM. (kwset_exact_matches): Now size_t, not int. (EGexecute): Don't assume indexes fit in 'int'. Check for overflow before converting a ptrdiff_t to a regoff_t, as regoff_t is narrower than ptrdiff_t in 64-bit glibc (contra POSIX). Check for memory exhaustion in re_search rather than treating it merely as failure to match; use xalloc_die () to report any error. * src/kwset.c (struct trie.accepting): Now size_t, not unsigned int. (struct kwset.words): Now ptrdiff_t, not int. * src/kwset.h (struct kwsmatch.index): Now size_t, not int.
static void ipv6_mc_rejoin_groups(struct inet6_dev *idev) { struct ifmcaddr6 *pmc; ASSERT_RTNL(); mutex_lock(&idev->mc_lock); if (mld_in_v1_mode(idev)) { for_each_mc_mclock(idev, pmc) igmp6_join_group(pmc); } else { mld_send_report(idev, NULL); } mutex_unlock(&idev->mc_lock); }
0
[ "CWE-703" ]
linux
2d3916f3189172d5c69d33065c3c21119fe539fc
96,861,043,802,199,520,000,000,000,000,000,000,000
15
ipv6: fix skb drops in igmp6_event_query() and igmp6_event_report() While investigating on why a synchronize_net() has been added recently in ipv6_mc_down(), I found that igmp6_event_query() and igmp6_event_report() might drop skbs in some cases. Discussion about removing synchronize_net() from ipv6_mc_down() will happen in a different thread. Fixes: f185de28d9ae ("mld: add new workqueues for process mld events") Signed-off-by: Eric Dumazet <[email protected]> Cc: Taehee Yoo <[email protected]> Cc: Cong Wang <[email protected]> Cc: David Ahern <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Jakub Kicinski <[email protected]>
StreamEncoder& ClientConnectionImpl::newStream(StreamDecoder& response_decoder) { if (resetStreamCalled()) { throw CodecClientException("cannot create new streams after calling reset"); } // If reads were disabled due to flow control, we expect reads to always be enabled again before // reusing this connection. This is done when the final pipeline response is received. ASSERT(connection_.readEnabled()); request_encoder_ = std::make_unique<RequestStreamEncoderImpl>(*this); pending_responses_.emplace_back(&response_decoder); return *request_encoder_; }
0
[ "CWE-400", "CWE-703" ]
envoy
afc39bea36fd436e54262f150c009e8d72db5014
197,230,253,310,208,000,000,000,000,000,000,000,000
13
Track byteSize of HeaderMap internally. Introduces a cached byte size updated internally in HeaderMap. The value is stored as an optional, and is cleared whenever a non-const pointer or reference to a HeaderEntry is accessed. The cached value can be set with refreshByteSize() which performs an iteration over the HeaderMap to sum the size of each key and value in the HeaderMap. Signed-off-by: Asra Ali <[email protected]>
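The same caching discipline can be sketched in C (the real change is C++ inside Envoy's HeaderMap; all names below are invented stand-ins): the cached size is dropped whenever a mutable handle to an entry escapes, and recomputed by a single refresh pass.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct header { const char *key; char value[64]; };

struct header_map {
    struct header entries[4];
    size_t count;
    size_t cached_bytes;
    bool cache_valid;   /* stands in for the optional<uint64_t> */
};

static char *mutable_value(struct header_map *m, size_t i)
{
    m->cache_valid = false;  /* value length may change under us */
    return m->entries[i].value;
}

static size_t byte_size(struct header_map *m)
{
    if (!m->cache_valid) {   /* refreshByteSize() analogue */
        size_t total = 0;
        for (size_t i = 0; i < m->count; i++)
            total += strlen(m->entries[i].key) + strlen(m->entries[i].value);
        m->cached_bytes = total;
        m->cache_valid = true;
    }
    return m->cached_bytes;
}

int main(void)
{
    struct header_map m = { .entries = {{ "host", "example" }}, .count = 1 };
    printf("%zu\n", byte_size(&m));            /* 4 + 7 = 11 */
    strcpy(mutable_value(&m, 0), "example.com");
    printf("%zu\n", byte_size(&m));            /* recomputed: 15 */
    return 0;
}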
static void cli_flush_use_result(MYSQL *mysql, my_bool flush_all_results) { /* Clear the current execution status */ DBUG_ENTER("cli_flush_use_result"); DBUG_PRINT("warning",("Not all packets read, clearing them")); if (flush_one_result(mysql)) DBUG_VOID_RETURN; /* An error occurred */ if (! flush_all_results) DBUG_VOID_RETURN; while (mysql->server_status & SERVER_MORE_RESULTS_EXISTS) { my_bool is_ok_packet; if (opt_flush_ok_packet(mysql, &is_ok_packet)) DBUG_VOID_RETURN; /* An error occurred. */ if (is_ok_packet) { /* Indeed what we got from network was an OK packet, and we know that OK is the last one in a multi-result-set, so just return. */ DBUG_VOID_RETURN; } /* It's a result set, not an OK packet. A result set consists of two result set subsequences: field metadata, terminated with EOF packet, and result set data, again terminated with EOF packet. Read and flush them. */ if (flush_one_result(mysql) || flush_one_result(mysql)) DBUG_VOID_RETURN; /* An error occurred. */ } DBUG_VOID_RETURN; }
0
[ "CWE-254" ]
mysql-server
13380bf81f6bc20d39549f531f9acebdfb5a8c37
76,757,697,035,975,810,000,000,000,000,000,000,000
38
Bug #22295186: CERTIFICATE VALIDATION BUG IN MYSQL MAY ALLOW MITM
ipmi_fru_read_internal_use(struct ipmi_intf * intf, uint8_t id, char * pFileName) { struct fru_info fru; uint16_t size; uint16_t offset; int rc = 0; rc = ipmi_fru_get_internal_use_info(intf, id, &fru, &size, &offset); if(rc == 0) { uint8_t * frubuf; lprintf(LOG_DEBUG, "Internal Use Area Offset: %i", offset); printf( "Internal Use Area Size : %i\n", size); frubuf = malloc( size ); if(frubuf) { rc = read_fru_area_section(intf, &fru, id, offset, size, frubuf); if(rc == 0) { if(!pFileName) { uint16_t counter; for(counter = 0; counter < size; counter ++) { if((counter % 16) == 0) printf("\n%02i- ", (counter / 16)); printf("%02X ", frubuf[counter]); } } else { FILE * pFile; pFile = fopen(pFileName,"wb"); if (pFile) { fwrite(frubuf, size, 1, pFile); printf("Done\n"); } else { lprintf(LOG_ERR, "Error opening file %s\n", pFileName); free_n(&frubuf); return -1; } fclose(pFile); } } printf("\n"); free_n(&frubuf); } } else { lprintf(LOG_ERR, "Cannot access internal use area"); } return 0; }
0
[ "CWE-120", "CWE-787" ]
ipmitool
e824c23316ae50beb7f7488f2055ac65e8b341f2
290,397,334,096,765,840,000,000,000,000,000,000,000
63
fru: Fix buffer overflow vulnerabilities Partial fix for CVE-2020-5208, see https://github.com/ipmitool/ipmitool/security/advisories/GHSA-g659-9qxw-p7cp The `read_fru_area_section` function only performs size validation of requested read size, and falsely assumes that the IPMI message will not respond with more than the requested amount of data; it uses the unvalidated response size to copy into `frubuf`. If the response is larger than the request, this can result in overflowing the buffer. The same issue affects the `read_fru_area` function.
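A sketch of the fix idea from the advisory (invented helper, not ipmitool's exact patch): treat the device-reported response length as untrusted and clamp the copy to both the requested size and the destination's capacity before memcpy().

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static size_t copy_fru_chunk(uint8_t *dst, size_t dst_len,
                             const uint8_t *rsp, size_t rsp_len,
                             size_t requested)
{
    size_t n = rsp_len;
    if (n > requested)   /* device answered with more than was asked */
        n = requested;
    if (n > dst_len)     /* and never past the end of the buffer */
        n = dst_len;
    memcpy(dst, rsp, n);
    return n;
}

int main(void)
{
    uint8_t buf[8], rsp[32] = {0};
    printf("%zu\n", copy_fru_chunk(buf, sizeof buf, rsp, sizeof rsp, 16));
    return 0;  /* prints 8: clamped to the destination size */
}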
static void pmac_backlight_unblank(void) { mutex_lock(&pmac_backlight_mutex); if (pmac_backlight) { struct backlight_properties *props; props = &pmac_backlight->props; props->brightness = props->max_brightness; props->power = FB_BLANK_UNBLANK; backlight_update_status(pmac_backlight); } mutex_unlock(&pmac_backlight_mutex); }
0
[]
linux
5d176f751ee3c6eededd984ad409bff201f436a7
51,647,088,605,088,020,000,000,000,000,000,000,000
13
powerpc: tm: Enable transactional memory (TM) lazily for userspace Currently the MSR TM bit is always set if the hardware is TM capable. This adds extra overhead as it means the TM SPRS (TFHAR, TEXASR and TFAIR) must be swapped for each process regardless of if they use TM. For processes that don't use TM the TM MSR bit can be turned off allowing the kernel to avoid the expensive swap of the TM registers. A TM unavailable exception will occur if a thread does use TM and the kernel will enable MSR_TM and leave it so for some time afterwards. Signed-off-by: Cyril Bur <[email protected]> Signed-off-by: Michael Ellerman <[email protected]>
static int file_map_prot_check(struct file *file, unsigned long prot, int shared) { const struct cred *cred = current_cred(); int rc = 0; #ifndef CONFIG_PPC32 if ((prot & PROT_EXEC) && (!file || (!shared && (prot & PROT_WRITE)))) { /* * We are making executable an anonymous mapping or a * private file mapping that will also be writable. * This has an additional check. */ rc = cred_has_perm(cred, cred, PROCESS__EXECMEM); if (rc) goto error; } #endif if (file) { /* read access is always possible with a mapping */ u32 av = FILE__READ; /* write access only matters if the mapping is shared */ if (shared && (prot & PROT_WRITE)) av |= FILE__WRITE; if (prot & PROT_EXEC) av |= FILE__EXECUTE; return file_has_perm(cred, file, av); } error: return rc; }
0
[]
linux-2.6
ee18d64c1f632043a02e6f5ba5e045bb26a5465f
17,074,033,501,534,610,000,000,000,000,000,000,000
35
KEYS: Add a keyctl to install a process's session keyring on its parent [try #6]

Add a keyctl to install a process's session keyring onto its parent. This replaces the parent's session keyring. Because the COW credential code does not permit one process to change another process's credentials directly, the change is deferred until userspace next starts executing again. Normally this will be after a wait*() syscall.

To support this, three new security hooks have been provided: cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in the blank security creds and key_session_to_parent() - which asks the LSM if the process may replace its parent's session keyring.

The replacement may only happen if the process has the same ownership details as its parent, and the process has LINK permission on the session keyring, and the session keyring is owned by the process, and the LSM permits it.

Note that this requires alteration to each architecture's notify_resume path. This has been done for all arches barring blackfin, m68k* and xtensa, all of which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the replacement to be performed at the point the parent process resumes userspace execution.

This allows the userspace AFS pioctl emulation to fully emulate newpag() and the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to alter the parent process's PAG membership. However, since kAFS doesn't use PAGs per se, but rather dumps the keys into the session keyring, the session keyring of the parent must be replaced if, for example, VIOCSETTOK is passed the newpag flag.

This can be tested with the following program:

	#include <stdio.h>
	#include <stdlib.h>
	#include <keyutils.h>

	#define KEYCTL_SESSION_TO_PARENT 18

	#define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0)

	int main(int argc, char **argv)
	{
		key_serial_t keyring, key;
		long ret;

		keyring = keyctl_join_session_keyring(argv[1]);
		OSERROR(keyring, "keyctl_join_session_keyring");

		key = add_key("user", "a", "b", 1, keyring);
		OSERROR(key, "add_key");

		ret = keyctl(KEYCTL_SESSION_TO_PARENT);
		OSERROR(ret, "KEYCTL_SESSION_TO_PARENT");

		return 0;
	}

Compiled and linked with -lkeyutils, you should see something like:

	[dhowells@andromeda ~]$ keyctl show
	Session Keyring
	       -3 --alswrv   4043  4043  keyring: _ses
	355907932 --alswrv   4043    -1   \_ keyring: _uid.4043
	[dhowells@andromeda ~]$ /tmp/newpag
	[dhowells@andromeda ~]$ keyctl show
	Session Keyring
	       -3 --alswrv   4043  4043  keyring: _ses
	1055658746 --alswrv   4043  4043   \_ user: a
	[dhowells@andromeda ~]$ /tmp/newpag hello
	[dhowells@andromeda ~]$ keyctl show
	Session Keyring
	       -3 --alswrv   4043  4043  keyring: hello
	340417692 --alswrv   4043  4043   \_ user: a

Where the test program creates a new session keyring, sticks a user key named 'a' into it and then installs it on its parent.

Signed-off-by: David Howells <[email protected]> Signed-off-by: James Morris <[email protected]>
static int file_read(jas_stream_obj_t *obj, char *buf, int cnt) { jas_stream_fileobj_t *fileobj; JAS_DBGLOG(100, ("file_read(%p, %p, %d)\n", obj, buf, cnt)); fileobj = JAS_CAST(jas_stream_fileobj_t *, obj); return read(fileobj->fd, buf, cnt); }
0
[ "CWE-415", "CWE-190", "CWE-369" ]
jasper
634ce8e8a5accc0fa05dd2c20d42b4749d4b2735
8,139,650,960,061,536,000,000,000,000,000,000,000
7
Made some changes to the I/O stream library for memory streams. There were a number of potential problems due to the possibility of integer overflow. Changed some integral types to the larger types size_t or ssize_t. For example, the function mem_resize now takes the buffer size parameter as a size_t. Added a new function jas_stream_memopen2, which takes a buffer size specified as a size_t instead of an int. This can be used in jas_image_cmpt_create to avoid potential overflow problems. Added a new function jas_deprecated to warn about reliance on deprecated library behavior.
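The commit describes moving stream sizes to size_t (and adding jas_stream_memopen2 with a size_t buffer-size parameter) so sizes are never squeezed through int. A hedged sketch of the safe-resize idiom (not JasPer's code; mem_grow is an invented name):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Sizes stay in size_t end to end, with explicit checks instead of
 * signed int arithmetic that can overflow. */
static int mem_grow(unsigned char **buf, size_t *cap, size_t want)
{
    if (want <= *cap)
        return 0;                 /* nothing to do */
    if (want > SIZE_MAX / 2)
        return -1;                /* reject absurd requests up front */
    unsigned char *p = realloc(*buf, want);
    if (p == NULL)
        return -1;
    *buf = p;
    *cap = want;
    return 0;
}

int main(void)
{
    unsigned char *buf = NULL;
    size_t cap = 0;
    if (mem_grow(&buf, &cap, 4096) == 0)
        printf("capacity now %zu\n", cap);
    free(buf);
    return 0;
}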
void LibRaw::exp_bef(float shift, float smooth) { // params limits if(shift>8) shift = 8; if(shift<0.25) shift = 0.25; if(smooth < 0.0) smooth = 0.0; if(smooth > 1.0) smooth = 1.0; unsigned short *lut = (ushort*)malloc((TBLN+1)*sizeof(unsigned short)); if(shift <=1.0) { for(int i=0;i<=TBLN;i++) lut[i] = (unsigned short)((float)i*shift); } else { float x1,x2,y1,y2; float cstops = log(shift)/log(2.0f); float room = cstops*2; float roomlin = powf(2.0f,room); x2 = (float)TBLN; x1 = (x2+1)/roomlin-1; y1 = x1*shift; y2 = x2*(1+(1-smooth)*(shift-1)); float sq3x=powf(x1*x1*x2,1.0f/3.0f); float B = (y2-y1+shift*(3*x1-3.0f*sq3x)) / (x2+2.0f*x1-3.0f*sq3x); float A = (shift - B)*3.0f*powf(x1*x1,1.0f/3.0f); float CC = y2 - A*powf(x2,1.0f/3.0f)-B*x2; for(int i=0;i<=TBLN;i++) { float X = (float)i; float Y = A*powf(X,1.0f/3.0f)+B*X+CC; if(i<x1) lut[i] = (unsigned short)((float)i*shift); else lut[i] = Y<0?0:(Y>TBLN?TBLN:(unsigned short)(Y)); } } for(int i=0; i< S.height*S.width; i++) { imgdata.image[i][0] = lut[imgdata.image[i][0]]; imgdata.image[i][1] = lut[imgdata.image[i][1]]; imgdata.image[i][2] = lut[imgdata.image[i][2]]; imgdata.image[i][3] = lut[imgdata.image[i][3]]; } C.data_maximum = lut[C.data_maximum]; C.maximum = lut[C.maximum]; // no need to adjust the minimum, black is already subtracted free(lut); }
1
[ "CWE-119", "CWE-787" ]
LibRaw
2f912f5b33582961b1cdbd9fd828589f8b78f21d
152,471,740,197,897,450,000,000,000,000,000,000,000
53
fixed wrong data_maximum calculation; prevent out-of-buffer access in exp_bef
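The exp_bef() above (the vulnerable version) fills lut[] with TBLN+1 entries and then indexes it with image values, and the fix message points at a wrong data_maximum plus an out-of-buffer access. A sketch of the defensive lookup, assuming a 16-bit-style table as in the function (illustrative only, not LibRaw's actual patch):

#include <stdio.h>

#define TBLN 65535  /* the lut above holds TBLN+1 entries */

/* Clamp every index so a pixel value (or a miscalculated data_maximum)
 * can never read past lut[TBLN]. */
static unsigned short lut_at(const unsigned short *lut, unsigned v)
{
    return lut[v > TBLN ? TBLN : v];
}

int main(void)
{
    static unsigned short lut[TBLN + 1];
    for (unsigned i = 0; i <= TBLN; i++)
        lut[i] = (unsigned short)i;
    printf("%u\n", lut_at(lut, 70000u)); /* clamped: prints 65535 */
    return 0;
}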
int git_index_snapshot_find( size_t *out, git_vector *entries, git_vector_cmp entry_srch, const char *path, size_t path_len, int stage) { return index_find_in_entries(out, entries, entry_srch, path, path_len, stage); }
0
[ "CWE-415", "CWE-190" ]
libgit2
3db1af1f370295ad5355b8f64b865a2a357bcac0
11,113,038,455,120,042,000,000,000,000,000,000,000
6
index: error out on unreasonable prefix-compressed path lengths When computing the complete path length from the encoded prefix-compressed path, we end up just allocating the complete path without ever checking what the encoded path length actually is. This can easily lead to a denial of service by just encoding an unreasonable long path name inside of the index. Git already enforces a maximum path length of 4096 bytes. As we also have that enforcement ready in some places, just make sure that the resulting path is smaller than GIT_PATH_MAX. Reported-by: Krishna Ram Prakash R <[email protected]> Reported-by: Vivek Parikh <[email protected]>
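A sketch of the kind of check the message describes, with an invented helper name and the 4096-byte bound it cites: validate the reconstructed prefix-compressed path length before allocating anything.

#include <stddef.h>
#include <stdio.h>

#define PATH_LIMIT 4096  /* mirrors the 4096-byte maximum the message cites */

/* Reject unreasonable on-disk lengths instead of trusting them. */
static int checked_path_len(size_t prefix_len, size_t suffix_len, size_t *out)
{
    if (suffix_len > PATH_LIMIT || prefix_len > PATH_LIMIT - suffix_len)
        return -1;       /* encoded value is unreasonable: error out */
    *out = prefix_len + suffix_len;
    return 0;
}

int main(void)
{
    size_t len;
    printf("%d\n", checked_path_len(10, 20, &len));     /* 0: accepted */
    printf("%d\n", checked_path_len(4000, 500, &len));  /* -1: rejected */
    return 0;
}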
ebb_ews_store_x_attribute (EContact *contact, const gchar *xname, const gchar *value) { EVCardAttribute *attr; g_return_if_fail (E_IS_CONTACT (contact)); g_return_if_fail (xname != NULL); ebb_ews_remove_x_attribute (contact, xname); if (!value) return; attr = e_vcard_attribute_new ("", xname); e_vcard_attribute_add_value (attr, value); e_vcard_add_attribute (E_VCARD (contact), attr); }
0
[ "CWE-295" ]
evolution-ews
915226eca9454b8b3e5adb6f2fff9698451778de
142,847,021,856,113,870,000,000,000,000,000,000,000
18
I#27 - SSL Certificates are not validated This depends on https://gitlab.gnome.org/GNOME/evolution-data-server/commit/6672b8236139bd6ef41ecb915f4c72e2a052dba5 too. Closes https://gitlab.gnome.org/GNOME/evolution-ews/issues/27
static inline void cpu_to_ube32(uint8_t *buf, unsigned int val) { buf[0] = val >> 24; buf[1] = val >> 16; buf[2] = val >> 8; buf[3] = val & 0xff; }
0
[]
qemu
ce560dcf20c14194db5ef3b9fc1ea592d4e68109
230,678,413,681,133,530,000,000,000,000,000,000,000
7
ATAPI: STARTSTOPUNIT only eject/load media if powercondition is 0 The START STOP UNIT command will only eject/load media if power condition is zero. If power condition is !0 then LOEJ and START will be ignored. From MMC (sbc contains similar wordings too) The Power Conditions field requests the block device to be placed in the power condition defined in Table 558. If this field has a value other than 0h then the Start and LoEj bits shall be ignored. Signed-off-by: Ronnie Sahlberg <[email protected]> Signed-off-by: Kevin Wolf <[email protected]>
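The quoted MMC wording maps directly onto the CDB layout. A hedged sketch of the decode (field positions per the SCSI START STOP UNIT command; should_eject is an invented helper, not QEMU's code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* In START STOP UNIT the Power Conditions field occupies bits 7:4 of CDB
 * byte 4; when it is non-zero, the Start (bit 0) and LoEj (bit 1) bits
 * shall be ignored. */
static bool should_eject(const uint8_t *cdb)
{
    uint8_t pwrcnd = cdb[4] & 0xf0;
    bool start = cdb[4] & 0x01;
    bool loej  = cdb[4] & 0x02;

    if (pwrcnd != 0)
        return false;        /* power condition set: ignore LoEj/Start */
    return loej && !start;   /* LoEj=1, Start=0 requests an eject */
}

int main(void)
{
    uint8_t eject[6] = { 0x1b, 0, 0, 0, 0x02, 0 };  /* LoEj, pwrcnd=0 */
    uint8_t idle[6]  = { 0x1b, 0, 0, 0, 0x22, 0 };  /* LoEj, pwrcnd=2 */
    printf("%d %d\n", should_eject(eject), should_eject(idle)); /* 1 0 */
    return 0;
}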
gs_manager_finalize (GObject *object) { GSManager *manager; g_return_if_fail (object != NULL); g_return_if_fail (GS_IS_MANAGER (object)); manager = GS_MANAGER (object); g_return_if_fail (manager->priv != NULL); if (manager->priv->bg_notify_id != 0) { gconf_client_remove_dir (manager->priv->client, GNOME_BG_KEY_DIR, NULL); gconf_client_notify_remove (manager->priv->client, manager->priv->bg_notify_id); manager->priv->bg_notify_id = 0; } if (manager->priv->bg != NULL) { g_object_unref (manager->priv->bg); } if (manager->priv->client != NULL) { g_object_unref (manager->priv->client); } free_themes (manager); g_free (manager->priv->logout_command); g_free (manager->priv->keyboard_command); g_free (manager->priv->away_message); remove_unfade_idle (manager); remove_timers (manager); gs_grab_release (manager->priv->grab); manager_stop_jobs (manager); gs_manager_destroy_windows (manager); manager->priv->active = FALSE; manager->priv->activate_time = 0; manager->priv->lock_enabled = FALSE; g_object_unref (manager->priv->fade); g_object_unref (manager->priv->grab); g_object_unref (manager->priv->theme_manager); G_OBJECT_CLASS (gs_manager_parent_class)->finalize (object); }
1
[]
gnome-screensaver
f6d3defdc7080a540d7f8df15dc309a9364ae668
340,204,079,059,999,640,000,000,000,000,000,000,000
50
Create or remove windows as number of monitors changes due to randr 1.2 2008-08-20 William Jon McCann <[email protected]> * src/gs-manager.c (gs_manager_create_window_for_monitor), (on_screen_monitors_changed), (gs_manager_destroy_windows), (gs_manager_finalize), (gs_manager_create_windows_for_screen): Create or remove windows as number of monitors changes due to randr 1.2 goodness. svn path=/trunk/; revision=1483
size_t olm_pk_max_plaintext_length( OlmPkDecryption * decryption, size_t ciphertext_length ) { return _olm_cipher_aes_sha_256_ops.decrypt_max_plaintext_length( olm_pk_cipher, olm::decode_base64_length(ciphertext_length) ); }
0
[ "CWE-787" ]
olm
ccc0d122ee1b4d5e5ca4ec1432086be17d5f901b
225,070,997,079,150,530,000,000,000,000,000,000,000
8
olm_pk_decrypt: Ensure inputs are of correct length.
policies_parse_exit_policy_internal(config_line_t *cfg, smartlist_t **dest, int ipv6_exit, int rejectprivate, const smartlist_t *configured_addresses, int reject_interface_addresses, int reject_configured_port_addresses, int add_default_policy) { if (!ipv6_exit) { append_exit_policy_string(dest, "reject *6:*"); } if (rejectprivate) { /* Reject IPv4 and IPv6 reserved private netblocks */ append_exit_policy_string(dest, "reject private:*"); } /* Consider rejecting IPv4 and IPv6 advertised relay addresses, outbound bind * addresses, publicly routable addresses, and configured port addresses * on this exit relay */ policies_parse_exit_policy_reject_private(dest, ipv6_exit, configured_addresses, reject_interface_addresses, reject_configured_port_addresses); if (parse_addr_policy(cfg, dest, -1)) return -1; /* Before we add the default policy and final rejects, check to see if * there are any lines after accept *:* or reject *:*. These lines have no * effect, and are most likely an error. */ policies_log_first_redundant_entry(*dest); if (add_default_policy) { append_exit_policy_string(dest, DEFAULT_EXIT_POLICY); } else { append_exit_policy_string(dest, "reject *4:*"); append_exit_policy_string(dest, "reject *6:*"); } exit_policy_remove_redundancies(*dest); return 0; }
0
[]
tor
1afc2ed956a35b40dfd1d207652af5b50c295da7
43,023,115,199,804,620,000,000,000,000,000,000,000
43
Fix policies.c instance of the "if (r=(a-b)) return r" pattern I think this one probably can't underflow, since the input ranges are small. But let's not tempt fate. This patch also replaces the "cmp" functions here with just "eq" functions, since nothing actually checked for anything besides 0 and nonzero. Related to 21278.
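For reference, a tiny sketch of why the "return a - b" comparison pattern is dangerous and what a safe three-way comparison looks like (illustrative; the patch itself replaces the cmp functions with plain equality tests):

#include <limits.h>
#include <stdio.h>

static int cmp_unsafe(int a, int b) { return a - b; }           /* can overflow */
static int cmp_safe(int a, int b)   { return (a > b) - (a < b); }

int main(void)
{
    (void)cmp_unsafe;  /* e.g. INT_MIN - 1 here would be undefined behaviour */
    printf("%d\n", cmp_safe(INT_MIN, 1));  /* prints -1, no overflow */
    return 0;
}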
static int ip_vs_info_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) { seq_printf(seq, "IP Virtual Server version %d.%d.%d (size=%d)\n", NVERSION(IP_VS_VERSION_CODE), IP_VS_CONN_TAB_SIZE); seq_puts(seq, "Prot LocalAddress:Port Scheduler Flags\n"); seq_puts(seq, " -> RemoteAddress:Port Forward Weight ActiveConn InActConn\n"); } else { const struct ip_vs_service *svc = v; const struct ip_vs_iter *iter = seq->private; const struct ip_vs_dest *dest; if (iter->table == ip_vs_svc_table) { #ifdef CONFIG_IP_VS_IPV6 if (svc->af == AF_INET6) seq_printf(seq, "%s [%pI6]:%04X %s ", ip_vs_proto_name(svc->protocol), &svc->addr.in6, ntohs(svc->port), svc->scheduler->name); else #endif seq_printf(seq, "%s %08X:%04X %s ", ip_vs_proto_name(svc->protocol), ntohl(svc->addr.ip), ntohs(svc->port), svc->scheduler->name); } else { seq_printf(seq, "FWM %08X %s ", svc->fwmark, svc->scheduler->name); } if (svc->flags & IP_VS_SVC_F_PERSISTENT) seq_printf(seq, "persistent %d %08X\n", svc->timeout, ntohl(svc->netmask)); else seq_putc(seq, '\n'); list_for_each_entry(dest, &svc->destinations, n_list) { #ifdef CONFIG_IP_VS_IPV6 if (dest->af == AF_INET6) seq_printf(seq, " -> [%pI6]:%04X" " %-7s %-6d %-10d %-10d\n", &dest->addr.in6, ntohs(dest->port), ip_vs_fwd_name(atomic_read(&dest->conn_flags)), atomic_read(&dest->weight), atomic_read(&dest->activeconns), atomic_read(&dest->inactconns)); else #endif seq_printf(seq, " -> %08X:%04X " "%-7s %-6d %-10d %-10d\n", ntohl(dest->addr.ip), ntohs(dest->port), ip_vs_fwd_name(atomic_read(&dest->conn_flags)), atomic_read(&dest->weight), atomic_read(&dest->activeconns), atomic_read(&dest->inactconns)); } } return 0; }
0
[ "CWE-119", "CWE-787" ]
linux
04bcef2a83f40c6db24222b27a52892cba39dffb
193,899,404,618,802,530,000,000,000,000,000,000,000
70
ipvs: Add boundary check on ioctl arguments The ipvs code has a nifty system for doing the size of ioctl command copies; it defines an array with values into which it indexes the cmd to find the right length. Unfortunately, the ipvs code forgot to check if the cmd was in the range that the array provides, allowing for an index outside of the array, which then yields a garbage length that is then used for copying into a stack buffer. Fix this by adding sanity checks on these as well as the copy size. [ [email protected]: adjusted limit to IP_VS_SO_GET_MAX ] Signed-off-by: Arjan van de Ven <[email protected]> Acked-by: Julian Anastasov <[email protected]> Signed-off-by: Simon Horman <[email protected]> Signed-off-by: Patrick McHardy <[email protected]>
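A sketch of the added boundary check, with an invented table standing in for IPVS's per-command length array:

#include <stddef.h>
#include <stdio.h>

static const size_t arglen_table[] = { 8, 16, 24 };
#define CMD_MAX (sizeof(arglen_table) / sizeof(arglen_table[0]) - 1)

/* Validate the command index before it is used to index the table. */
static int get_arglen(unsigned int cmd, size_t *len)
{
    if (cmd > CMD_MAX)
        return -1;       /* out-of-range cmd: no garbage length */
    *len = arglen_table[cmd];
    return 0;
}

int main(void)
{
    size_t len;
    printf("%d\n", get_arglen(1, &len));   /* 0, len = 16 */
    printf("%d\n", get_arglen(7, &len));   /* -1, rejected */
    return 0;
}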
static int init_rmode_identity_map(struct kvm *kvm) { int i, r, ret; pfn_t identity_map_pfn; u32 tmp; if (!enable_ept) return 1; if (unlikely(!kvm->arch.ept_identity_pagetable)) { printk(KERN_ERR "EPT: identity-mapping pagetable " "haven't been allocated!\n"); return 0; } if (likely(kvm->arch.ept_identity_pagetable_done)) return 1; ret = 0; identity_map_pfn = kvm->arch.ept_identity_map_addr >> PAGE_SHIFT; r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE); if (r < 0) goto out; /* Set up identity-mapping pagetable for EPT in real mode */ for (i = 0; i < PT32_ENT_PER_PAGE; i++) { tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE); r = kvm_write_guest_page(kvm, identity_map_pfn, &tmp, i * sizeof(tmp), sizeof(tmp)); if (r < 0) goto out; } kvm->arch.ept_identity_pagetable_done = true; ret = 1; out: return ret; }
0
[ "CWE-400" ]
linux-2.6
9581d442b9058d3699b4be568b6e5eae38a41493
92,108,942,488,564,020,000,000,000,000,000,000,000
34
KVM: Fix fs/gs reload oops with invalid ldt kvm reloads the host's fs and gs blindly, however the underlying segment descriptors may be invalid due to the user modifying the ldt after loading them. Fix by using the safe accessors (loadsegment() and load_gs_index()) instead of home grown unsafe versions. This is CVE-2010-3698. KVM-Stable-Tag. Signed-off-by: Avi Kivity <[email protected]> Signed-off-by: Marcelo Tosatti <[email protected]>
void qmp_guest_suspend_disk(Error **errp) { Error *local_err = NULL; GuestSuspendMode *mode = g_new(GuestSuspendMode, 1); *mode = GUEST_SUSPEND_MODE_DISK; check_suspend_mode(*mode, &local_err); acquire_privilege(SE_SHUTDOWN_NAME, &local_err); execute_async(do_suspend, mode, &local_err); if (local_err) { error_propagate(errp, local_err); g_free(mode); } }
0
[ "CWE-190" ]
qemu
141b197408ab398c4f474ac1a728ab316e921f2b
65,110,341,573,646,400,000,000,000,000,000,000,000
15
qga: check bytes count read by guest-file-read While reading file content via the 'guest-file-read' command, the 'qmp_guest_file_read' routine allocates a buffer of count+1 bytes. It could overflow for large values of 'count'. Add a check to avoid it. Reported-by: Fakhri Zulkifli <[email protected]> Signed-off-by: Prasad J Pandit <[email protected]> Cc: [email protected] Signed-off-by: Michael Roth <[email protected]>
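A sketch of the added check, with an invented cap constant (the exact limit is QGA's choice): with an unchecked 64-bit count, count + 1 can wrap to zero, so malloc() hands back a tiny allocation that the subsequent read overruns.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define READ_COUNT_CAP (48u * 1024 * 1024)  /* illustrative cap */

static void *alloc_read_buf(uint64_t count)
{
    if (count > READ_COUNT_CAP)
        return NULL;                    /* refuse before the +1 can wrap */
    return malloc((size_t)count + 1);   /* +1 for the NUL terminator */
}

int main(void)
{
    void *p = alloc_read_buf(UINT64_MAX);
    printf("%s\n", p ? "allocated" : "rejected");  /* prints "rejected" */
    free(p);
    return 0;
}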
void *Type_ProfileSequenceDesc_Read(struct _cms_typehandler_struct* self, cmsIOHANDLER* io, cmsUInt32Number* nItems, cmsUInt32Number SizeOfTag) { cmsSEQ* OutSeq; cmsUInt32Number i, Count; *nItems = 0; if (!_cmsReadUInt32Number(io, &Count)) return NULL; if (SizeOfTag < sizeof(cmsUInt32Number)) return NULL; SizeOfTag -= sizeof(cmsUInt32Number); OutSeq = cmsAllocProfileSequenceDescription(self ->ContextID, Count); if (OutSeq == NULL) return NULL; OutSeq ->n = Count; // Get structures as well for (i=0; i < Count; i++) { cmsPSEQDESC* sec = &OutSeq -> seq[i]; if (!_cmsReadUInt32Number(io, &sec ->deviceMfg)) goto Error; if (SizeOfTag < sizeof(cmsUInt32Number)) goto Error; SizeOfTag -= sizeof(cmsUInt32Number); if (!_cmsReadUInt32Number(io, &sec ->deviceModel)) goto Error; if (SizeOfTag < sizeof(cmsUInt32Number)) goto Error; SizeOfTag -= sizeof(cmsUInt32Number); if (!_cmsReadUInt64Number(io, &sec ->attributes)) goto Error; if (SizeOfTag < sizeof(cmsUInt32Number)) goto Error; SizeOfTag -= sizeof(cmsUInt64Number); if (!_cmsReadUInt32Number(io, (cmsUInt32Number *)&sec ->technology)) goto Error; if (SizeOfTag < sizeof(cmsUInt32Number)) goto Error; SizeOfTag -= sizeof(cmsUInt32Number); if (!ReadEmbeddedText(self, io, &sec ->Manufacturer, SizeOfTag)) goto Error; if (!ReadEmbeddedText(self, io, &sec ->Model, SizeOfTag)) goto Error; } *nItems = 1; return OutSeq; Error: cmsFreeProfileSequenceDescription(OutSeq); return NULL; }
1
[]
Little-CMS
886e2f524268efe8a1c3aa838c28e446fda24486
178,317,971,097,833,900,000,000,000,000,000,000,000
51
Fixes from coverity check
GF_Err gnrv_Read(GF_Box *s, GF_BitStream *bs) { return GF_OK; }
0
[ "CWE-400", "CWE-401" ]
gpac
d2371b4b204f0a3c0af51ad4e9b491144dd1225c
287,330,678,435,310,300,000,000,000,000,000,000,000
4
prevent dref memleak on invalid input (#1183)
coroutine_fn iscsi_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes) { IscsiLun *iscsilun = bs->opaque; struct IscsiTask iTask; struct unmap_list list; int r = 0; if (!is_byte_request_lun_aligned(offset, bytes, iscsilun)) { return -ENOTSUP; } if (!iscsilun->lbp.lbpu) { /* UNMAP is not supported by the target */ return 0; } list.lba = offset / iscsilun->block_size; list.num = bytes / iscsilun->block_size; iscsi_co_init_iscsitask(iscsilun, &iTask); qemu_mutex_lock(&iscsilun->mutex); retry: if (iscsi_unmap_task(iscsilun->iscsi, iscsilun->lun, 0, 0, &list, 1, iscsi_co_generic_cb, &iTask) == NULL) { r = -ENOMEM; goto out_unlock; } iscsi_co_wait_for_task(&iTask, iscsilun); if (iTask.task != NULL) { scsi_free_scsi_task(iTask.task); iTask.task = NULL; } if (iTask.do_retry) { iTask.complete = 0; goto retry; } iscsi_allocmap_set_invalid(iscsilun, offset, bytes); if (iTask.status == SCSI_STATUS_CHECK_CONDITION) { /* the target might fail with a check condition if it is not happy with the alignment of the UNMAP request we silently fail in this case */ goto out_unlock; } if (iTask.status != SCSI_STATUS_GOOD) { error_report("iSCSI UNMAP failed at lba %" PRIu64 ": %s", list.lba, iTask.err_str); r = iTask.err_code; goto out_unlock; } out_unlock: qemu_mutex_unlock(&iscsilun->mutex); g_free(iTask.err_str); return r; }
0
[ "CWE-125" ]
qemu
ff0507c239a246fd7215b31c5658fc6a3ee1e4c5
266,325,439,274,934,000,000,000,000,000,000,000,000
61
block/iscsi: fix heap-buffer-overflow in iscsi_aio_ioctl_cb

There is an overflow: the source 'datain.data[2]' is 100 bytes, but the 'ss' is 252 bytes. This may cause a security issue because we can access a lot of unrelated memory data.

The len for sbp copy data should take the minimum of mx_sb_len and sb_len_wr, not the maximum. If we use an iscsi device for VM backend storage, ASAN shows this stack:

READ of size 252 at 0xfffd149dcfc4 thread T0
 #0 0xaaad433d0d34 in __asan_memcpy (aarch64-softmmu/qemu-system-aarch64+0x2cb0d34)
 #1 0xaaad45f9d6d0 in iscsi_aio_ioctl_cb /qemu/block/iscsi.c:996:9
 #2 0xfffd1af0e2dc (/usr/lib64/iscsi/libiscsi.so.8+0xe2dc)
 #3 0xfffd1af0d174 (/usr/lib64/iscsi/libiscsi.so.8+0xd174)
 #4 0xfffd1af19fac (/usr/lib64/iscsi/libiscsi.so.8+0x19fac)
 #5 0xaaad45f9acc8 in iscsi_process_read /qemu/block/iscsi.c:403:5
 #6 0xaaad4623733c in aio_dispatch_handler /qemu/util/aio-posix.c:467:9
 #7 0xaaad4622f350 in aio_dispatch_handlers /qemu/util/aio-posix.c:510:20
 #8 0xaaad4622f350 in aio_dispatch /qemu/util/aio-posix.c:520
 #9 0xaaad46215944 in aio_ctx_dispatch /qemu/util/async.c:298:5
 #10 0xfffd1bed12f4 in g_main_context_dispatch (/lib64/libglib-2.0.so.0+0x512f4)
 #11 0xaaad46227de0 in glib_pollfds_poll /qemu/util/main-loop.c:219:9
 #12 0xaaad46227de0 in os_host_main_loop_wait /qemu/util/main-loop.c:242
 #13 0xaaad46227de0 in main_loop_wait /qemu/util/main-loop.c:518
 #14 0xaaad43d9d60c in qemu_main_loop /qemu/softmmu/vl.c:1662:9
 #15 0xaaad4607a5b0 in main /qemu/softmmu/main.c:49:5
 #16 0xfffd1a460b9c in __libc_start_main (/lib64/libc.so.6+0x20b9c)
 #17 0xaaad43320740 in _start (aarch64-softmmu/qemu-system-aarch64+0x2c00740)

0xfffd149dcfc4 is located 0 bytes to the right of 100-byte region [0xfffd149dcf60,0xfffd149dcfc4) allocated by thread T0 here:
 #0 0xaaad433d1e70 in __interceptor_malloc (aarch64-softmmu/qemu-system-aarch64+0x2cb1e70)
 #1 0xfffd1af0e254 (/usr/lib64/iscsi/libiscsi.so.8+0xe254)
 #2 0xfffd1af0d174 (/usr/lib64/iscsi/libiscsi.so.8+0xd174)
 #3 0xfffd1af19fac (/usr/lib64/iscsi/libiscsi.so.8+0x19fac)
 #4 0xaaad45f9acc8 in iscsi_process_read /qemu/block/iscsi.c:403:5
 #5 0xaaad4623733c in aio_dispatch_handler /qemu/util/aio-posix.c:467:9
 #6 0xaaad4622f350 in aio_dispatch_handlers /qemu/util/aio-posix.c:510:20
 #7 0xaaad4622f350 in aio_dispatch /qemu/util/aio-posix.c:520
 #8 0xaaad46215944 in aio_ctx_dispatch /qemu/util/async.c:298:5
 #9 0xfffd1bed12f4 in g_main_context_dispatch (/lib64/libglib-2.0.so.0+0x512f4)
 #10 0xaaad46227de0 in glib_pollfds_poll /qemu/util/main-loop.c:219:9
 #11 0xaaad46227de0 in os_host_main_loop_wait /qemu/util/main-loop.c:242
 #12 0xaaad46227de0 in main_loop_wait /qemu/util/main-loop.c:518
 #13 0xaaad43d9d60c in qemu_main_loop /qemu/softmmu/vl.c:1662:9
 #14 0xaaad4607a5b0 in main /qemu/softmmu/main.c:49:5
 #15 0xfffd1a460b9c in __libc_start_main (/lib64/libc.so.6+0x20b9c)
 #16 0xaaad43320740 in _start (aarch64-softmmu/qemu-system-aarch64+0x2c00740)

Reported-by: Euler Robot <[email protected]> Signed-off-by: Chen Qun <[email protected]> Reviewed-by: Stefan Hajnoczi <[email protected]> Message-id: [email protected] Reviewed-by: Daniel P. Berrangé <[email protected]> Signed-off-by: Peter Maydell <[email protected]>
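A sketch of the stated rule, with invented helper names: the sense-data copy must be bounded by both the caller's buffer (mx_sb_len) and what the target actually returned, i.e. the minimum, never the maximum.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

static size_t copy_sense(uint8_t *sbp, uint8_t mx_sb_len,
                         const uint8_t *sense, size_t sense_len)
{
    size_t n = MIN((size_t)mx_sb_len, sense_len);
    memcpy(sbp, sense, n);  /* never copies past either bound */
    return n;
}

int main(void)
{
    uint8_t dst[252] = {0}, src[100] = {0};
    printf("%zu\n", copy_sense(dst, 252, src, sizeof src)); /* prints 100 */
    return 0;
}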
pci_get_cfgdata16(struct pci_vdev *dev, int offset) { if ((offset > PCI_REGMAX - 1) || (offset & 1) != 0) { pr_err("%s: out of range of PCI config space!\n", __func__); return 0xffff; } return (*(uint16_t *)(dev->cfgdata + offset)); }
0
[ "CWE-617", "CWE-703" ]
acrn-hypervisor
2b3dedfb9ba13f15887f22b935d373f36c9a59fa
194,365,449,031,342,000,000,000,000,000,000,000,000
8
dm: pci: clean up assert() in pci core Tracked-On: #3252 Signed-off-by: Shuo A Liu <[email protected]> Reviewed-by: Yonghua Huang <[email protected]>
int fat_subdirs(struct inode *dir) { struct buffer_head *bh; struct msdos_dir_entry *de; loff_t cpos; int count = 0; bh = NULL; cpos = 0; while (fat_get_short_entry(dir, &cpos, &bh, &de) >= 0) { if (de->attr & ATTR_DIR) count++; } brelse(bh); return count; }
0
[]
linux-2.6
c483bab099cb89e92b7cad94a52fcdaf37e56657
221,150,289,131,148,840,000,000,000,000,000,000,000
16
fat: fix VFAT compat ioctls on 64-bit systems

If you compile and run the below test case in an msdos or vfat directory on an x86-64 system with -m32 you'll get garbage in the kernel_dirent struct followed by a SIGSEGV. The patch fixes this.

Reported and initial fix by Bart Oldeman

	#include <sys/types.h>
	#include <sys/ioctl.h>
	#include <dirent.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <fcntl.h>

	struct kernel_dirent {
		long d_ino;
		long d_off;
		unsigned short d_reclen;
		char d_name[256]; /* We must not include limits.h! */
	};

	#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct kernel_dirent [2])
	#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct kernel_dirent [2])

	int main(void)
	{
		int fd = open(".", O_RDONLY);
		struct kernel_dirent de[2];

		while (1) {
			int i = ioctl(fd, VFAT_IOCTL_READDIR_BOTH, (long)de);
			if (i == -1) break;
			if (de[0].d_reclen == 0) break;
			printf("SFN: reclen=%2d off=%d ino=%d, %-12s",
			       de[0].d_reclen, de[0].d_off, de[0].d_ino, de[0].d_name);
			if (de[1].d_reclen)
				printf("\tLFN: reclen=%2d off=%d ino=%d, %s",
				       de[1].d_reclen, de[1].d_off, de[1].d_ino, de[1].d_name);
			printf("\n");
		}
		return 0;
	}

Signed-off-by: Bart Oldeman <[email protected]> Signed-off-by: OGAWA Hirofumi <[email protected]> Cc: <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
set_max_block_time(self, time) VALUE self; VALUE time; { struct Tcl_Time tcl_time; VALUE divmod; switch(TYPE(time)) { case T_FIXNUM: case T_BIGNUM: /* time is micro-second value */ divmod = rb_funcall(time, rb_intern("divmod"), 1, LONG2NUM(1000000)); tcl_time.sec = NUM2LONG(RARRAY_PTR(divmod)[0]); tcl_time.usec = NUM2LONG(RARRAY_PTR(divmod)[1]); break; case T_FLOAT: /* time is second value */ divmod = rb_funcall(time, rb_intern("divmod"), 1, INT2FIX(1)); tcl_time.sec = NUM2LONG(RARRAY_PTR(divmod)[0]); tcl_time.usec = (long)(NUM2DBL(RARRAY_PTR(divmod)[1]) * 1000000); break; /* without this break, Float input would fall through and raise */ default: { VALUE tmp = rb_funcallv(time, ID_inspect, 0, 0); rb_raise(rb_eArgError, "invalid value for time: '%s'", StringValuePtr(tmp)); } } Tcl_SetMaxBlockTime(&tcl_time); return Qnil; }
0
[]
tk
ebd0fc80d62eeb7b8556522256f8d035e013eb65
322,811,910,419,656,770,000,000,000,000,000,000,000
34
tcltklib.c: check argument * ext/tk/tcltklib.c (ip_cancel_eval_core): check argument type and length. git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@51468 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
static void jsR_callcfunction(js_State *J, int n, int min, js_CFunction F) { int i; js_Value v; for (i = n; i < min; ++i) js_pushundefined(J); F(J); v = *stackidx(J, -1); TOP = --BOT; /* clear stack */ js_pushvalue(J, v); }
0
[ "CWE-476" ]
mujs
77ab465f1c394bb77f00966cd950650f3f53cb24
39,501,193,780,260,324,000,000,000,000,000,000,000
13
Fix 697401: Error when dropping extra arguments to lightweight functions.
static __latent_entropy struct task_struct *copy_process( unsigned long clone_flags, unsigned long stack_start, unsigned long stack_size, int __user *child_tidptr, struct pid *pid, int trace, unsigned long tls, int node) { int retval; struct task_struct *p; struct multiprocess_signals delayed; /* * Don't allow sharing the root directory with processes in a different * namespace */ if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS)) return ERR_PTR(-EINVAL); if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS)) return ERR_PTR(-EINVAL); /* * Thread groups must share signals as well, and detached threads * can only be started up within the thread group. */ if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND)) return ERR_PTR(-EINVAL); /* * Shared signal handlers imply shared VM. By way of the above, * thread groups also imply shared VM. Blocking this case allows * for various simplifications in other code. */ if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM)) return ERR_PTR(-EINVAL); /* * Siblings of global init remain as zombies on exit since they are * not reaped by their parent (swapper). To solve this and to avoid * multi-rooted process trees, prevent global and container-inits * from creating siblings. */ if ((clone_flags & CLONE_PARENT) && current->signal->flags & SIGNAL_UNKILLABLE) return ERR_PTR(-EINVAL); /* * If the new process will be in a different pid or user namespace * do not allow it to share a thread group with the forking task. */ if (clone_flags & CLONE_THREAD) { if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) || (task_active_pid_ns(current) != current->nsproxy->pid_ns_for_children)) return ERR_PTR(-EINVAL); } /* * Force any signals received before this point to be delivered * before the fork happens. Collect up signals sent to multiple * processes that happen during the fork and delay them so that * they appear to happen after the fork. */ sigemptyset(&delayed.signal); INIT_HLIST_NODE(&delayed.node); spin_lock_irq(&current->sighand->siglock); if (!(clone_flags & CLONE_THREAD)) hlist_add_head(&delayed.node, &current->signal->multiprocess); recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); retval = -ERESTARTNOINTR; if (signal_pending(current)) goto fork_out; retval = -ENOMEM; p = dup_task_struct(current, node); if (!p) goto fork_out; /* * This _must_ happen before we call free_task(), i.e. before we jump * to any of the bad_fork_* labels. This is to avoid freeing * p->set_child_tid which is (ab)used as a kthread's data pointer for * kernel threads (PF_KTHREAD). */ p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; /* * Clear TID on mm_release()? */ p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL; ftrace_graph_init_task(p); rt_mutex_init_task(p); #ifdef CONFIG_PROVE_LOCKING DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled); DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); #endif retval = -EAGAIN; if (atomic_read(&p->real_cred->user->processes) >= task_rlimit(p, RLIMIT_NPROC)) { if (p->real_cred->user != INIT_USER && !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) goto bad_fork_free; } current->flags &= ~PF_NPROC_EXCEEDED; retval = copy_creds(p, clone_flags); if (retval < 0) goto bad_fork_free; /* * If multiple threads are within copy_process(), then this check * triggers too late. This doesn't hurt, the check is only there * to stop root fork bombs. 
*/ retval = -EAGAIN; if (nr_threads >= max_threads) goto bad_fork_cleanup_count; delayacct_tsk_init(p); /* Must remain after dup_task_struct() */ p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE); p->flags |= PF_FORKNOEXEC; INIT_LIST_HEAD(&p->children); INIT_LIST_HEAD(&p->sibling); rcu_copy_process(p); p->vfork_done = NULL; spin_lock_init(&p->alloc_lock); init_sigpending(&p->pending); p->utime = p->stime = p->gtime = 0; #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME p->utimescaled = p->stimescaled = 0; #endif prev_cputime_init(&p->prev_cputime); #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN seqcount_init(&p->vtime.seqcount); p->vtime.starttime = 0; p->vtime.state = VTIME_INACTIVE; #endif #if defined(SPLIT_RSS_COUNTING) memset(&p->rss_stat, 0, sizeof(p->rss_stat)); #endif p->default_timer_slack_ns = current->timer_slack_ns; #ifdef CONFIG_PSI p->psi_flags = 0; #endif task_io_accounting_init(&p->ioac); acct_clear_integrals(p); posix_cpu_timers_init(p); p->io_context = NULL; audit_set_context(p, NULL); cgroup_fork(p); #ifdef CONFIG_NUMA p->mempolicy = mpol_dup(p->mempolicy); if (IS_ERR(p->mempolicy)) { retval = PTR_ERR(p->mempolicy); p->mempolicy = NULL; goto bad_fork_cleanup_threadgroup_lock; } #endif #ifdef CONFIG_CPUSETS p->cpuset_mem_spread_rotor = NUMA_NO_NODE; p->cpuset_slab_spread_rotor = NUMA_NO_NODE; seqcount_init(&p->mems_allowed_seq); #endif #ifdef CONFIG_TRACE_IRQFLAGS p->irq_events = 0; p->hardirqs_enabled = 0; p->hardirq_enable_ip = 0; p->hardirq_enable_event = 0; p->hardirq_disable_ip = _THIS_IP_; p->hardirq_disable_event = 0; p->softirqs_enabled = 1; p->softirq_enable_ip = _THIS_IP_; p->softirq_enable_event = 0; p->softirq_disable_ip = 0; p->softirq_disable_event = 0; p->hardirq_context = 0; p->softirq_context = 0; #endif p->pagefault_disabled = 0; #ifdef CONFIG_LOCKDEP p->lockdep_depth = 0; /* no locks held yet */ p->curr_chain_key = 0; p->lockdep_recursion = 0; lockdep_init_task(p); #endif #ifdef CONFIG_DEBUG_MUTEXES p->blocked_on = NULL; /* not blocked yet */ #endif #ifdef CONFIG_BCACHE p->sequential_io = 0; p->sequential_io_avg = 0; #endif /* Perform scheduler related setup. Assign this task to a CPU. 
*/ retval = sched_fork(clone_flags, p); if (retval) goto bad_fork_cleanup_policy; retval = perf_event_init_task(p); if (retval) goto bad_fork_cleanup_policy; retval = audit_alloc(p); if (retval) goto bad_fork_cleanup_perf; /* copy all the process information */ shm_init_task(p); retval = security_task_alloc(p, clone_flags); if (retval) goto bad_fork_cleanup_audit; retval = copy_semundo(clone_flags, p); if (retval) goto bad_fork_cleanup_security; retval = copy_files(clone_flags, p); if (retval) goto bad_fork_cleanup_semundo; retval = copy_fs(clone_flags, p); if (retval) goto bad_fork_cleanup_files; retval = copy_sighand(clone_flags, p); if (retval) goto bad_fork_cleanup_fs; retval = copy_signal(clone_flags, p); if (retval) goto bad_fork_cleanup_sighand; retval = copy_mm(clone_flags, p); if (retval) goto bad_fork_cleanup_signal; retval = copy_namespaces(clone_flags, p); if (retval) goto bad_fork_cleanup_mm; retval = copy_io(clone_flags, p); if (retval) goto bad_fork_cleanup_namespaces; retval = copy_thread_tls(clone_flags, stack_start, stack_size, p, tls); if (retval) goto bad_fork_cleanup_io; stackleak_task_init(p); if (pid != &init_struct_pid) { pid = alloc_pid(p->nsproxy->pid_ns_for_children); if (IS_ERR(pid)) { retval = PTR_ERR(pid); goto bad_fork_cleanup_thread; } } #ifdef CONFIG_BLOCK p->plug = NULL; #endif #ifdef CONFIG_FUTEX p->robust_list = NULL; #ifdef CONFIG_COMPAT p->compat_robust_list = NULL; #endif INIT_LIST_HEAD(&p->pi_state_list); p->pi_state_cache = NULL; #endif /* * sigaltstack should be cleared when sharing the same VM */ if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM) sas_ss_reset(p); /* * Syscall tracing and stepping should be turned off in the * child regardless of CLONE_PTRACE. */ user_disable_single_step(p); clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE); #ifdef TIF_SYSCALL_EMU clear_tsk_thread_flag(p, TIF_SYSCALL_EMU); #endif clear_all_latency_tracing(p); /* ok, now we should be set up.. */ p->pid = pid_nr(pid); if (clone_flags & CLONE_THREAD) { p->exit_signal = -1; p->group_leader = current->group_leader; p->tgid = current->tgid; } else { if (clone_flags & CLONE_PARENT) p->exit_signal = current->group_leader->exit_signal; else p->exit_signal = (clone_flags & CSIGNAL); p->group_leader = p; p->tgid = p->pid; } p->nr_dirtied = 0; p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10); p->dirty_paused_when = 0; p->pdeath_signal = 0; INIT_LIST_HEAD(&p->thread_group); p->task_works = NULL; cgroup_threadgroup_change_begin(current); /* * Ensure that the cgroup subsystem policies allow the new process to be * forked. It should be noted that the new process's css_set can be changed * between here and cgroup_post_fork() if an organisation operation is in * progress. */ retval = cgroup_can_fork(p); if (retval) goto bad_fork_free_pid; /* * From this point on we must avoid any synchronous user-space * communication until we take the tasklist-lock. In particular, we do * not want user-space to be able to predict the process start-time by * stalling fork(2) after we recorded the start_time but before it is * visible to the system. */ p->start_time = ktime_get_ns(); p->real_start_time = ktime_get_boot_ns(); /* * Make it visible to the rest of the system, but don't wake it up yet. * Need tasklist lock for parent etc handling! 
*/ write_lock_irq(&tasklist_lock); /* CLONE_PARENT re-uses the old parent */ if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) { p->real_parent = current->real_parent; p->parent_exec_id = current->parent_exec_id; } else { p->real_parent = current; p->parent_exec_id = current->self_exec_id; } klp_copy_process(p); spin_lock(&current->sighand->siglock); /* * Copy seccomp details explicitly here, in case they were changed * before holding sighand lock. */ copy_seccomp(p); rseq_fork(p, clone_flags); /* Don't start children in a dying pid namespace */ if (unlikely(!(ns_of_pid(pid)->pid_allocated & PIDNS_ADDING))) { retval = -ENOMEM; goto bad_fork_cancel_cgroup; } /* Let kill terminate clone/fork in the middle */ if (fatal_signal_pending(current)) { retval = -EINTR; goto bad_fork_cancel_cgroup; } init_task_pid_links(p); if (likely(p->pid)) { ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace); init_task_pid(p, PIDTYPE_PID, pid); if (thread_group_leader(p)) { init_task_pid(p, PIDTYPE_TGID, pid); init_task_pid(p, PIDTYPE_PGID, task_pgrp(current)); init_task_pid(p, PIDTYPE_SID, task_session(current)); if (is_child_reaper(pid)) { ns_of_pid(pid)->child_reaper = p; p->signal->flags |= SIGNAL_UNKILLABLE; } p->signal->shared_pending.signal = delayed.signal; p->signal->tty = tty_kref_get(current->signal->tty); /* * Inherit has_child_subreaper flag under the same * tasklist_lock with adding child to the process tree * for propagate_has_child_subreaper optimization. */ p->signal->has_child_subreaper = p->real_parent->signal->has_child_subreaper || p->real_parent->signal->is_child_subreaper; list_add_tail(&p->sibling, &p->real_parent->children); list_add_tail_rcu(&p->tasks, &init_task.tasks); attach_pid(p, PIDTYPE_TGID); attach_pid(p, PIDTYPE_PGID); attach_pid(p, PIDTYPE_SID); __this_cpu_inc(process_counts); } else { current->signal->nr_threads++; atomic_inc(&current->signal->live); atomic_inc(&current->signal->sigcnt); task_join_group_stop(p); list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group); list_add_tail_rcu(&p->thread_node, &p->signal->thread_head); } attach_pid(p, PIDTYPE_PID); nr_threads++; } total_forks++; hlist_del_init(&delayed.node); spin_unlock(&current->sighand->siglock); syscall_tracepoint_update(p); write_unlock_irq(&tasklist_lock); proc_fork_connector(p); cgroup_post_fork(p); cgroup_threadgroup_change_end(current); perf_event_fork(p); trace_task_newtask(p, clone_flags); uprobe_copy_process(p, clone_flags); return p; bad_fork_cancel_cgroup: spin_unlock(&current->sighand->siglock); write_unlock_irq(&tasklist_lock); cgroup_cancel_fork(p); bad_fork_free_pid: cgroup_threadgroup_change_end(current); if (pid != &init_struct_pid) free_pid(pid); bad_fork_cleanup_thread: exit_thread(p); bad_fork_cleanup_io: if (p->io_context) exit_io_context(p); bad_fork_cleanup_namespaces: exit_task_namespaces(p); bad_fork_cleanup_mm: if (p->mm) mmput(p->mm); bad_fork_cleanup_signal: if (!(clone_flags & CLONE_THREAD)) free_signal_struct(p->signal); bad_fork_cleanup_sighand: __cleanup_sighand(p->sighand); bad_fork_cleanup_fs: exit_fs(p); /* blocking */ bad_fork_cleanup_files: exit_files(p); /* blocking */ bad_fork_cleanup_semundo: exit_sem(p); bad_fork_cleanup_security: security_task_free(p); bad_fork_cleanup_audit: audit_free(p); bad_fork_cleanup_perf: perf_event_free_task(p); bad_fork_cleanup_policy: lockdep_free_task(p); #ifdef CONFIG_NUMA mpol_put(p->mempolicy); bad_fork_cleanup_threadgroup_lock: #endif delayacct_tsk_free(p); bad_fork_cleanup_count: atomic_dec(&p->cred->user->processes); 
exit_creds(p); bad_fork_free: p->state = TASK_DEAD; put_task_stack(p); free_task(p); fork_out: spin_lock_irq(&current->sighand->siglock); hlist_del_init(&delayed.node); spin_unlock_irq(&current->sighand->siglock); return ERR_PTR(retval); }
0
[ "CWE-362" ]
linux
7b55851367136b1efd84d98fea81ba57a98304cf
252,164,488,133,332,900,000,000,000,000,000,000,000
492
fork: record start_time late This changes the fork(2) syscall to record the process start_time after initializing the basic task structure but still before making the new process visible to user-space. Technically, we could record the start_time anytime during fork(2). But this might lead to scenarios where a start_time is recorded long before a process becomes visible to user-space. For instance, with userfaultfd(2) and TLS, user-space can delay the execution of fork(2) for an indefinite amount of time (and will, if this causes network access, or similar). By recording the start_time late, it more closely reflects the point in time where the process becomes live and can be observed by other processes. Lastly, this makes it much harder for user-space to predict and control the start_time they get assigned. Previously, user-space could fork a process and stall it in copy_thread_tls() before its pid is allocated, but after its start_time is recorded. This can be misused to later on cycle through PIDs and resume the stalled fork(2), yielding a process that has the same pid and start_time as a process that existed before. This can be used to circumvent security systems that identify processes by their pid+start_time combination. Even though user-space was always aware that start_time recording is flaky (but several projects are known to still rely on start_time-based identification), changing the start_time to be recorded late will help mitigate existing attacks and make it much harder for user-space to control the start_time a process gets assigned. Reported-by: Jann Horn <[email protected]> Signed-off-by: Tom Gundersen <[email protected]> Signed-off-by: David Herrmann <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
mlx5_tx_dseg_vlan(struct mlx5_txq_data *restrict txq, struct mlx5_txq_local *restrict loc __rte_unused, struct mlx5_wqe_dseg *restrict dseg, uint8_t *buf, unsigned int len, unsigned int olx __rte_unused) { unsigned int part; uint8_t *pdst; assert(len > MLX5_ESEG_MIN_INLINE_SIZE); static_assert(MLX5_DSEG_MIN_INLINE_SIZE == (2 * RTE_ETHER_ADDR_LEN), "invalid Data Segment data size"); if (!MLX5_TXOFF_CONFIG(MPW)) { /* Store the descriptor byte counter for eMPW sessions. */ dseg->bcount = rte_cpu_to_be_32 ((len + sizeof(struct rte_vlan_hdr)) | MLX5_ETH_WQE_DATA_INLINE); pdst = &dseg->inline_data[0]; } else { /* The entire legacy MPW session counter is stored on close. */ pdst = (uint8_t *)dseg; } memcpy(pdst, buf, MLX5_DSEG_MIN_INLINE_SIZE); buf += MLX5_DSEG_MIN_INLINE_SIZE; pdst += MLX5_DSEG_MIN_INLINE_SIZE; len -= MLX5_DSEG_MIN_INLINE_SIZE; /* Insert VLAN ethertype + VLAN tag. Pointer is aligned. */ assert(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE)); if (unlikely(pdst >= (uint8_t *)txq->wqes_end)) pdst = (uint8_t *)txq->wqes; *(uint32_t *)pdst = rte_cpu_to_be_32((RTE_ETHER_TYPE_VLAN << 16) | loc->mbuf->vlan_tci); pdst += sizeof(struct rte_vlan_hdr); /* * The WQEBB space availability is checked by caller. * Here we should be aware of WQE ring buffer wraparound only. */ part = (uint8_t *)txq->wqes_end - pdst; part = RTE_MIN(part, len); do { rte_memcpy(pdst, buf, part); len -= part; if (likely(!len)) { pdst += part; if (!MLX5_TXOFF_CONFIG(MPW)) pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE); /* Note: no final wraparound check here. */ return (struct mlx5_wqe_dseg *)pdst; } pdst = (uint8_t *)txq->wqes; buf += part; part = len; } while (true); }
0
[]
dpdk-stable
8b090f2664e9d014cd8fa0fde90597aaf4349e7e
274,603,463,083,581,100,000,000,000,000,000,000,000
57
net/mlx5: fix Rx queue recovery mechanism The local variables become inconsistent in the data receiving routines after queue error recovery. The receive queue consumer index becomes wrong and needs to be reset to the size of the queue (as the RQ was fully replenished in the recovery procedure). In the MPRQ case, the local consumed strd variable should also be reset. CVE-2022-28199 Fixes: 88c0733 ("net/mlx5: extend Rx completion with error handling") Signed-off-by: Alexander Kozyrev <[email protected]> Signed-off-by: Matan Azrad <[email protected]>
static int ssl_parse_ecjpake_kkpp( mbedtls_ssl_context *ssl, const unsigned char *buf, size_t len ) { int ret; if( ssl->transform_negotiate->ciphersuite_info->key_exchange != MBEDTLS_KEY_EXCHANGE_ECJPAKE ) { MBEDTLS_SSL_DEBUG_MSG( 3, ( "skip ecjpake kkpp extension" ) ); return( 0 ); } /* If we got here, we no longer need our cached extension */ mbedtls_free( ssl->handshake->ecjpake_cache ); ssl->handshake->ecjpake_cache = NULL; ssl->handshake->ecjpake_cache_len = 0; if( ( ret = mbedtls_ecjpake_read_round_one( &ssl->handshake->ecjpake_ctx, buf, len ) ) != 0 ) { MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ecjpake_read_round_one", ret ); mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL, MBEDTLS_SSL_ALERT_MSG_HANDSHAKE_FAILURE ); return( ret ); } return( 0 ); }
0
[ "CWE-119", "CWE-125", "CWE-295" ]
mbedtls
a1098f81c252b317ad34ea978aea2bc47760b215
38,321,314,133,837,947,000,000,000,000,000,000,000
29
Add bounds check before signature length read
void onDataInternal(Buffer::Instance& data) { buffer_.add(data); while (true) { if (size_ == 0) { uint16_t size_n; if (buffer_.length() < sizeof(size_n)) { // If we don't have enough bytes to determine size, wait until we do. return; } void* mem = buffer_.linearize(sizeof(size_n)); std::memcpy(reinterpret_cast<void*>(&size_n), mem, sizeof(size_n)); buffer_.drain(sizeof(size_n)); size_ = ntohs(size_n); } if (buffer_.length() < size_) { // If we don't have enough bytes to read the complete query, wait until // we do. return; } // Expect requests to be small, so stack allocation is fine for test code. unsigned char* request = static_cast<unsigned char*>(buffer_.linearize(size_)); // Only expecting a single question. ASSERT_EQ(1, DNS_HEADER_QDCOUNT(request)); // Decode the question and perform lookup. const unsigned char* question = request + HFIXEDSZ; // The number of bytes the encoded question name takes up in the request. // Useful in the response when generating resource records containing the // name. long name_len; // Get host name from query and use the name to lookup a record // in a host map. If the query type is of type A, then perform the lookup in // the hosts_a_ host map. If the query type is of type AAAA, then perform the // lookup in the `hosts_aaaa_` host map. char* name; ASSERT_EQ(ARES_SUCCESS, ares_expand_name(question, request, size_, &name, &name_len)); const std::list<std::string>* ips = nullptr; // We only expect resources of type A or AAAA. const int q_type = DNS_QUESTION_TYPE(question + name_len); std::string cname; // check if we have a cname. If so, we will need to send a response element with the cname // and lookup the ips of the cname and send back those ips (if any) too auto cit = parent_.cnames_.find(name); if (cit != parent_.cnames_.end()) { cname = cit->second; } const char* hostLookup = name; const unsigned char* ip_question = question; long ip_name_len = name_len; std::string encodedCname; if (!cname.empty()) { ASSERT_TRUE(cname.size() <= 253); hostLookup = cname.c_str(); encodedCname = TestDnsServerQuery::encodeDnsName(cname); ip_question = reinterpret_cast<const unsigned char*>(encodedCname.c_str()); ip_name_len = encodedCname.size() + 1; //+1 as we need to include the final null terminator } ASSERT_TRUE(q_type == T_A || q_type == T_AAAA); if (q_type == T_A) { auto it = parent_.hosts_a_.find(hostLookup); if (it != parent_.hosts_a_.end()) { ips = &it->second; } } else { auto it = parent_.hosts_aaaa_.find(hostLookup); if (it != parent_.hosts_aaaa_.end()) { ips = &it->second; } } ares_free_string(name); int answer_size = ips != nullptr ? ips->size() : 0; answer_size += !encodedCname.empty() ? 1 : 0; // The response begins with the initial part of the request // (including the question section). const size_t response_base_len = HFIXEDSZ + name_len + QFIXEDSZ; absl::FixedArray<unsigned char> response_buf(response_base_len); unsigned char* response_base = response_buf.begin(); memcpy(response_base, request, response_base_len); DNS_HEADER_SET_QR(response_base, 1); DNS_HEADER_SET_AA(response_base, 0); if (parent_.refused_) { DNS_HEADER_SET_RCODE(response_base, REFUSED); } else { DNS_HEADER_SET_RCODE(response_base, answer_size > 0 ? 
NOERROR : NXDOMAIN); } DNS_HEADER_SET_ANCOUNT(response_base, answer_size); DNS_HEADER_SET_NSCOUNT(response_base, 0); DNS_HEADER_SET_ARCOUNT(response_base, 0); // Total response size will be computed according to cname response size + ip response sizes size_t response_ip_rest_len; if (q_type == T_A) { response_ip_rest_len = ips != nullptr ? ips->size() * (ip_name_len + RRFIXEDSZ + sizeof(in_addr)) : 0; } else { response_ip_rest_len = ips != nullptr ? ips->size() * (ip_name_len + RRFIXEDSZ + sizeof(in6_addr)) : 0; } size_t response_cname_len = !encodedCname.empty() ? name_len + RRFIXEDSZ + encodedCname.size() + 1 : 0; const uint16_t response_size_n = htons(response_base_len + response_ip_rest_len + response_cname_len); Buffer::OwnedImpl write_buffer; // Write response header write_buffer.add(&response_size_n, sizeof(response_size_n)); write_buffer.add(response_base, response_base_len); // if we have a cname, create a resource record if (!encodedCname.empty()) { unsigned char cname_rr_fixed[RRFIXEDSZ]; DNS_RR_SET_TYPE(cname_rr_fixed, T_CNAME); DNS_RR_SET_LEN(cname_rr_fixed, encodedCname.size() + 1); DNS_RR_SET_CLASS(cname_rr_fixed, C_IN); DNS_RR_SET_TTL(cname_rr_fixed, parent_.record_ttl_.count()); write_buffer.add(question, name_len); write_buffer.add(cname_rr_fixed, RRFIXEDSZ); write_buffer.add(encodedCname.c_str(), encodedCname.size() + 1); } // Create a resource record for each IP found in the host map. unsigned char response_rr_fixed[RRFIXEDSZ]; if (q_type == T_A) { DNS_RR_SET_TYPE(response_rr_fixed, T_A); DNS_RR_SET_LEN(response_rr_fixed, sizeof(in_addr)); } else { DNS_RR_SET_TYPE(response_rr_fixed, T_AAAA); DNS_RR_SET_LEN(response_rr_fixed, sizeof(in6_addr)); } DNS_RR_SET_CLASS(response_rr_fixed, C_IN); DNS_RR_SET_TTL(response_rr_fixed, parent_.record_ttl_.count()); if (ips != nullptr) { for (const auto& it : *ips) { write_buffer.add(ip_question, ip_name_len); write_buffer.add(response_rr_fixed, RRFIXEDSZ); if (q_type == T_A) { in_addr addr; ASSERT_EQ(1, inet_pton(AF_INET, it.c_str(), &addr)); write_buffer.add(&addr, sizeof(addr)); } else { in6_addr addr; ASSERT_EQ(1, inet_pton(AF_INET6, it.c_str(), &addr)); write_buffer.add(&addr, sizeof(addr)); } } } parent_.connection_->write(write_buffer, false); // Reset query state, time for the next one. buffer_.drain(size_); size_ = 0; } }
0
[ "CWE-400" ]
envoy
542f84c66e9f6479bc31c6f53157c60472b25240
284,957,438,943,359,600,000,000,000,000,000,000,000
155
overload: Runtime configurable global connection limits (#147)

Signed-off-by: Tony Allen <[email protected]>
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
                             struct ip_options *opt)
{
    struct tcp_options_received tcp_opt;
    u8 *hash_location;
    struct inet_request_sock *ireq;
    struct tcp_request_sock *treq;
    struct tcp_sock *tp = tcp_sk(sk);
    const struct tcphdr *th = tcp_hdr(skb);
    __u32 cookie = ntohl(th->ack_seq) - 1;
    struct sock *ret = sk;
    struct request_sock *req;
    int mss;
    struct rtable *rt;
    __u8 rcv_wscale;
    bool ecn_ok;

    if (!sysctl_tcp_syncookies || !th->ack || th->rst)
        goto out;

    if (tcp_synq_no_recent_overflow(sk) ||
        (mss = cookie_check(skb, cookie)) == 0) {
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
        goto out;
    }

    NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);

    /* check for timestamp cookie support */
    memset(&tcp_opt, 0, sizeof(tcp_opt));
    tcp_parse_options(skb, &tcp_opt, &hash_location, 0);

    if (!cookie_check_timestamp(&tcp_opt, &ecn_ok))
        goto out;

    ret = NULL;
    req = inet_reqsk_alloc(&tcp_request_sock_ops); /* for safety */
    if (!req)
        goto out;

    ireq = inet_rsk(req);
    treq = tcp_rsk(req);
    treq->rcv_isn        = ntohl(th->seq) - 1;
    treq->snt_isn        = cookie;
    req->mss             = mss;
    ireq->loc_port       = th->dest;
    ireq->rmt_port       = th->source;
    ireq->loc_addr       = ip_hdr(skb)->daddr;
    ireq->rmt_addr       = ip_hdr(skb)->saddr;
    ireq->ecn_ok         = ecn_ok;
    ireq->snd_wscale     = tcp_opt.snd_wscale;
    ireq->sack_ok        = tcp_opt.sack_ok;
    ireq->wscale_ok      = tcp_opt.wscale_ok;
    ireq->tstamp_ok      = tcp_opt.saw_tstamp;
    req->ts_recent       = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;

    /* We throwed the options of the initial SYN away, so we hope
     * the ACK carries the same options again (see RFC1122 4.2.3.8)
     */
    if (opt && opt->optlen) {
        int opt_size = sizeof(struct ip_options) + opt->optlen;

        ireq->opt = kmalloc(opt_size, GFP_ATOMIC);
        if (ireq->opt != NULL && ip_options_echo(ireq->opt, skb)) {
            kfree(ireq->opt);
            ireq->opt = NULL;
        }
    }

    if (security_inet_conn_request(sk, skb, req)) {
        reqsk_free(req);
        goto out;
    }

    req->expires = 0UL;
    req->retrans = 0;

    /*
     * We need to lookup the route here to get at the correct
     * window size. We should better make sure that the window size
     * hasn't changed since we received the original syn, but I see
     * no easy way to do this.
     */
    {
        struct flowi4 fl4;

        flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk),
                           RT_SCOPE_UNIVERSE, IPPROTO_TCP,
                           inet_sk_flowi_flags(sk),
                           (opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
                           ireq->loc_addr, th->source, th->dest);
        security_req_classify_flow(req, flowi4_to_flowi(&fl4));
        rt = ip_route_output_key(sock_net(sk), &fl4);
        if (IS_ERR(rt)) {
            reqsk_free(req);
            goto out;
        }
    }

    /* Try to redo what tcp_v4_send_synack did. */
    req->window_clamp = tp->window_clamp ? :dst_metric(&rt->dst, RTAX_WINDOW);

    tcp_select_initial_window(tcp_full_space(sk), req->mss,
                              &req->rcv_wnd, &req->window_clamp,
                              ireq->wscale_ok, &rcv_wscale,
                              dst_metric(&rt->dst, RTAX_INITRWND));

    ireq->rcv_wscale = rcv_wscale;

    ret = get_cookie_sock(sk, skb, req, &rt->dst);
out:
    return ret;
}
1
[ "CWE-362" ]
linux-2.6
f6d8bd051c391c1c0458a30b2a7abcd939329259
148,163,105,186,983,800,000,000,000,000,000,000,000
112
inet: add RCU protection to inet->opt

We lack proper synchronization to manipulate inet->opt ip_options.

Problem is that ip_make_skb() calls ip_setup_cork(), and ip_setup_cork()
possibly makes a copy of ipc->opt (struct ip_options) without any
protection against another thread manipulating inet->opt. Another thread
can change the inet->opt pointer and free the old one under us.

Use RCU to protect inet->opt (changed to inet->inet_opt). Instead of
handling atomic refcounts, just copy the ip_options when necessary, to
avoid cache line dirtying.

We can't insert an rcu_head in struct ip_options since it is included in
skb->cb[], so this patch is large because I had to introduce a new
ip_options_rcu structure.

Signed-off-by: Eric Dumazet <[email protected]>
Cc: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
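The pattern the commit describes — readers dereferencing a shared options pointer inside an RCU read-side section while writers publish a fresh copy and defer freeing the old one — can be sketched in a few lines. This is only an illustration of the idiom, not the actual net/ipv4 change; struct opts, cur_opts, and both helpers are made-up names.

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct opts {
    struct rcu_head rcu;
    int optlen;
};

static struct opts __rcu *cur_opts;

/* Reader: no locks, just an RCU read-side critical section. */
static int read_optlen(void)
{
    struct opts *o;
    int len = 0;

    rcu_read_lock();
    o = rcu_dereference(cur_opts);
    if (o)
        len = o->optlen;
    rcu_read_unlock();
    return len;
}

/* Writer: publish the new copy, free the old one after a grace period. */
static void replace_opts(struct opts *new_opts)
{
    struct opts *old = rcu_dereference_protected(cur_opts, 1);

    rcu_assign_pointer(cur_opts, new_opts);
    if (old)
        kfree_rcu(old, rcu);
}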
static void mmtimer_set_next_timer(int nodeid)
{
    struct mmtimer_node *n = &timers[nodeid];
    struct mmtimer *x;
    struct k_itimer *t;
    int o;

restart:
    if (n->next == NULL)
        return;

    x = rb_entry(n->next, struct mmtimer, list);
    t = x->timer;
    if (!t->it.mmtimer.incr) {
        /* Not an interval timer */
        if (!mmtimer_setup(x->cpu, COMPARATOR, t->it.mmtimer.expires)) {
            /* Late setup, fire now */
            tasklet_schedule(&n->tasklet);
        }
        return;
    }

    /* Interval timer */
    o = 0;
    while (!mmtimer_setup(x->cpu, COMPARATOR, t->it.mmtimer.expires)) {
        unsigned long e, e1;
        struct rb_node *next;
        t->it.mmtimer.expires += t->it.mmtimer.incr << o;
        t->it_overrun += 1 << o;
        o++;
        if (o > 20) {
            printk(KERN_ALERT "mmtimer: cannot reschedule timer\n");
            t->it.mmtimer.clock = TIMER_OFF;
            n->next = rb_next(&x->list);
            rb_erase(&x->list, &n->timer_head);
            kfree(x);
            goto restart;
        }

        e = t->it.mmtimer.expires;
        next = rb_next(&x->list);
        if (next == NULL)
            continue;
        e1 = rb_entry(next, struct mmtimer, list)->timer->it.mmtimer.expires;
        if (e > e1) {
            n->next = next;
            rb_erase(&x->list, &n->timer_head);
            mmtimer_add_list(x);
            goto restart;
        }
    }
}
0
[ "CWE-189" ]
linux
f8bd2258e2d520dff28c855658bd24bdafb5102d
313,441,642,630,882,140,000,000,000,000,000,000,000
56
remove div_long_long_rem

x86 is the only arch right now which provides an optimized
div_long_long_rem, and it has the downside that one has to be very
careful that the divide doesn't overflow.

The API is a little awkward, as the arguments for the unsigned divide are
signed. The signed version also doesn't handle a negative divisor and
produces worse code on 64-bit archs.

There is little incentive to keep this API alive, so this converts the
few users to the new API.

Signed-off-by: Roman Zippel <[email protected]>
Cc: Ralf Baechle <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: john stultz <[email protected]>
Cc: Christoph Lameter <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
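For reference, the replacement API the users were converted to looks roughly like this: div_s64_rem() from <linux/math64.h> returns the quotient and stores the remainder through a pointer. The surrounding helper and the nanosecond example are invented for illustration, not taken from the patch.

#include <linux/math64.h>

/* Split a signed nanosecond count into whole seconds plus leftover ns.
 * Unlike the removed div_long_long_rem(), div_s64_rem() handles
 * negative dividends consistently across architectures. */
static s64 split_ns(s64 total_ns, s32 *rem_ns)
{
    return div_s64_rem(total_ns, 1000000000, rem_ns);
}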
uint32_t crypt_get_compatibility(struct crypt_device *cd)
{
    if (cd)
        return cd->compatibility;

    return 0;
}
0
[ "CWE-345" ]
cryptsetup
0113ac2d889c5322659ad0596d4cfc6da53e356c
90,062,438,258,234,490,000,000,000,000,000,000,000
7
Fix CVE-2021-4122 - LUKS2 reencryption crash recovery attack

Fix possible attacks against data confidentiality through LUKS2 online
reencryption extension crash recovery.

An attacker can modify on-disk metadata to simulate decryption in
progress with crashed (unfinished) reencryption step and persistently
decrypt part of the LUKS device. This attack requires repeated physical
access to the LUKS device but no knowledge of user passphrases.

The decryption step is performed after a valid user activates the device
with a correct passphrase and modified metadata. There are no visible
warnings for the user that such recovery happened (except using the
luksDump command). The attack can also be reversed afterward (simulating
crashed encryption from a plaintext) with possible modification of
revealed plaintext.

The problem was caused by reusing a mechanism designed for actual
reencryption operation without reassessing the security impact for new
encryption and decryption operations. While the reencryption requires
calculating and verifying both key digests, no digest was needed to
initiate decryption recovery if the destination is plaintext (no
encryption key). Also, some metadata (like encryption cipher) is not
protected, and an attacker could change it. Note that LUKS2 protects
visible metadata only when a random change occurs. It does not protect
against intentional modification but such modification must not cause a
violation of data confidentiality.

The fix introduces additional digest protection of reencryption metadata.
The digest is calculated from known keys and critical reencryption
metadata. Now an attacker cannot create correct metadata digest without
knowledge of a passphrase for used keyslots.

For more details, see LUKS2 On-Disk Format Specification version 1.1.0.
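The digest idea can be sketched generically: compute a keyed MAC over the serialized reencryption metadata using key material only a passphrase holder can derive, and refuse recovery when it does not verify. The snippet below uses OpenSSL's HMAC purely for illustration; it is not cryptsetup's actual construction, key derivation, or field layout.

#include <openssl/evp.h>
#include <openssl/hmac.h>

/* Returns the digest length on success, -1 on failure. Without the key
 * an attacker cannot forge this value, so tampered reencryption
 * metadata is rejected before any recovery step runs. */
static int reenc_digest(const unsigned char *key, size_t key_len,
                        const unsigned char *metadata, size_t md_len,
                        unsigned char out[EVP_MAX_MD_SIZE])
{
    unsigned int out_len = 0;

    if (!HMAC(EVP_sha256(), key, (int)key_len, metadata, md_len,
              out, &out_len))
        return -1;
    return (int)out_len;
}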
static void vvc_compute_poc(VVCSliceInfo *si)
{
    u32 max_poc_lsb = 1 << (si->sps->log2_max_poc_lsb);

    /*POC reset for IDR frames, NOT for CRA*/
    if (si->irap_or_gdr_pic && !si->gdr_pic) {
        si->poc_lsb_prev = 0;
        si->poc_msb_prev = 0;
    }

    if (si->poc_msb_cycle_present_flag) {
        si->poc_msb = si->poc_msb_cycle;
    } else {
        if ((si->poc_lsb < si->poc_lsb_prev) &&
            (si->poc_lsb_prev - si->poc_lsb >= max_poc_lsb / 2))
            si->poc_msb = si->poc_msb_prev + max_poc_lsb;
        else if ((si->poc_lsb > si->poc_lsb_prev) &&
                 (si->poc_lsb - si->poc_lsb_prev > max_poc_lsb / 2))
            si->poc_msb = si->poc_msb_prev - max_poc_lsb;
        else
            si->poc_msb = si->poc_msb_prev;
    }

    si->poc = si->poc_msb + si->poc_lsb;
}
0
[ "CWE-190", "CWE-787" ]
gpac
51cdb67ff7c5f1242ac58c5aa603ceaf1793b788
335,142,549,690,232,950,000,000,000,000,000,000,000
23
add safety in avc/hevc/vvc sps/pps/vps ID check - cf #1720 #1721 #1722
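The safety check this title refers to amounts to rejecting a parsed parameter-set ID before it indexes a fixed-size table. A generic sketch — the names and table size here are illustrative, not GPAC's actual code:

#define MAX_PS 16

static int store_ps(void *table[MAX_PS], int ps_id, void *ps)
{
    /* Corrupt bitstreams can carry out-of-range IDs: reject them
     * instead of writing past the end of the table. */
    if (ps_id < 0 || ps_id >= MAX_PS)
        return -1;
    table[ps_id] = ps;
    return 0;
}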
static void cli_server_init_globals(zend_cli_server_globals *cg TSRMLS_DC)
{
    cg->color = 0;
}
0
[]
php-src
2438490addfbfba51e12246a74588b2382caa08a
44,283,818,163,537,060,000,000,000,000,000,000,000
4
slim post data
static inline bool is_invalid_opcode(u32 intr_info)
{
    return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
                         INTR_INFO_VALID_MASK)) ==
           (INTR_TYPE_HARD_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
}
0
[ "CWE-400" ]
linux-2.6
9581d442b9058d3699b4be568b6e5eae38a41493
107,657,002,480,146,560,000,000,000,000,000,000,000
6
KVM: Fix fs/gs reload oops with invalid ldt

kvm reloads the host's fs and gs blindly; however, the underlying segment
descriptors may be invalid due to the user modifying the ldt after
loading them.

Fix by using the safe accessors (loadsegment() and load_gs_index())
instead of home-grown unsafe versions.

This is CVE-2010-3698.

KVM-Stable-Tag.
Signed-off-by: Avi Kivity <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]>
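The safe-accessor pattern can be illustrated as follows. loadsegment() and load_gs_index() are the real x86 helpers named in the message, but the wrapper function and its use here are hypothetical, not the KVM patch itself:

#include <linux/types.h>
#include <asm/segment.h>

/* loadsegment() recovers from a #GP caused by a stale or invalid LDT
 * descriptor by loading the null selector instead of oopsing; gs on
 * x86-64 must go through the swapgs-aware load_gs_index(). */
static void reload_host_fs_gs(u16 fs_sel, u16 gs_sel)
{
    loadsegment(fs, fs_sel);
#ifdef CONFIG_X86_64
    load_gs_index(gs_sel);
#else
    loadsegment(gs, gs_sel);
#endif
}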
UsbHubResetPort (
  IN USB_INTERFACE  *HubIf,
  IN UINT8          Port
  )
{
  EFI_USB_PORT_STATUS  PortState;
  UINTN                Index;
  EFI_STATUS           Status;

  Status = UsbHubSetPortFeature (HubIf, Port, (EFI_USB_PORT_FEATURE) USB_HUB_PORT_RESET);
  if (EFI_ERROR (Status)) {
    return Status;
  }

  //
  // Drive the reset signal for worst 20ms. Check USB 2.0 Spec
  // section 7.1.7.5 for timing requirements.
  //
  gBS->Stall (USB_SET_PORT_RESET_STALL);

  //
  // Check USB_PORT_STAT_C_RESET bit to see if the resetting state is done.
  //
  ZeroMem (&PortState, sizeof (EFI_USB_PORT_STATUS));

  for (Index = 0; Index < USB_WAIT_PORT_STS_CHANGE_LOOP; Index++) {
    Status = UsbHubGetPortStatus (HubIf, Port, &PortState);
    if (EFI_ERROR (Status)) {
      return Status;
    }

    if (!EFI_ERROR (Status) &&
        USB_BIT_IS_SET (PortState.PortChangeStatus, USB_PORT_STAT_C_RESET)) {
      gBS->Stall (USB_SET_PORT_RECOVERY_STALL);
      return EFI_SUCCESS;
    }

    gBS->Stall (USB_WAIT_PORT_STS_CHANGE_STALL);
  }

  return EFI_TIMEOUT;
}
0
[ "CWE-787" ]
edk2
acebdf14c985c5c9f50b37ece0b15ada87767359
225,719,403,125,446,950,000,000,000,000,000,000,000
44
MdeModulePkg UsbBusDxe: Fix wrong buffer length used to read hub desc

REF: https://bugzilla.tianocore.org/show_bug.cgi?id=973

HUB descriptor has variable length. But the code uses a stack buffer
(HubDesc in UsbHubInit) with fixed length sizeof(EFI_USB_HUB_DESCRIPTOR)
to hold HUB descriptor data. It uses a hard-coded length value (32, which
is greater than sizeof(EFI_USB_HUB_DESCRIPTOR)) for the SuperSpeed path,
so there will be a stack overflow when IOMMU is enabled because the Unmap
operation will copy the data from the device buffer to the host buffer.
And it uses HubDesc->Length for the non-SuperSpeed path, so there will be
a stack overflow when HubDesc->Length is greater than
sizeof(EFI_USB_HUB_DESCRIPTOR).

The patch updates the code to use a big enough buffer to hold the
descriptor data. The definition EFI_USB_SUPER_SPEED_HUB_DESCRIPTOR is
wrong (the HubDelay field should be UINT16 type) and no code is using it,
so the patch removes it.

Cc: Jiewen Yao <[email protected]>
Cc: Ruiyu Ni <[email protected]>
Cc: Bret Barkelew <[email protected]>
Contributed-under: TianoCore Contribution Agreement 1.1
Signed-off-by: Star Zeng <[email protected]>
Reviewed-by: Bret Barkelew <[email protected]>
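The sizing rule behind the fix generalizes well: a USB descriptor's bLength is a single byte, so a 256-byte buffer always holds the full variable-length structure, whereas a fixed-size stack struct can be overrun. A hedged sketch in plain C — names are invented, and the real patch operates on edk2's USB bus driver types:

#include <stdlib.h>
#include <string.h>

static unsigned char *copy_desc(const unsigned char *wire, size_t wire_len)
{
    unsigned char buf[256];   /* bLength <= 255, so this always fits */
    size_t len;

    if (wire_len < 2)
        return NULL;
    len = wire[0];            /* device-reported bLength */
    if (len < 2 || len > wire_len)
        return NULL;          /* malformed: reject, don't overflow */
    memcpy(buf, wire, len);

    unsigned char *out = malloc(len);
    if (out)
        memcpy(out, buf, len);
    return out;
}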
stack_double(int is_alloca, char** arg_alloc_base,
             OnigStackType** arg_stk_base, OnigStackType** arg_stk_end,
             OnigStackType** arg_stk, OnigMatchArg* msa)
{
  unsigned int n;
  int used;
  size_t size;
  size_t new_size;
  char* alloc_base;
  char* new_alloc_base;
  OnigStackType *stk_base, *stk_end, *stk;

  alloc_base = *arg_alloc_base;
  stk_base = *arg_stk_base;
  stk_end  = *arg_stk_end;
  stk      = *arg_stk;

  n = stk_end - stk_base;
  size = sizeof(OnigStackIndex) * msa->ptr_num + sizeof(OnigStackType) * n;
  n *= 2;
  new_size = sizeof(OnigStackIndex) * msa->ptr_num + sizeof(OnigStackType) * n;
  if (is_alloca != 0) {
    new_alloc_base = (char* )xmalloc(new_size);
    if (IS_NULL(new_alloc_base)) {
      STACK_SAVE;
      return ONIGERR_MEMORY;
    }
    xmemcpy(new_alloc_base, alloc_base, size);
  }
  else {
    if (MatchStackLimitSize != 0 && n > MatchStackLimitSize) {
      if ((unsigned int )(stk_end - stk_base) == MatchStackLimitSize)
        return ONIGERR_MATCH_STACK_LIMIT_OVER;
      else
        n = MatchStackLimitSize;
    }
    new_alloc_base = (char* )xrealloc(alloc_base, new_size);
    if (IS_NULL(new_alloc_base)) {
      STACK_SAVE;
      return ONIGERR_MEMORY;
    }
  }

  alloc_base = new_alloc_base;
  used = stk - stk_base;
  *arg_alloc_base = alloc_base;
  *arg_stk_base = (OnigStackType* )(alloc_base
                                    + (sizeof(OnigStackIndex) * msa->ptr_num));
  *arg_stk     = *arg_stk_base + used;
  *arg_stk_end = *arg_stk_base + n;
  return 0;
}
0
[ "CWE-125" ]
oniguruma
690313a061f7a4fa614ec5cc8368b4f2284e059b
109,418,410,253,903,950,000,000,000,000,000,000,000
53
fix #57: DATA_ENSURE() check must be before data access
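The rule in this title — validate the remaining input before dereferencing it, never after — is easy to show in miniature. The helper below is hypothetical and merely stands in for the role Oniguruma's DATA_ENSURE() macro plays:

/* Read a big-endian u16 only if two bytes actually remain. */
static int read_u16(const unsigned char *p, const unsigned char *end,
                    unsigned int *out)
{
    if (end - p < 2)                 /* check first ... */
        return -1;
    *out = (p[0] << 8) | p[1];       /* ... then touch the bytes */
    return 0;
}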
static int mov_read_saiz(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
    MOVEncryptionIndex *encryption_index;
    MOVStreamContext *sc;
    int ret;
    unsigned int sample_count, aux_info_type, aux_info_param;

    ret = get_current_encryption_info(c, &encryption_index, &sc);
    if (ret != 1)
        return ret;

    if (encryption_index->nb_encrypted_samples) {
        // This can happen if we have both saio/saiz and senc atoms.
        av_log(c->fc, AV_LOG_DEBUG, "Ignoring duplicate encryption info in saiz\n");
        return 0;
    }

    if (encryption_index->auxiliary_info_sample_count) {
        av_log(c->fc, AV_LOG_ERROR, "Duplicate saiz atom\n");
        return AVERROR_INVALIDDATA;
    }

    avio_r8(pb); /* version */
    if (avio_rb24(pb) & 0x01) { /* flags */
        aux_info_type  = avio_rb32(pb);
        aux_info_param = avio_rb32(pb);
        if (sc->cenc.default_encrypted_sample) {
            if (aux_info_type != sc->cenc.default_encrypted_sample->scheme) {
                av_log(c->fc, AV_LOG_DEBUG, "Ignoring saiz box with non-zero aux_info_type\n");
                return 0;
            }
            if (aux_info_param != 0) {
                av_log(c->fc, AV_LOG_DEBUG, "Ignoring saiz box with non-zero aux_info_type_parameter\n");
                return 0;
            }
        } else {
            // Didn't see 'schm' or 'tenc', so this isn't encrypted.
            if ((aux_info_type == MKBETAG('c','e','n','c') ||
                 aux_info_type == MKBETAG('c','e','n','s') ||
                 aux_info_type == MKBETAG('c','b','c','1') ||
                 aux_info_type == MKBETAG('c','b','c','s')) &&
                aux_info_param == 0) {
                av_log(c->fc, AV_LOG_ERROR, "Saw encrypted saiz without schm/tenc\n");
                return AVERROR_INVALIDDATA;
            } else {
                return 0;
            }
        }
    } else if (!sc->cenc.default_encrypted_sample) {
        // Didn't see 'schm' or 'tenc', so this isn't encrypted.
        return 0;
    }

    encryption_index->auxiliary_info_default_size = avio_r8(pb);
    sample_count = avio_rb32(pb);
    encryption_index->auxiliary_info_sample_count = sample_count;

    if (encryption_index->auxiliary_info_default_size == 0) {
        ret = mov_try_read_block(pb, sample_count, &encryption_index->auxiliary_info_sizes);
        if (ret < 0) {
            av_log(c->fc, AV_LOG_ERROR, "Failed to read the auxiliary info\n");
            return ret;
        }
    }

    if (encryption_index->auxiliary_offsets_count) {
        return mov_parse_auxiliary_info(c, sc, pb, encryption_index);
    }

    return 0;
}
0
[ "CWE-703" ]
FFmpeg
c953baa084607dd1d84c3bfcce3cf6a87c3e6e05
306,737,486,190,364,260,000,000,000,000,000,000,000
71
avformat/mov: Check count sums in build_open_gop_key_points()

Fixes: ffmpeg.md
Fixes: Out of array access
Fixes: CVE-2022-2566

Found-by: Andy Nguyen <[email protected]>
Found-by: 3pvd <[email protected]>
Reviewed-by: Andy Nguyen <[email protected]>
Signed-off-by: Michael Niedermayer <[email protected]>
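The kind of validation this commit describes — summing per-entry counts with a bound check before the total is used as an allocation size or array index — can be sketched generically; this is not the actual build_open_gop_key_points() code, and all names here are illustrative:

#include <stdint.h>
#include <stddef.h>

/* Accumulate counts, failing as soon as the running sum exceeds a
 * caller-supplied bound, so the total can never index out of range. */
static int checked_total(const uint32_t *counts, size_t n, uint64_t max,
                         uint64_t *total)
{
    uint64_t sum = 0;
    size_t i;

    for (i = 0; i < n; i++) {
        sum += counts[i];
        if (sum > max)
            return -1;
    }
    *total = sum;
    return 0;
}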