| Column | Type | Values |
| --- | --- | --- |
| func | string | lengths 0 to 484k |
| target | int64 | 0 to 1 |
| cwe | list | lengths 0 to 4 |
| project | string | 799 classes |
| commit_id | string | lengths 40 to 40 |
| hash | float64 | 1,215,700,430,453,689,100,000,000B to 340,281,914,521,452,260,000,000,000,000,000,000B |
| size | int64 | 1 to 24k |
| message | string | lengths 0 to 13.3k |
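Each record that follows lists these eight fields in this order, one per line: the function body, the vulnerability label, the CWE tags, the project, the fixing commit, the hash, the size, and the commit message. As a reading aid, here is a minimal sketch of how such records could be tallied once parsed, assuming rows are available as plain Python dicts keyed by the schema's field names; the sample reuses values from the first gpac record below, with the long `func` body abridged and the `hash` field omitted:

```python
from collections import Counter

# One record restated from the first row below (schema fields: func,
# target, cwe, project, commit_id, size, message); the func body is
# abridged here purely for readability.
rows = [
    {
        "func": "static GF_Err gf_isom_set_edit_internal(GF_ISOFile *movie, ...) { ... }",
        "target": 0,
        "cwe": ["CWE-476"],
        "project": "gpac",
        "commit_id": "586e817dcd531bb3e75438390f1f753cfe6e940a",
        "size": 97,
        "message": "fixed #2046",
    },
]

# Split the corpus by vulnerability label and count the CWE tags
# attached to the records.
by_label = Counter(r["target"] for r in rows)
cwe_freq = Counter(tag for r in rows for tag in r["cwe"])
print(f"label counts: {dict(by_label)}")
print(f"CWE frequencies: {dict(cwe_freq)}")
```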
static GF_Err gf_isom_set_edit_internal(GF_ISOFile *movie, u32 trackNumber, u64 EditTime, u64 EditDuration, u64 MediaTime, u32 media_rate, GF_ISOEditType EditMode) { GF_TrackBox *trak; GF_EditBox *edts; GF_EditListBox *elst; GF_EdtsEntry *ent, *newEnt; u32 i; GF_Err e; u64 startTime; e = CanAccessMovie(movie, GF_ISOM_OPEN_WRITE); if (e) return e; trak = gf_isom_get_track_from_file(movie, trackNumber); if (!trak) return GF_BAD_PARAM; edts = trak->editBox; if (! edts) { edts = (GF_EditBox *) gf_isom_box_new_parent(&trak->child_boxes, GF_ISOM_BOX_TYPE_EDTS); if (!edts) return GF_OUT_OF_MEM; trak_on_child_box((GF_Box*)trak, (GF_Box *)edts, GF_FALSE); } elst = edts->editList; if (!elst) { elst = (GF_EditListBox *) gf_isom_box_new_parent(&edts->child_boxes, GF_ISOM_BOX_TYPE_ELST); if (!elst) return GF_OUT_OF_MEM; edts_on_child_box((GF_Box*)edts, (GF_Box *)elst, GF_FALSE); } startTime = 0; ent = NULL; //get the prev entry to this startTime if any i=0; while ((ent = (GF_EdtsEntry *)gf_list_enum(elst->entryList, &i))) { if ( (startTime <= EditTime) && (startTime + ent->segmentDuration > EditTime) ) goto found; startTime += ent->segmentDuration; } //not found, add a new entry, insert empty one if gap if (!ent) { Bool empty_inserted = GF_FALSE; if (startTime != EditTime) { newEnt = CreateEditEntry(EditTime - startTime, 0, 0, GF_ISOM_EDIT_EMPTY); if (!newEnt) return GF_OUT_OF_MEM; empty_inserted = GF_TRUE; gf_list_add(elst->entryList, newEnt); } newEnt = CreateEditEntry(EditDuration, MediaTime, media_rate, EditMode); if (!newEnt) return GF_OUT_OF_MEM; gf_list_add(elst->entryList, newEnt); e = SetTrackDuration(trak); if (e) return e; return empty_inserted ? GF_EOS : GF_OK; } startTime -= ent->segmentDuration; found: //if same time, we erase the current one... if (startTime == EditTime) { ent->segmentDuration = EditDuration; switch (EditMode) { case GF_ISOM_EDIT_EMPTY: ent->mediaRate = 0x10000; ent->mediaTime = -1; break; case GF_ISOM_EDIT_DWELL: ent->mediaRate = 0; ent->mediaTime = MediaTime; break; default: ent->mediaRate = media_rate; ent->mediaTime = MediaTime; break; } return SetTrackDuration(trak); } //adjust so that the prev ent leads to EntryTime //Note: we don't change the next one as it is unknown to us in //a lot of case (the author's changes) ent->segmentDuration = EditTime - startTime; newEnt = CreateEditEntry(EditDuration, MediaTime, media_rate, EditMode); if (!newEnt) return GF_OUT_OF_MEM; //is it the last entry ??? if (i >= gf_list_count(elst->entryList) - 1) { //add the new entry at the end gf_list_add(elst->entryList, newEnt); return SetTrackDuration(trak); } else { //insert after the current entry (which is i) gf_list_insert(elst->entryList, newEnt, i+1); return SetTrackDuration(trak); } }
0
[ "CWE-476" ]
gpac
586e817dcd531bb3e75438390f1f753cfe6e940a
264,903,762,518,844,400,000,000,000,000,000,000,000
97
fixed #2046
void Skip(cmsIT8* it8, SYMBOL sy) { if (it8->sy == sy && it8->sy != SEOF) InSymbol(it8); }
0
[]
Little-CMS
65e2f1df3495edc984f7e0d7b7b24e29d851e240
285,313,160,948,587,920,000,000,000,000,000,000,000
5
Fix some warnings from static analysis
index_build (p11_index *index, CK_OBJECT_HANDLE handle, CK_ATTRIBUTE **attrs, CK_ATTRIBUTE *merge) { CK_ATTRIBUTE *extra = NULL; CK_ATTRIBUTE *built; p11_array *stack = NULL; CK_ULONG count; CK_ULONG nattrs; CK_ULONG nmerge; CK_ULONG nextra; CK_RV rv; int i; rv = index->build (index->data, index, *attrs, merge, &extra); if (rv != CKR_OK) return rv; /* Short circuit when nothing to merge */ if (*attrs == NULL && extra == NULL) { built = merge; stack = NULL; } else { stack = p11_array_new (NULL); nattrs = p11_attrs_count (*attrs); nmerge = p11_attrs_count (merge); nextra = p11_attrs_count (extra); /* Make a shallow copy of the combined attributes for validation */ built = calloc (nmerge + nattrs + nextra + 1, sizeof (CK_ATTRIBUTE)); return_val_if_fail (built != NULL, CKR_GENERAL_ERROR); count = nmerge; memcpy (built, merge, sizeof (CK_ATTRIBUTE) * nmerge); p11_array_push (stack, merge); merge_attrs (built, &count, *attrs, nattrs, stack); merge_attrs (built, &count, extra, nextra, stack); /* The terminator attribute */ built[count].type = CKA_INVALID; assert (p11_attrs_terminator (built + count)); } rv = index->store (index->data, index, handle, &built); if (rv == CKR_OK) { for (i = 0; stack && i < stack->num; i++) free (stack->elem[i]); *attrs = built; } else { p11_attrs_free (extra); free (built); } p11_array_free (stack); return rv; }
0
[ "CWE-190" ]
p11-kit
5307a1d21a50cacd06f471a873a018d23ba4b963
38,157,472,885,762,620,000,000,000,000,000,000,000
59
Check for arithmetic overflows before allocating
CURLcode Curl_http_resume(struct Curl_easy *data, struct connectdata *conn, Curl_HttpReq httpreq) { if((HTTPREQ_POST == httpreq || HTTPREQ_PUT == httpreq) && data->state.resume_from) { /********************************************************************** * Resuming upload in HTTP means that we PUT or POST and that we have * got a resume_from value set. The resume value has already created * a Range: header that will be passed along. We need to "fast forward" * the file the given number of bytes and decrease the assume upload * file size before we continue this venture in the dark lands of HTTP. * Resuming mime/form posting at an offset > 0 has no sense and is ignored. *********************************************************************/ if(data->state.resume_from < 0) { /* * This is meant to get the size of the present remote-file by itself. * We don't support this now. Bail out! */ data->state.resume_from = 0; } if(data->state.resume_from && !data->state.this_is_a_follow) { /* do we still game? */ /* Now, let's read off the proper amount of bytes from the input. */ int seekerr = CURL_SEEKFUNC_CANTSEEK; if(conn->seek_func) { Curl_set_in_callback(data, true); seekerr = conn->seek_func(conn->seek_client, data->state.resume_from, SEEK_SET); Curl_set_in_callback(data, false); } if(seekerr != CURL_SEEKFUNC_OK) { curl_off_t passed = 0; if(seekerr != CURL_SEEKFUNC_CANTSEEK) { failf(data, "Could not seek stream"); return CURLE_READ_ERROR; } /* when seekerr == CURL_SEEKFUNC_CANTSEEK (can't seek to offset) */ do { size_t readthisamountnow = (data->state.resume_from - passed > data->set.buffer_size) ? (size_t)data->set.buffer_size : curlx_sotouz(data->state.resume_from - passed); size_t actuallyread = data->state.fread_func(data->state.buffer, 1, readthisamountnow, data->state.in); passed += actuallyread; if((actuallyread == 0) || (actuallyread > readthisamountnow)) { /* this checks for greater-than only to make sure that the CURL_READFUNC_ABORT return code still aborts */ failf(data, "Could only read %" CURL_FORMAT_CURL_OFF_T " bytes from the input", passed); return CURLE_READ_ERROR; } } while(passed < data->state.resume_from); } /* now, decrease the size of the read */ if(data->state.infilesize>0) { data->state.infilesize -= data->state.resume_from; if(data->state.infilesize <= 0) { failf(data, "File already completely uploaded"); return CURLE_PARTIAL_FILE; } } /* we've passed, proceed as normal */ } } return CURLE_OK; }
0
[]
curl
48d7064a49148f03942380967da739dcde1cdc24
103,460,146,676,619,950,000,000,000,000,000,000,000
79
cookie: apply limits - Send no more than 150 cookies per request - Cap the max length used for a cookie: header to 8K - Cap the max number of received Set-Cookie: headers to 50 Bug: https://curl.se/docs/CVE-2022-32205.html CVE-2022-32205 Reported-by: Harry Sintonen Closes #9048
static struct mnt_namespace *create_mnt_ns(struct vfsmount *m) { struct mnt_namespace *new_ns = alloc_mnt_ns(&init_user_ns); if (!IS_ERR(new_ns)) { struct mount *mnt = real_mount(m); mnt->mnt_ns = new_ns; new_ns->root = mnt; list_add(&mnt->mnt_list, &new_ns->list); } else { mntput(m); } return new_ns; }
1
[ "CWE-400", "CWE-703" ]
linux
d29216842a85c7970c536108e093963f02714498
72,825,772,552,645,460,000,000,000,000,000,000,000
13
mnt: Add a per mount namespace limit on the number of mounts CAI Qian <[email protected]> pointed out that the semantics of shared subtrees make it possible to create an exponentially increasing number of mounts in a mount namespace. mkdir /tmp/1 /tmp/2 mount --make-rshared / for i in $(seq 1 20) ; do mount --bind /tmp/1 /tmp/2 ; done Will create create 2^20 or 1048576 mounts, which is a practical problem as some people have managed to hit this by accident. As such CVE-2016-6213 was assigned. Ian Kent <[email protected]> described the situation for autofs users as follows: > The number of mounts for direct mount maps is usually not very large because of > the way they are implemented, large direct mount maps can have performance > problems. There can be anywhere from a few (likely case a few hundred) to less > than 10000, plus mounts that have been triggered and not yet expired. > > Indirect mounts have one autofs mount at the root plus the number of mounts that > have been triggered and not yet expired. > > The number of autofs indirect map entries can range from a few to the common > case of several thousand and in rare cases up to between 30000 and 50000. I've > not heard of people with maps larger than 50000 entries. > > The larger the number of map entries the greater the possibility for a large > number of active mounts so it's not hard to expect cases of a 1000 or somewhat > more active mounts. So I am setting the default number of mounts allowed per mount namespace at 100,000. This is more than enough for any use case I know of, but small enough to quickly stop an exponential increase in mounts. Which should be perfect to catch misconfigurations and malfunctioning programs. For anyone who needs a higher limit this can be changed by writing to the new /proc/sys/fs/mount-max sysctl. Tested-by: CAI Qian <[email protected]> Signed-off-by: "Eric W. Biederman" <[email protected]>
int64 GetDnnWorkspaceLimit(const string& envvar_in_mb, int64 default_value_in_bytes) { const char* workspace_limit_in_mb_str = getenv(envvar_in_mb.c_str()); if (workspace_limit_in_mb_str != nullptr && strcmp(workspace_limit_in_mb_str, "") != 0) { int64 scratch_limit_in_mb = -1; if (strings::safe_strto64(workspace_limit_in_mb_str, &scratch_limit_in_mb)) { return scratch_limit_in_mb * (1 << 20); } else { LOG(WARNING) << "Invalid value for env-var " << envvar_in_mb << ": " << workspace_limit_in_mb_str; } } return default_value_in_bytes; }
0
[ "CWE-369" ]
tensorflow
b12aa1d44352de21d1a6faaf04172d8c2508b42b
224,057,565,147,497,370,000,000,000,000,000,000,000
16
Fix one more FPE. PiperOrigin-RevId: 369346568 Change-Id: I840fd575962adc879713a4c9cc59e6da3331caa7
GF_Err xtra_box_size(GF_Box *s) { GF_XtraBox *ptr = (GF_XtraBox *)s; u32 i, count = gf_list_count(ptr->tags); for (i=0; i<count; i++) { GF_XtraTag *tag = gf_list_get(ptr->tags, i); ptr->size += 18 + (u32) strlen(tag->name) + tag->prop_size; } return GF_OK; }
0
[ "CWE-787" ]
gpac
77510778516803b7f7402d7423c6d6bef50254c3
142,153,465,602,443,560,000,000,000,000,000,000,000
10
fixed #2255
fr_archive_libarchive_paste_clipboard (FrArchive *archive, GFile *archive_file, char *password, gboolean encrypt_header, FrCompression compression, guint volume_size, FrClipboardOp op, char *base_dir, GList *files, GFile *tmp_dir, char *current_dir, GCancellable *cancellable, GAsyncReadyCallback callback, gpointer user_data) { AddData *add_data; GList *scan; g_return_if_fail (base_dir != NULL); add_data = add_data_new (); current_dir = current_dir + 1; for (scan = files; scan; scan = scan->next) { const char *old_name = (char *) scan->data; char *new_name; GFile *file; new_name = g_build_filename (current_dir, old_name + strlen (base_dir) - 1, NULL); file = _g_file_append_path (tmp_dir, old_name, NULL); g_hash_table_insert (add_data->files_to_add, new_name, add_file_new (file, new_name)); add_data->n_files_to_add++; g_object_unref (file); } _fr_archive_libarchive_save (archive, FALSE, password, encrypt_header, compression, volume_size, cancellable, g_simple_async_result_new (G_OBJECT (archive), callback, user_data, fr_archive_paste_clipboard), _add_files_begin, _add_files_end, _add_files_entry_action, add_data, (GDestroyNotify) add_data_free); }
0
[ "CWE-22" ]
file-roller
b147281293a8307808475e102a14857055f81631
163,318,643,521,138,100,000,000,000,000,000,000,000
53
libarchive: sanitize filenames before extracting
static int spl_ptr_pqueue_zval_cmp(spl_ptr_heap_element a, spl_ptr_heap_element b, void* object TSRMLS_DC) { /* {{{ */ zval result; zval **a_priority_pp = spl_pqueue_extract_helper((zval **)&a, SPL_PQUEUE_EXTR_PRIORITY); zval **b_priority_pp = spl_pqueue_extract_helper((zval **)&b, SPL_PQUEUE_EXTR_PRIORITY); if ((!a_priority_pp) || (!b_priority_pp)) { zend_error(E_RECOVERABLE_ERROR, "Unable to extract from the PriorityQueue node"); return 0; } if (EG(exception)) { return 0; } if (object) { spl_heap_object *heap_object = (spl_heap_object*)zend_object_store_get_object(object TSRMLS_CC); if (heap_object->fptr_cmp) { long lval = 0; if (spl_ptr_heap_cmp_cb_helper((zval *)object, heap_object, *a_priority_pp, *b_priority_pp, &lval TSRMLS_CC) == FAILURE) { /* exception or call failure */ return 0; } return lval; } } INIT_ZVAL(result); compare_function(&result, *a_priority_pp, *b_priority_pp TSRMLS_CC); return Z_LVAL(result); }
0
[]
php-src
1cbd25ca15383394ffa9ee8601c5de4c0f2f90e1
113,587,060,893,034,870,000,000,000,000,000,000,000
29
Fix bug #69737 - Segfault when SplMinHeap::compare produces fatal error
struct snd_kcontrol *snd_ctl_find_id(struct snd_card *card, struct snd_ctl_elem_id *id) { struct snd_kcontrol *kctl; if (snd_BUG_ON(!card || !id)) return NULL; if (id->numid != 0) return snd_ctl_find_numid(card, id->numid); #ifdef CONFIG_SND_CTL_FAST_LOOKUP kctl = xa_load(&card->ctl_hash, get_ctl_id_hash(id)); if (kctl && elem_id_matches(kctl, id)) return kctl; if (!card->ctl_hash_collision) return NULL; /* we can rely on only hash table */ #endif /* no matching in hash table - try all as the last resort */ list_for_each_entry(kctl, &card->controls, list) if (elem_id_matches(kctl, id)) return kctl; return NULL; }
0
[ "CWE-416", "CWE-125" ]
linux
6ab55ec0a938c7f943a4edba3d6514f775983887
274,817,149,862,866,700,000,000,000,000,000,000,000
23
ALSA: control: Fix an out-of-bounds bug in get_ctl_id_hash() Since the user can control the arguments provided to the kernel by the ioctl() system call, an out-of-bounds bug occurs when the 'id->name' provided by the user does not end with '\0'. The following log can reveal it: [ 10.002313] BUG: KASAN: stack-out-of-bounds in snd_ctl_find_id+0x36c/0x3a0 [ 10.002895] Read of size 1 at addr ffff888109f5fe28 by task snd/439 [ 10.004934] Call Trace: [ 10.007140] snd_ctl_find_id+0x36c/0x3a0 [ 10.007489] snd_ctl_ioctl+0x6cf/0x10e0 Fix this by checking the bound of 'id->name' in the loop. Fixes: c27e1efb61c5 ("ALSA: control: Use xarray for faster lookups") Signed-off-by: Zheyu Ma <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Takashi Iwai <[email protected]>
static void on_btn_repeat_cb(GtkButton *button) { g_auto_event_list = g_list_prepend(g_auto_event_list, g_event_selected); g_event_selected = NULL; show_next_step_button(); clear_warnings(); const gint current_page_no = gtk_notebook_get_current_page(g_assistant); const int next_page_no = select_next_page_no(pages[PAGENO_SUMMARY].page_no, NULL); if (current_page_no == next_page_no) on_page_prepare(g_assistant, gtk_notebook_get_nth_page(g_assistant, next_page_no), NULL); else gtk_notebook_set_current_page(g_assistant, next_page_no); }
0
[ "CWE-200" ]
libreport
257578a23d1537a2d235aaa2b1488ee4f818e360
91,927,638,840,600,220,000,000,000,000,000,000,000
15
wizard: fix save users changes after reviewing dump dir files If the user reviewed the dump dir's files during reporting the crash, the changes was thrown away and original data was passed to the bugzilla bug report. report-gtk saves the first text view buffer and then reloads data from the reported problem directory, which causes that the changes made to those text views are thrown away. Function save_text_if_changed(), except of saving text, also reload the files from dump dir and update gui state from the dump dir. The commit moves the reloading and updating gui functions away from this function. Related to rhbz#1270235 Signed-off-by: Matej Habrnal <[email protected]>
static int __init ftrace_dyn_table_alloc(unsigned long num_to_init) { int cnt; if (!num_to_init) { pr_info("ftrace: No functions to be traced?\n"); return -1; } cnt = num_to_init / ENTRIES_PER_PAGE; pr_info("ftrace: allocating %ld entries in %d pages\n", num_to_init, cnt + 1); return 0; }
0
[ "CWE-703" ]
linux
6a76f8c0ab19f215af2a3442870eeb5f0e81998d
122,602,902,387,030,220,000,000,000,000,000,000,000
15
tracing: Fix possible NULL pointer dereferences Currently set_ftrace_pid and set_graph_function files use seq_lseek for their fops. However seq_open() is called only for FMODE_READ in the fops->open() so that if an user tries to seek one of those file when she open it for writing, it sees NULL seq_file and then panic. It can be easily reproduced with following command: $ cd /sys/kernel/debug/tracing $ echo 1234 | sudo tee -a set_ftrace_pid In this example, GNU coreutils' tee opens the file with fopen(, "a") and then the fopen() internally calls lseek(). Link: http://lkml.kernel.org/r/[email protected] Cc: Frederic Weisbecker <[email protected]> Cc: Ingo Molnar <[email protected]> Cc: Namhyung Kim <[email protected]> Cc: [email protected] Signed-off-by: Namhyung Kim <[email protected]> Signed-off-by: Steven Rostedt <[email protected]>
Item_splocal_row_field(THD *thd, const Sp_rcontext_handler *rh, const LEX_CSTRING *sp_var_name, const LEX_CSTRING *sp_field_name, uint sp_var_idx, uint sp_field_idx, const Type_handler *handler, uint pos_in_q= 0, uint len_in_q= 0) :Item_splocal(thd, rh, sp_var_name, sp_var_idx, handler, pos_in_q, len_in_q), m_field_name(*sp_field_name), m_field_idx(sp_field_idx) { }
0
[ "CWE-617" ]
server
807945f2eb5fa22e6f233cc17b85a2e141efe2c8
319,066,510,601,727,850,000,000,000,000,000,000,000
11
MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order... When doing condition pushdown from HAVING into WHERE, Item_equal::create_pushable_equalities() calls item->set_extraction_flag(IMMUTABLE_FL) for constant items. Then, Item::cleanup_excluding_immutables_processor() checks for this flag to see if it should call item->cleanup() or leave the item as-is. The failure happens when a constant item has a non-constant one inside it, like: (tbl.col=0 AND impossible_cond) item->walk(cleanup_excluding_immutables_processor) works in a bottom-up way so it 1. will call Item_func_eq(tbl.col=0)->cleanup() 2. will not call Item_cond_and->cleanup (as the AND is constant) This creates an item tree where a fixed Item has an un-fixed Item inside it which eventually causes an assertion failure. Fixed by introducing this rule: instead of just calling item->set_extraction_flag(IMMUTABLE_FL); we call Item::walk() to set the flag for all sub-items of the item.
void show_help_default(const char *opt, const char *arg) { av_log_set_callback(log_callback_help); show_usage(); show_help_options(options, "Main options:", 0, 0, 0); printf("\n"); show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM); show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM); }
0
[ "CWE-476" ]
FFmpeg
837cb4325b712ff1aab531bf41668933f61d75d2
301,145,102,275,808,200,000,000,000,000,000,000,000
10
ffprobe: Fix null pointer dereference with color primaries Found-by: AD-lab of venustech Signed-off-by: Michael Niedermayer <[email protected]>
void rtl8xxxu_gen2_disable_rf(struct rtl8xxxu_priv *priv) { u32 val32; val32 = rtl8xxxu_read32(priv, REG_RX_WAIT_CCA); val32 &= ~(BIT(22) | BIT(23)); rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, val32); }
0
[ "CWE-400", "CWE-401" ]
linux
a2cdd07488e666aa93a49a3fc9c9b1299e27ef3c
53,484,721,703,799,840,000,000,000,000,000,000,000
8
rtl8xxxu: prevent leaking urb In rtl8xxxu_submit_int_urb if usb_submit_urb fails the allocated urb should be released. Signed-off-by: Navid Emamdoost <[email protected]> Reviewed-by: Chris Chiu <[email protected]> Signed-off-by: Kalle Valo <[email protected]>
static Image *ReadMATImage(const ImageInfo *image_info,ExceptionInfo *exception) { Image *image, *image2=NULL, *rotated_image; PixelPacket *q; unsigned int status; MATHeader MATLAB_HDR; size_t size; size_t CellType; QuantumInfo *quantum_info; ImageInfo *clone_info; int i; ssize_t ldblk; unsigned char *BImgBuff = NULL; double MinVal, MaxVal; size_t Unknown6; unsigned z, z2; unsigned Frames; int logging; int sample_size; MagickOffsetType filepos=0x80; unsigned int (*ReadBlobXXXLong)(Image *image); unsigned short (*ReadBlobXXXShort)(Image *image); void (*ReadBlobDoublesXXX)(Image * image, size_t len, double *data); void (*ReadBlobFloatsXXX)(Image * image, size_t len, float *data); assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); logging = LogMagickEvent(CoderEvent,GetMagickModule(),"enter"); /* Open image file. */ quantum_info=(QuantumInfo *) NULL; image = AcquireImage(image_info); image2 = (Image *) NULL; status = OpenBlob(image_info, image, ReadBinaryBlobMode, exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read MATLAB image. */ clone_info=(ImageInfo *) NULL; if(ReadBlob(image,124,(unsigned char *) &MATLAB_HDR.identific) != 124) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (strncmp(MATLAB_HDR.identific,"MATLAB",6) != 0) { image=ReadMATImageV4(image_info,image,exception); if (image == NULL) { if ((image != image2) && (image2 != (Image *) NULL)) image2=DestroyImage(image2); if (clone_info != (ImageInfo *) NULL) clone_info=DestroyImageInfo(clone_info); return((Image *) NULL); } goto END_OF_READING; } MATLAB_HDR.Version = ReadBlobLSBShort(image); if(ReadBlob(image,2,(unsigned char *) &MATLAB_HDR.EndianIndicator) != 2) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule()," Endian %c%c", MATLAB_HDR.EndianIndicator[0],MATLAB_HDR.EndianIndicator[1]); if (!strncmp(MATLAB_HDR.EndianIndicator, "IM", 2)) { ReadBlobXXXLong = ReadBlobLSBLong; ReadBlobXXXShort = ReadBlobLSBShort; ReadBlobDoublesXXX = ReadBlobDoublesLSB; ReadBlobFloatsXXX = ReadBlobFloatsLSB; image->endian = LSBEndian; } else if (!strncmp(MATLAB_HDR.EndianIndicator, "MI", 2)) { ReadBlobXXXLong = ReadBlobMSBLong; ReadBlobXXXShort = ReadBlobMSBShort; ReadBlobDoublesXXX = ReadBlobDoublesMSB; ReadBlobFloatsXXX = ReadBlobFloatsMSB; image->endian = MSBEndian; } else { MATLAB_KO: if ((image != image2) && (image2 != (Image *) NULL)) image2=DestroyImage(image2); if (clone_info != (ImageInfo *) NULL) clone_info=DestroyImageInfo(clone_info); ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } filepos = TellBlob(image); while(!EOFBlob(image)) /* object parser loop */ { Frames = 1; if (filepos != (unsigned int) filepos) break; if(SeekBlob(image,filepos,SEEK_SET) != filepos) break; /* printf("pos=%X\n",TellBlob(image)); */ MATLAB_HDR.DataType = ReadBlobXXXLong(image); if(EOFBlob(image)) break; MATLAB_HDR.ObjectSize = ReadBlobXXXLong(image); if(EOFBlob(image)) break; if((MagickSizeType) (MATLAB_HDR.ObjectSize+filepos) > GetBlobSize(image)) goto MATLAB_KO; filepos += (MagickOffsetType) MATLAB_HDR.ObjectSize + 4 + 4; if (clone_info != (ImageInfo *) NULL) clone_info=DestroyImageInfo(clone_info); clone_info=CloneImageInfo(image_info); if ((image != image2) && (image2 != (Image *) NULL)) image2=DestroyImage(image2); image2 = image; #if defined(MAGICKCORE_ZLIB_DELEGATE) if(MATLAB_HDR.DataType == miCOMPRESSED) { image2 = decompress_block(image,&MATLAB_HDR.ObjectSize,clone_info,exception); if(image2==NULL) continue; MATLAB_HDR.DataType = ReadBlobXXXLong(image2); /* replace compressed object type. */ } #endif if (MATLAB_HDR.DataType != miMATRIX) { clone_info=DestroyImageInfo(clone_info); #if defined(MAGICKCORE_ZLIB_DELEGATE) if (image2 != image) DeleteImageFromList(&image2); #endif continue; /* skip another objects. */ } MATLAB_HDR.unknown1 = ReadBlobXXXLong(image2); MATLAB_HDR.unknown2 = ReadBlobXXXLong(image2); MATLAB_HDR.unknown5 = ReadBlobXXXLong(image2); MATLAB_HDR.StructureClass = MATLAB_HDR.unknown5 & 0xFF; MATLAB_HDR.StructureFlag = (MATLAB_HDR.unknown5>>8) & 0xFF; MATLAB_HDR.unknown3 = ReadBlobXXXLong(image2); if(image!=image2) MATLAB_HDR.unknown4 = ReadBlobXXXLong(image2); /* ??? don't understand why ?? */ MATLAB_HDR.unknown4 = ReadBlobXXXLong(image2); MATLAB_HDR.DimFlag = ReadBlobXXXLong(image2); MATLAB_HDR.SizeX = ReadBlobXXXLong(image2); MATLAB_HDR.SizeY = ReadBlobXXXLong(image2); switch(MATLAB_HDR.DimFlag) { case 8: z2=z=1; break; /* 2D matrix*/ case 12: z2=z = ReadBlobXXXLong(image2); /* 3D matrix RGB*/ Unknown6 = ReadBlobXXXLong(image2); (void) Unknown6; if(z!=3) { if (clone_info != (ImageInfo *) NULL) clone_info=DestroyImageInfo(clone_info); if ((image != image2) && (image2 != (Image *) NULL)) image2=DestroyImage(image2); ThrowReaderException(CoderError, "MultidimensionalMatricesAreNotSupported"); } break; case 16: z2=z = ReadBlobXXXLong(image2); /* 4D matrix animation */ if(z!=3 && z!=1) { if (clone_info != (ImageInfo *) NULL) clone_info=DestroyImageInfo(clone_info); if ((image != image2) && (image2 != (Image *) NULL)) image2=DestroyImage(image2); ThrowReaderException(CoderError, "MultidimensionalMatricesAreNotSupported"); } Frames = ReadBlobXXXLong(image2); if (Frames == 0) { if (clone_info != (ImageInfo *) NULL) clone_info=DestroyImageInfo(clone_info); if ((image != image2) && (image2 != (Image *) NULL)) image2=DestroyImage(image2); ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } if (AcquireMagickResource(ListLengthResource,Frames) == MagickFalse) { if (clone_info != (ImageInfo *) NULL) clone_info=DestroyImageInfo(clone_info); if ((image != image2) && (image2 != (Image *) NULL)) image2=DestroyImage(image2); ThrowReaderException(ResourceLimitError,"ListLengthExceedsLimit"); } break; default: if (clone_info != (ImageInfo *) NULL) clone_info=DestroyImageInfo(clone_info); if ((image != image2) && (image2 != (Image *) NULL)) image2=DestroyImage(image2); ThrowReaderException(CoderError, "MultidimensionalMatricesAreNotSupported"); } MATLAB_HDR.Flag1 = ReadBlobXXXShort(image2); MATLAB_HDR.NameFlag = ReadBlobXXXShort(image2); if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(), "MATLAB_HDR.StructureClass %d",MATLAB_HDR.StructureClass); if (MATLAB_HDR.StructureClass != mxCHAR_CLASS && MATLAB_HDR.StructureClass != mxSINGLE_CLASS && /* float + complex float */ MATLAB_HDR.StructureClass != mxDOUBLE_CLASS && /* double + complex double */ MATLAB_HDR.StructureClass != mxINT8_CLASS && MATLAB_HDR.StructureClass != mxUINT8_CLASS && /* uint8 + uint8 3D */ MATLAB_HDR.StructureClass != mxINT16_CLASS && MATLAB_HDR.StructureClass != mxUINT16_CLASS && /* uint16 + uint16 3D */ MATLAB_HDR.StructureClass != mxINT32_CLASS && MATLAB_HDR.StructureClass != mxUINT32_CLASS && /* uint32 + uint32 3D */ MATLAB_HDR.StructureClass != mxINT64_CLASS && MATLAB_HDR.StructureClass != mxUINT64_CLASS) /* uint64 + uint64 3D */ { if ((image2 != (Image*) NULL) && (image2 != image)) { CloseBlob(image2); DeleteImageFromList(&image2); } if (clone_info != (ImageInfo *) NULL) clone_info=DestroyImageInfo(clone_info); ThrowReaderException(CoderError,"UnsupportedCellTypeInTheMatrix"); } switch (MATLAB_HDR.NameFlag) { case 0: size = ReadBlobXXXLong(image2); /* Object name string size */ size = 4 * (((size_t) size + 3 + 1) / 4); (void) SeekBlob(image2, size, SEEK_CUR); break; case 1: case 2: case 3: case 4: (void) ReadBlob(image2, 4, (unsigned char *) &size); /* Object name string */ break; default: goto MATLAB_KO; } CellType = ReadBlobXXXLong(image2); /* Additional object type */ if (logging) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "MATLAB_HDR.CellType: %.20g",(double) CellType); /* data size */ if (ReadBlob(image2, 4, (unsigned char *) &size) != 4) goto MATLAB_KO; NEXT_FRAME: switch (CellType) { case miINT8: case miUINT8: sample_size = 8; if(MATLAB_HDR.StructureFlag & FLAG_LOGICAL) image->depth = 1; else image->depth = 8; /* Byte type cell */ ldblk = (ssize_t) MATLAB_HDR.SizeX; break; case miINT16: case miUINT16: sample_size = 16; image->depth = 16; /* Word type cell */ ldblk = (ssize_t) (2 * MATLAB_HDR.SizeX); break; case miINT32: case miUINT32: sample_size = 32; image->depth = 32; /* Dword type cell */ ldblk = (ssize_t) (4 * MATLAB_HDR.SizeX); break; case miINT64: case miUINT64: sample_size = 64; image->depth = 64; /* Qword type cell */ ldblk = (ssize_t) (8 * MATLAB_HDR.SizeX); break; case miSINGLE: sample_size = 32; image->depth = 32; /* double type cell */ (void) SetImageOption(clone_info,"quantum:format","floating-point"); if (MATLAB_HDR.StructureFlag & FLAG_COMPLEX) { /* complex float type cell */ } ldblk = (ssize_t) (4 * MATLAB_HDR.SizeX); break; case miDOUBLE: sample_size = 64; image->depth = 64; /* double type cell */ (void) SetImageOption(clone_info,"quantum:format","floating-point"); DisableMSCWarning(4127) if (sizeof(double) != 8) RestoreMSCWarning { if (clone_info != (ImageInfo *) NULL) clone_info=DestroyImageInfo(clone_info); if ((image != image2) && (image2 != (Image *) NULL)) image2=DestroyImage(image2); ThrowReaderException(CoderError, "IncompatibleSizeOfDouble"); } if (MATLAB_HDR.StructureFlag & FLAG_COMPLEX) { /* complex double type cell */ } ldblk = (ssize_t) (8 * MATLAB_HDR.SizeX); break; default: if ((image != image2) && (image2 != (Image *) NULL)) image2=DestroyImage(image2); if (clone_info) clone_info=DestroyImageInfo(clone_info); ThrowReaderException(CoderError, "UnsupportedCellTypeInTheMatrix"); } (void) sample_size; image->columns = MATLAB_HDR.SizeX; image->rows = MATLAB_HDR.SizeY; image->colors = GetQuantumRange(image->depth); if (image->columns == 0 || image->rows == 0) goto MATLAB_KO; if((unsigned int)ldblk*MATLAB_HDR.SizeY > MATLAB_HDR.ObjectSize) goto MATLAB_KO; /* Image is gray when no complex flag is set and 2D Matrix */ if ((MATLAB_HDR.DimFlag == 8) && ((MATLAB_HDR.StructureFlag & FLAG_COMPLEX) == 0)) { SetImageColorspace(image,GRAYColorspace); image->type=GrayscaleType; } /* If ping is true, then only set image size and colors without reading any image data. */ if (image_info->ping) { size_t temp = image->columns; image->columns = image->rows; image->rows = temp; goto done_reading; /* !!!!!! BAD !!!! */ } status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { if (clone_info != (ImageInfo *) NULL) clone_info=DestroyImageInfo(clone_info); if ((image != image2) && (image2 != (Image *) NULL)) image2=DestroyImage(image2); InheritException(exception,&image->exception); return(DestroyImageList(image)); } (void) SetImageBackgroundColor(image); quantum_info=AcquireQuantumInfo(clone_info,image); if (quantum_info == (QuantumInfo *) NULL) { if (clone_info != (ImageInfo *) NULL) clone_info=DestroyImageInfo(clone_info); if ((image != image2) && (image2 != (Image *) NULL)) image2=DestroyImage(image2); if (quantum_info != (QuantumInfo *) NULL) quantum_info=DestroyQuantumInfo(quantum_info); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } /* ----- Load raster data ----- */ BImgBuff = (unsigned char *) AcquireQuantumMemory((size_t) (ldblk),sizeof(double)); /* Ldblk was set in the check phase */ if (BImgBuff == NULL) { if (clone_info != (ImageInfo *) NULL) clone_info=DestroyImageInfo(clone_info); if ((image != image2) && (image2 != (Image *) NULL)) image2=DestroyImage(image2); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } (void) memset(BImgBuff,0,ldblk*sizeof(double)); MinVal = 0; MaxVal = 0; if (CellType==miDOUBLE || CellType==miSINGLE) /* Find Min and Max Values for floats */ { CalcMinMax(image2, image_info->endian, MATLAB_HDR.SizeX, MATLAB_HDR.SizeY, CellType, ldblk, BImgBuff, &quantum_info->minimum, &quantum_info->maximum); } /* Main loop for reading all scanlines */ if(z==1) z=0; /* read grey scanlines */ /* else read color scanlines */ do { for (i = 0; i < (ssize_t) MATLAB_HDR.SizeY; i++) { q=GetAuthenticPixels(image,0,MATLAB_HDR.SizeY-i-1,image->columns,1,exception); if (q == (PixelPacket *) NULL) { if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(), " MAT set image pixels returns unexpected NULL on a row %u.", (unsigned)(MATLAB_HDR.SizeY-i-1)); goto done_reading; /* Skip image rotation, when cannot set image pixels */ } if(ReadBlob(image2,ldblk,(unsigned char *)BImgBuff) != (ssize_t) ldblk) { if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(), " MAT cannot read scanrow %u from a file.", (unsigned)(MATLAB_HDR.SizeY-i-1)); goto ExitLoop; } if((CellType==miINT8 || CellType==miUINT8) && (MATLAB_HDR.StructureFlag & FLAG_LOGICAL)) { FixLogical((unsigned char *)BImgBuff,ldblk); if(ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,z2qtype[z],BImgBuff,exception) <= 0) { ImportQuantumPixelsFailed: if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(), " MAT failed to ImportQuantumPixels for a row %u", (unsigned)(MATLAB_HDR.SizeY-i-1)); break; } } else { if(ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,z2qtype[z],BImgBuff,exception) <= 0) goto ImportQuantumPixelsFailed; if (z<=1 && /* fix only during a last pass z==0 || z==1 */ (CellType==miINT8 || CellType==miINT16 || CellType==miINT32 || CellType==miINT64)) FixSignedValues(q,MATLAB_HDR.SizeX); } if (!SyncAuthenticPixels(image,exception)) { if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(), " MAT failed to sync image pixels for a row %u", (unsigned)(MATLAB_HDR.SizeY-i-1)); goto ExitLoop; } } } while(z-- >= 2); ExitLoop: /* Read complex part of numbers here */ if (MATLAB_HDR.StructureFlag & FLAG_COMPLEX) { /* Find Min and Max Values for complex parts of floats */ CellType = ReadBlobXXXLong(image2); /* Additional object type */ i = ReadBlobXXXLong(image2); /* size of a complex part - toss away*/ if (CellType==miDOUBLE || CellType==miSINGLE) { CalcMinMax(image2, image_info->endian, MATLAB_HDR.SizeX, MATLAB_HDR.SizeY, CellType, ldblk, BImgBuff, &MinVal, &MaxVal); } if (CellType==miDOUBLE) for (i = 0; i < (ssize_t) MATLAB_HDR.SizeY; i++) { ReadBlobDoublesXXX(image2, ldblk, (double *)BImgBuff); if (EOFBlob(image) != MagickFalse) break; InsertComplexDoubleRow((double *)BImgBuff, i, image, MinVal, MaxVal); } if (CellType==miSINGLE) for (i = 0; i < (ssize_t) MATLAB_HDR.SizeY; i++) { ReadBlobFloatsXXX(image2, ldblk, (float *)BImgBuff); if (EOFBlob(image) != MagickFalse) break; InsertComplexFloatRow((float *)BImgBuff, i, image, MinVal, MaxVal); } } /* Image is gray when no complex flag is set and 2D Matrix AGAIN!!! */ if ((MATLAB_HDR.DimFlag == 8) && ((MATLAB_HDR.StructureFlag & FLAG_COMPLEX) == 0)) image->type=GrayscaleType; if (image->depth == 1) image->type=BilevelType; if(image2==image) image2 = NULL; /* Remove shadow copy to an image before rotation. */ /* Rotate image. */ rotated_image = RotateImage(image, 90.0, exception); if (rotated_image != (Image *) NULL) { /* Remove page offsets added by RotateImage */ rotated_image->page.x=0; rotated_image->page.y=0; rotated_image->colors = image->colors; DestroyBlob(rotated_image); rotated_image->blob=ReferenceBlob(image->blob); AppendImageToList(&image,rotated_image); DeleteImageFromList(&image); } done_reading: if(image2!=NULL) if(image2!=image) { DeleteImageFromList(&image2); if(clone_info) { if(clone_info->file) { fclose(clone_info->file); clone_info->file = NULL; (void) remove_utf8(clone_info->filename); } } } if (EOFBlob(image) != MagickFalse) break; /* Allocate next image structure. */ AcquireNextImage(image_info,image); if (image->next == (Image *) NULL) break; image=SyncNextImageInList(image); image->columns=image->rows=0; image->colors=0; /* row scan buffer is no longer needed */ RelinquishMagickMemory(BImgBuff); BImgBuff = NULL; if (quantum_info != (QuantumInfo *) NULL) quantum_info=DestroyQuantumInfo(quantum_info); if(--Frames>0) { z = z2; if(image2==NULL) image2 = image; if(!EOFBlob(image) && TellBlob(image)<filepos) goto NEXT_FRAME; } if(image2!=NULL) if(image2!=image) /* Does shadow temporary decompressed image exist? */ { /* CloseBlob(image2); */ DeleteImageFromList(&image2); if(clone_info) { if(clone_info->file) { fclose(clone_info->file); clone_info->file = NULL; (void) unlink(clone_info->filename); } } } if (clone_info) clone_info=DestroyImageInfo(clone_info); } END_OF_READING: RelinquishMagickMemory(BImgBuff); if (quantum_info != (QuantumInfo *) NULL) quantum_info=DestroyQuantumInfo(quantum_info); CloseBlob(image); { Image *p; ssize_t scene=0; /* Rewind list, removing any empty images while rewinding. */ p=image; image=NULL; while (p != (Image *) NULL) { Image *tmp=p; if ((p->rows == 0) || (p->columns == 0)) { p=p->previous; if (tmp == image2) image2=(Image *) NULL; DeleteImageFromList(&tmp); } else { image=p; p=p->previous; } } /* Fix scene numbers */ for (p=image; p != (Image *) NULL; p=p->next) p->scene=scene++; } if(clone_info != NULL) /* cleanup garbage file from compression */ { if(clone_info->file) { fclose(clone_info->file); clone_info->file = NULL; (void) remove_utf8(clone_info->filename); } DestroyImageInfo(clone_info); clone_info = NULL; } if (logging) (void)LogMagickEvent(CoderEvent,GetMagickModule(),"return"); if ((image != image2) && (image2 != (Image *) NULL)) image2=DestroyImage(image2); if (image == (Image *) NULL) ThrowReaderException(CorruptImageError,"ImproperImageHeader") return(image); }
1
[ "CWE-416" ]
ImageMagick6
5caef6e97f3f575cf7bea497865a4c1e624b8010
327,416,794,131,943,170,000,000,000,000,000,000,000
617
https://github.com/ImageMagick/ImageMagick/issues/1554
static UINT drive_process_irp_query_information(DRIVE_DEVICE* drive, IRP* irp) { DRIVE_FILE* file; UINT32 FsInformationClass; if (!drive || !irp || !irp->Complete) return ERROR_INVALID_PARAMETER; if (Stream_GetRemainingLength(irp->input) < 4) return ERROR_INVALID_DATA; Stream_Read_UINT32(irp->input, FsInformationClass); file = drive_get_file_by_id(drive, irp->FileId); if (!file) { irp->IoStatus = STATUS_UNSUCCESSFUL; } else if (!drive_file_query_information(file, FsInformationClass, irp->output)) { irp->IoStatus = drive_map_windows_err(GetLastError()); } return irp->Complete(irp); }
0
[ "CWE-125" ]
FreeRDP
6b485b146a1b9d6ce72dfd7b5f36456c166e7a16
118,095,259,644,824,920,000,000,000,000,000,000,000
25
Fixed oob read in irp_write and similar
static float powf64(float a, float b) { return powf_lim(a, b, 64.f); }
0
[ "CWE-476", "CWE-119" ]
LibRaw
d7c3d2cb460be10a3ea7b32e9443a83c243b2251
336,075,806,727,647,170,000,000,000,000,000,000,000
1
Secunia SA75000 advisory: several buffer overruns
CAMLprim value caml_alloc_dummy_float (value size) { mlsize_t wosize = Int_val(size) * Double_wosize; if (wosize == 0) return Atom(0); return caml_alloc (wosize, 0); }
1
[ "CWE-200" ]
ocaml
659615c7b100a89eafe6253e7a5b9d84d0e8df74
190,583,755,313,770,470,000,000,000,000,000,000,000
7
fix PR#7003 and a few other bugs caused by misuse of Int_val git-svn-id: http://caml.inria.fr/svn/ocaml/trunk@16525 f963ae5c-01c2-4b8c-9fe0-0dff7051ff02
char *bitmap_list_string(char *buf, char *end, unsigned long *bitmap, struct printf_spec spec, const char *fmt) { int nr_bits = max_t(int, spec.field_width, 0); /* current bit is 'cur', most recently seen range is [rbot, rtop] */ int cur, rbot, rtop; bool first = true; rbot = cur = find_first_bit(bitmap, nr_bits); while (cur < nr_bits) { rtop = cur; cur = find_next_bit(bitmap, nr_bits, cur + 1); if (cur < nr_bits && cur <= rtop + 1) continue; if (!first) { if (buf < end) *buf = ','; buf++; } first = false; buf = number(buf, end, rbot, default_dec_spec); if (rbot < rtop) { if (buf < end) *buf = '-'; buf++; buf = number(buf, end, rtop, default_dec_spec); } rbot = cur; } return buf; }
0
[ "CWE-200" ]
linux
91efafb1dd8f471177a3dddb4841d75d3df1cc46
321,867,259,995,876,960,000,000,000,000,000,000,000
35
lib/vsprintf: Replace space with '_' before crng is ready Before crng is ready, output of "%p" composes of "(ptrval)" and left padding spaces for alignment as no random address can be generated. This seems a little strange when default string width is larger than strlen("(ptrval)"). For example, when irq domain names are built with "%p", the nodes under /sys/kernel/debug/irq/domains like this on AArch64 system, [root@y irq]# ls domains/ default irqchip@ (ptrval)-2 irqchip@ (ptrval)-4 \_SB_.TCS0.QIC1 \_SB_.TCS0.QIC3 irqchip@ (ptrval) irqchip@ (ptrval)-3 \_SB_.TCS0.QIC0 \_SB_.TCS0.QIC2 The name "irqchip@ (ptrval)-2" is not so readable in console output. This patch replaces space with readable "_" when output needs padding. Following is the output after applying the patch, [root@y domains]# ls default irqchip@(____ptrval____)-2 irqchip@(____ptrval____)-4 \_SB_.TCS0.QIC1 \_SB_.TCS0.QIC3 irqchip@(____ptrval____) irqchip@(____ptrval____)-3 \_SB_.TCS0.QIC0 \_SB_.TCS0.QIC2 There is same problem in some subsystem's dmesg output. Moreover, someone may call "%p" in a similar case. In addition, the timing of crng initialization done may vary on different system. So, the change is made in vsprintf.c. Suggested-by: Rasmus Villemoes <[email protected]> Link: http://lkml.kernel.org/r/[email protected] To: "Tobin C . Harding" <[email protected]> To: [email protected] To: Joe Perches <[email protected]> To: [email protected] To: Andrew Morton <[email protected]> Cc: Joey Zheng <[email protected]> Signed-off-by: Shunyong Yang <[email protected]> Signed-off-by: Andy Shevchenko <[email protected]> Signed-off-by: Petr Mladek <[email protected]>
struct ext4_dir_entry_2 *ext4_init_dot_dotdot(struct inode *inode, struct ext4_dir_entry_2 *de, int blocksize, int csum_size, unsigned int parent_ino, int dotdot_real_len) { de->inode = cpu_to_le32(inode->i_ino); de->name_len = 1; de->rec_len = ext4_rec_len_to_disk(EXT4_DIR_REC_LEN(de->name_len), blocksize); strcpy(de->name, "."); ext4_set_de_type(inode->i_sb, de, S_IFDIR); de = ext4_next_entry(de, blocksize); de->inode = cpu_to_le32(parent_ino); de->name_len = 2; if (!dotdot_real_len) de->rec_len = ext4_rec_len_to_disk(blocksize - (csum_size + EXT4_DIR_REC_LEN(1)), blocksize); else de->rec_len = ext4_rec_len_to_disk( EXT4_DIR_REC_LEN(de->name_len), blocksize); strcpy(de->name, ".."); ext4_set_de_type(inode->i_sb, de, S_IFDIR); return ext4_next_entry(de, blocksize); }
0
[ "CWE-125" ]
linux
5872331b3d91820e14716632ebb56b1399b34fe1
293,249,186,579,148,570,000,000,000,000,000,000,000
27
ext4: fix potential negative array index in do_split() If for any reason a directory passed to do_split() does not have enough active entries to exceed half the size of the block, we can end up iterating over all "count" entries without finding a split point. In this case, count == move, and split will be zero, and we will attempt a negative index into map[]. Guard against this by detecting this case, and falling back to split-to-half-of-count instead; in this case we will still have plenty of space (> half blocksize) in each split block. Fixes: ef2b02d3e617 ("ext34: ensure do_split leaves enough free space in both blocks") Signed-off-by: Eric Sandeen <[email protected]> Reviewed-by: Andreas Dilger <[email protected]> Reviewed-by: Jan Kara <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Theodore Ts'o <[email protected]>
static int llc_ui_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; int rc = -EINVAL; lock_sock(sk); if (unlikely(sock->state != SS_UNCONNECTED)) goto out; rc = -EOPNOTSUPP; if (unlikely(sk->sk_type != SOCK_STREAM)) goto out; rc = -EAGAIN; if (sock_flag(sk, SOCK_ZAPPED)) goto out; rc = 0; if (!(unsigned int)backlog) /* BSDism */ backlog = 1; sk->sk_max_ack_backlog = backlog; if (sk->sk_state != TCP_LISTEN) { sk->sk_ack_backlog = 0; sk->sk_state = TCP_LISTEN; } sk->sk_socket->flags |= __SO_ACCEPTCON; out: release_sock(sk); return rc; }
0
[ "CWE-200" ]
net
b8670c09f37bdf2847cc44f36511a53afc6161fd
246,930,501,331,765,270,000,000,000,000,000,000,000
27
net: fix infoleak in llc The stack object “info” has a total size of 12 bytes. Its last byte is padding which is not initialized and leaked via “put_cmsg”. Signed-off-by: Kangjie Lu <[email protected]> Signed-off-by: David S. Miller <[email protected]>
bool vmx_nmi_blocked(struct kvm_vcpu *vcpu) { if (is_guest_mode(vcpu) && nested_exit_on_nmi(vcpu)) return false; if (!enable_vnmi && to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked) return true; return (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI | GUEST_INTR_STATE_NMI)); }
0
[ "CWE-787" ]
linux
04c4f2ee3f68c9a4bf1653d15f1a9a435ae33f7a
309,082,876,102,750,160,000,000,000,000,000,000,000
12
KVM: VMX: Don't use vcpu->run->internal.ndata as an array index __vmx_handle_exit() uses vcpu->run->internal.ndata as an index for an array access. Since vcpu->run is (can be) mapped to a user address space with a writer permission, the 'ndata' could be updated by the user process at anytime (the user process can set it to outside the bounds of the array). So, it is not safe that __vmx_handle_exit() uses the 'ndata' that way. Fixes: 1aa561b1a4c0 ("kvm: x86: Add "last CPU" to some KVM_EXIT information") Signed-off-by: Reiji Watanabe <[email protected]> Reviewed-by: Jim Mattson <[email protected]> Message-Id: <[email protected]> Cc: [email protected] Signed-off-by: Paolo Bonzini <[email protected]>
static int prepare_uprobe(struct uprobe *uprobe, struct file *file, struct mm_struct *mm, unsigned long vaddr) { int ret = 0; if (test_bit(UPROBE_COPY_INSN, &uprobe->flags)) return ret; /* TODO: move this into _register, until then we abuse this sem. */ down_write(&uprobe->consumer_rwsem); if (test_bit(UPROBE_COPY_INSN, &uprobe->flags)) goto out; ret = copy_insn(uprobe, file); if (ret) goto out; ret = -ENOTSUPP; if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn)) goto out; ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr); if (ret) goto out; /* uprobe_write_opcode() assumes we don't cross page boundary */ BUG_ON((uprobe->offset & ~PAGE_MASK) + UPROBE_SWBP_INSN_SIZE > PAGE_SIZE); smp_wmb(); /* pairs with rmb() in find_active_uprobe() */ set_bit(UPROBE_COPY_INSN, &uprobe->flags); out: up_write(&uprobe->consumer_rwsem); return ret; }
0
[ "CWE-416" ]
linux
355627f518978b5167256d27492fe0b343aaf2f2
55,775,820,133,332,040,000,000,000,000,000,000,000
37
mm, uprobes: fix multiple free of ->uprobes_state.xol_area Commit 7c051267931a ("mm, fork: make dup_mmap wait for mmap_sem for write killable") made it possible to kill a forking task while it is waiting to acquire its ->mmap_sem for write, in dup_mmap(). However, it was overlooked that this introduced an new error path before the new mm_struct's ->uprobes_state.xol_area has been set to NULL after being copied from the old mm_struct by the memcpy in dup_mm(). For a task that has previously hit a uprobe tracepoint, this resulted in the 'struct xol_area' being freed multiple times if the task was killed at just the right time while forking. Fix it by setting ->uprobes_state.xol_area to NULL in mm_init() rather than in uprobe_dup_mmap(). With CONFIG_UPROBE_EVENTS=y, the bug can be reproduced by the same C program given by commit 2b7e8665b4ff ("fork: fix incorrect fput of ->exe_file causing use-after-free"), provided that a uprobe tracepoint has been set on the fork_thread() function. For example: $ gcc reproducer.c -o reproducer -lpthread $ nm reproducer | grep fork_thread 0000000000400719 t fork_thread $ echo "p $PWD/reproducer:0x719" > /sys/kernel/debug/tracing/uprobe_events $ echo 1 > /sys/kernel/debug/tracing/events/uprobes/enable $ ./reproducer Here is the use-after-free reported by KASAN: BUG: KASAN: use-after-free in uprobe_clear_state+0x1c4/0x200 Read of size 8 at addr ffff8800320a8b88 by task reproducer/198 CPU: 1 PID: 198 Comm: reproducer Not tainted 4.13.0-rc7-00015-g36fde05f3fb5 #255 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-20170228_101828-anatol 04/01/2014 Call Trace: dump_stack+0xdb/0x185 print_address_description+0x7e/0x290 kasan_report+0x23b/0x350 __asan_report_load8_noabort+0x19/0x20 uprobe_clear_state+0x1c4/0x200 mmput+0xd6/0x360 do_exit+0x740/0x1670 do_group_exit+0x13f/0x380 get_signal+0x597/0x17d0 do_signal+0x99/0x1df0 exit_to_usermode_loop+0x166/0x1e0 syscall_return_slowpath+0x258/0x2c0 entry_SYSCALL_64_fastpath+0xbc/0xbe ... Allocated by task 199: save_stack_trace+0x1b/0x20 kasan_kmalloc+0xfc/0x180 kmem_cache_alloc_trace+0xf3/0x330 __create_xol_area+0x10f/0x780 uprobe_notify_resume+0x1674/0x2210 exit_to_usermode_loop+0x150/0x1e0 prepare_exit_to_usermode+0x14b/0x180 retint_user+0x8/0x20 Freed by task 199: save_stack_trace+0x1b/0x20 kasan_slab_free+0xa8/0x1a0 kfree+0xba/0x210 uprobe_clear_state+0x151/0x200 mmput+0xd6/0x360 copy_process.part.8+0x605f/0x65d0 _do_fork+0x1a5/0xbd0 SyS_clone+0x19/0x20 do_syscall_64+0x22f/0x660 return_from_SYSCALL_64+0x0/0x7a Note: without KASAN, you may instead see a "Bad page state" message, or simply a general protection fault. Link: http://lkml.kernel.org/r/[email protected] Fixes: 7c051267931a ("mm, fork: make dup_mmap wait for mmap_sem for write killable") Signed-off-by: Eric Biggers <[email protected]> Reported-by: Oleg Nesterov <[email protected]> Acked-by: Oleg Nesterov <[email protected]> Cc: Alexander Shishkin <[email protected]> Cc: Arnaldo Carvalho de Melo <[email protected]> Cc: Dmitry Vyukov <[email protected]> Cc: Ingo Molnar <[email protected]> Cc: Konstantin Khlebnikov <[email protected]> Cc: Mark Rutland <[email protected]> Cc: Michal Hocko <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Vlastimil Babka <[email protected]> Cc: <[email protected]> [4.7+] Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
void bn_sqr_comba4(BN_ULONG *r, const BN_ULONG *a) { BN_ULONG t[8]; bn_sqr_normal(r,a,4,t); }
1
[ "CWE-310" ]
openssl
a7a44ba55cb4f884c6bc9ceac90072dea38e66d0
223,460,949,842,664,400,000,000,000,000,000,000,000
5
Fix for CVE-2014-3570 (with minor bn_asm.c revamp). Reviewed-by: Emilia Kasper <[email protected]>
spnego_gss_process_context_token( OM_uint32 *minor_status, const gss_ctx_id_t context_handle, const gss_buffer_t token_buffer) { OM_uint32 ret; spnego_gss_ctx_id_t sc = (spnego_gss_ctx_id_t)context_handle; /* SPNEGO doesn't have its own context tokens. */ if (!sc->opened) return (GSS_S_DEFECTIVE_TOKEN); ret = gss_process_context_token(minor_status, sc->ctx_handle, token_buffer); return (ret); }
0
[ "CWE-18", "CWE-763" ]
krb5
b51b33f2bc5d1497ddf5bd107f791c101695000d
166,933,982,573,921,700,000,000,000,000,000,000,000
18
Fix SPNEGO context aliasing bugs [CVE-2015-2695] The SPNEGO mechanism currently replaces its context handle with the mechanism context handle upon establishment, under the assumption that most GSS functions are only called after context establishment. This assumption is incorrect, and can lead to aliasing violations for some programs. Maintain the SPNEGO context structure after context establishment and refer to it in all GSS methods. Add initiate and opened flags to the SPNEGO context structure for use in gss_inquire_context() prior to context establishment. CVE-2015-2695: In MIT krb5 1.5 and later, applications which call gss_inquire_context() on a partially-established SPNEGO context can cause the GSS-API library to read from a pointer using the wrong type, generally causing a process crash. This bug may go unnoticed, because the most common SPNEGO authentication scenario establishes the context after just one call to gss_accept_sec_context(). Java server applications using the native JGSS provider are vulnerable to this bug. A carefully crafted SPNEGO packet might allow the gss_inquire_context() call to succeed with attacker-determined results, but applications should not make access control decisions based on gss_inquire_context() results prior to context establishment. CVSSv2 Vector: AV:N/AC:M/Au:N/C:N/I:N/A:C/E:POC/RL:OF/RC:C [[email protected]: several bugfixes, style changes, and edge-case behavior changes; commit message and CVE description] ticket: 8244 target_version: 1.14 tags: pullup
static int read_request_line(request_rec *r, apr_bucket_brigade *bb) { const char *ll; const char *uri; const char *pro; unsigned int major = 1, minor = 0; /* Assume HTTP/1.0 if non-"HTTP" protocol */ char http[5]; apr_size_t len; int num_blank_lines = 0; int max_blank_lines = r->server->limit_req_fields; core_server_config *conf = ap_get_core_module_config(r->server->module_config); int strict = conf->http_conformance & AP_HTTP_CONFORMANCE_STRICT; int enforce_strict = !(conf->http_conformance & AP_HTTP_CONFORMANCE_LOGONLY); if (max_blank_lines <= 0) { max_blank_lines = DEFAULT_LIMIT_REQUEST_FIELDS; } /* Read past empty lines until we get a real request line, * a read error, the connection closes (EOF), or we timeout. * * We skip empty lines because browsers have to tack a CRLF on to the end * of POSTs to support old CERN webservers. But note that we may not * have flushed any previous response completely to the client yet. * We delay the flush as long as possible so that we can improve * performance for clients that are pipelining requests. If a request * is pipelined then we won't block during the (implicit) read() below. * If the requests aren't pipelined, then the client is still waiting * for the final buffer flush from us, and we will block in the implicit * read(). B_SAFEREAD ensures that the BUFF layer flushes if it will * have to block during a read. */ do { apr_status_t rv; /* ensure ap_rgetline allocates memory each time thru the loop * if there are empty lines */ r->the_request = NULL; rv = ap_rgetline(&(r->the_request), (apr_size_t)(r->server->limit_req_line + 2), &len, r, 0, bb); if (rv != APR_SUCCESS) { r->request_time = apr_time_now(); /* ap_rgetline returns APR_ENOSPC if it fills up the * buffer before finding the end-of-line. This is only going to * happen if it exceeds the configured limit for a request-line. */ if (APR_STATUS_IS_ENOSPC(rv)) { r->status = HTTP_REQUEST_URI_TOO_LARGE; } else if (APR_STATUS_IS_TIMEUP(rv)) { r->status = HTTP_REQUEST_TIME_OUT; } else if (APR_STATUS_IS_EINVAL(rv)) { r->status = HTTP_BAD_REQUEST; } r->proto_num = HTTP_VERSION(1,0); r->protocol = apr_pstrdup(r->pool, "HTTP/1.0"); return 0; } } while ((len <= 0) && (++num_blank_lines < max_blank_lines)); if (APLOGrtrace5(r)) { ap_log_rerror(APLOG_MARK, APLOG_TRACE5, 0, r, "Request received from client: %s", ap_escape_logitem(r->pool, r->the_request)); } r->request_time = apr_time_now(); ll = r->the_request; r->method = ap_getword_white(r->pool, &ll); uri = ap_getword_white(r->pool, &ll); /* Provide quick information about the request method as soon as known */ r->method_number = ap_method_number_of(r->method); if (r->method_number == M_GET && r->method[0] == 'H') { r->header_only = 1; } ap_parse_uri(r, uri); if (ll[0]) { r->assbackwards = 0; pro = ll; len = strlen(ll); } else { r->assbackwards = 1; pro = "HTTP/0.9"; len = 8; if (conf->http09_enable == AP_HTTP09_DISABLE) { r->status = HTTP_VERSION_NOT_SUPPORTED; r->protocol = apr_pstrmemdup(r->pool, pro, len); /* If we deny 0.9, send error message with 1.x */ r->assbackwards = 0; r->proto_num = HTTP_VERSION(0, 9); r->connection->keepalive = AP_CONN_CLOSE; ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02401) "HTTP/0.9 denied by server configuration"); return 0; } } r->protocol = apr_pstrmemdup(r->pool, pro, len); /* Avoid sscanf in the common case */ if (len == 8 && pro[0] == 'H' && pro[1] == 'T' && pro[2] == 'T' && pro[3] == 'P' && pro[4] == '/' && apr_isdigit(pro[5]) && pro[6] == '.' && apr_isdigit(pro[7])) { r->proto_num = HTTP_VERSION(pro[5] - '0', pro[7] - '0'); } else { if (strict) { ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02418) "Invalid protocol '%s'", r->protocol); if (enforce_strict) { r->status = HTTP_BAD_REQUEST; return 0; } } if (3 == sscanf(r->protocol, "%4s/%u.%u", http, &major, &minor) && (strcasecmp("http", http) == 0) && (minor < HTTP_VERSION(1, 0)) ) { /* don't allow HTTP/0.1000 */ r->proto_num = HTTP_VERSION(major, minor); } else { r->proto_num = HTTP_VERSION(1, 0); } } if (strict) { int err = 0; if (ap_has_cntrl(r->the_request)) { ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02420) "Request line must not contain control characters"); err = HTTP_BAD_REQUEST; } if (r->parsed_uri.fragment) { /* RFC3986 3.5: no fragment */ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02421) "URI must not contain a fragment"); err = HTTP_BAD_REQUEST; } else if (r->parsed_uri.user || r->parsed_uri.password) { ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02422) "URI must not contain a username/password"); err = HTTP_BAD_REQUEST; } else if (r->method_number == M_INVALID) { ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02423) "Invalid HTTP method string: %s", r->method); err = HTTP_NOT_IMPLEMENTED; } else if (r->assbackwards == 0 && r->proto_num < HTTP_VERSION(1, 0)) { ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02424) "HTTP/0.x does not take a protocol"); err = HTTP_BAD_REQUEST; } if (err && enforce_strict) { r->status = err; return 0; } } return 1; }
0
[ "CWE-703" ]
httpd
be0f5335e3e73eb63253b050fdc23f252f5c8ae3
334,381,240,124,800,070,000,000,000,000,000,000,000
172
*) SECURITY: CVE-2015-0253 (cve.mitre.org) core: Fix a crash with ErrorDocument 400 pointing to a local URL-path with the INCLUDES filter active, introduced in 2.4.11. PR 57531. [Yann Ylavic] Submitted By: ylavic Committed By: covener git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1664205 13f79535-47bb-0310-9956-ffa450edef68
static unsigned int macsec_extra_len(bool sci_present) { return macsec_sectag_len(sci_present) + sizeof(__be16); }
0
[ "CWE-119" ]
net
5294b83086cc1c35b4efeca03644cf9d12282e5b
132,466,264,884,338,630,000,000,000,000,000,000,000
4
macsec: dynamically allocate space for sglist We call skb_cow_data, which is good anyway to ensure we can actually modify the skb as such (another error from prior). Now that we have the number of fragments required, we can safely allocate exactly that amount of memory. Fixes: c09440f7dcb3 ("macsec: introduce IEEE 802.1AE driver") Signed-off-by: Jason A. Donenfeld <[email protected]> Acked-by: Sabrina Dubroca <[email protected]> Signed-off-by: David S. Miller <[email protected]>
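The allocation pattern this commit describes — size the scatterlist from the fragment count that skb_cow_data() just reported, instead of a fixed on-stack array — can be sketched in user-space C. The names scatterlist_stub and alloc_sg are illustrative stand-ins, not kernel API:

#include <stdlib.h>

struct scatterlist_stub { void *page; size_t offset; size_t len; };

/* nfrags comes from the cow/fragment-count step; allocate exactly that
 * many entries (plus a terminator slot) instead of guessing a maximum. */
static struct scatterlist_stub *alloc_sg(int nfrags)
{
    if (nfrags < 0)
        return NULL;
    return calloc((size_t)nfrags + 1, sizeof(struct scatterlist_stub));
}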
void open_project(char *project_filename) { /* TODO: check if layers is modified and show it to user. */ if (mainProject->last_loaded >= 0 && !interface_get_alert_dialog_response ( _("Do you want to close any open layers and load " "an existing project?"), _("Loading a project will cause all currently open " "layers to be closed. Any unsaved changes " "will be lost."), FALSE, NULL, GTK_STOCK_CLOSE, GTK_STOCK_CANCEL)) { return; } /* Update the last folder */ g_free (mainProject->path); mainProject->path = g_strdup(project_filename); gerbv_unload_all_layers (mainProject); main_open_project_from_filename (mainProject, project_filename); }
0
[ "CWE-200" ]
gerbv
319a8af890e4d0a5c38e6d08f510da8eefc42537
166,275,841,162,345,730,000,000,000,000,000,000,000
24
Remove local alias to parameter array Normalizing access to `gerbv_simplified_amacro_t::parameter` as a step to fix CVE-2021-40402
bool bson_iter_find_case (bson_iter_t *iter, /* INOUT */ const char *key) /* IN */ { BSON_ASSERT (iter); BSON_ASSERT (key); while (bson_iter_next (iter)) { if (!bson_strcasecmp (key, bson_iter_key (iter))) { return true; } } return false; }
0
[ "CWE-125" ]
libbson
42900956dc461dfe7fb91d93361d10737c1602b3
309,140,193,859,384,170,000,000,000,000,000,000,000
14
CDRIVER-2269 Check for zero string length in codewscope
my_bool opt_flush_ok_packet(MYSQL *mysql, my_bool *is_ok_packet) { ulong packet_length= cli_safe_read(mysql); if (packet_length == packet_error) return TRUE; /* cli_safe_read always reads a non-empty packet. */ DBUG_ASSERT(packet_length); *is_ok_packet= mysql->net.read_pos[0] == 0; if (*is_ok_packet) { uchar *pos= mysql->net.read_pos + 1; net_field_length_ll(&pos); /* affected rows */ net_field_length_ll(&pos); /* insert id */ mysql->server_status=uint2korr(pos); pos+=2; if (protocol_41(mysql)) { mysql->warning_count=uint2korr(pos); pos+=2; } } return FALSE; }
0
[ "CWE-254" ]
mysql-server
13380bf81f6bc20d39549f531f9acebdfb5a8c37
156,841,840,909,114,680,000,000,000,000,000,000,000
29
Bug #22295186: CERTIFICATE VALIDATION BUG IN MYSQL MAY ALLOW MITM
netsnmp_pdu * snmp_fix_pdu(netsnmp_pdu *pdu, int command) { netsnmp_pdu *newpdu; if ((pdu->command != SNMP_MSG_RESPONSE) || (pdu->errstat == SNMP_ERR_NOERROR) || (NULL == pdu->variables) || (pdu->errindex > (int)snmp_varbind_len(pdu)) || (pdu->errindex <= 0)) { return NULL; /* pre-condition tests fail */ } newpdu = _clone_pdu(pdu, 1); /* copies all except errored variable */ if (!newpdu) return NULL; if (!newpdu->variables) { snmp_free_pdu(newpdu); return NULL; /* no variables. "should not happen" */ } newpdu->command = command; newpdu->reqid = snmp_get_next_reqid(); newpdu->msgid = snmp_get_next_msgid(); newpdu->errstat = SNMP_DEFAULT_ERRSTAT; newpdu->errindex = SNMP_DEFAULT_ERRINDEX; return newpdu; }
0
[ "CWE-415" ]
net-snmp
5f881d3bf24599b90d67a45cae7a3eb099cd71c9
180,116,869,278,057,240,000,000,000,000,000,000,000
27
libsnmp, USM: Introduce a reference count in struct usmStateReference. This patch fixes https://sourceforge.net/p/net-snmp/bugs/2956/.
int install_process_keyring_to_cred(struct cred *new) { struct key *keyring; if (new->process_keyring) return 0; keyring = keyring_alloc("_pid", new->uid, new->gid, new, KEY_POS_ALL | KEY_USR_VIEW, KEY_ALLOC_QUOTA_OVERRUN, NULL, NULL); if (IS_ERR(keyring)) return PTR_ERR(keyring); new->process_keyring = keyring; return 0; }
0
[ "CWE-399", "CWE-404" ]
linux
c9f838d104fed6f2f61d68164712e3204bf5271b
105,329,037,171,485,960,000,000,000,000,000,000,000
17
KEYS: fix keyctl_set_reqkey_keyring() to not leak thread keyrings This fixes CVE-2017-7472. Running the following program as an unprivileged user exhausts kernel memory by leaking thread keyrings: #include <keyutils.h> int main() { for (;;) keyctl_set_reqkey_keyring(KEY_REQKEY_DEFL_THREAD_KEYRING); } Fix it by only creating a new thread keyring if there wasn't one before. To make things more consistent, make install_thread_keyring_to_cred() and install_process_keyring_to_cred() both return 0 if the corresponding keyring is already present. Fixes: d84f4f992cbd ("CRED: Inaugurate COW credentials") Cc: [email protected] # 2.6.29+ Signed-off-by: Eric Biggers <[email protected]> Signed-off-by: David Howells <[email protected]>
static int get_reg_offset(struct insn *insn, struct pt_regs *regs, enum reg_type type) { int regno = 0; static const int regoff[] = { offsetof(struct pt_regs, ax), offsetof(struct pt_regs, cx), offsetof(struct pt_regs, dx), offsetof(struct pt_regs, bx), offsetof(struct pt_regs, sp), offsetof(struct pt_regs, bp), offsetof(struct pt_regs, si), offsetof(struct pt_regs, di), #ifdef CONFIG_X86_64 offsetof(struct pt_regs, r8), offsetof(struct pt_regs, r9), offsetof(struct pt_regs, r10), offsetof(struct pt_regs, r11), offsetof(struct pt_regs, r12), offsetof(struct pt_regs, r13), offsetof(struct pt_regs, r14), offsetof(struct pt_regs, r15), #endif }; int nr_registers = ARRAY_SIZE(regoff); /* * Don't possibly decode a 32-bit instructions as * reading a 64-bit-only register. */ if (IS_ENABLED(CONFIG_X86_64) && !insn->x86_64) nr_registers -= 8; switch (type) { case REG_TYPE_RM: regno = X86_MODRM_RM(insn->modrm.value); /* * ModRM.mod == 0 and ModRM.rm == 5 means a 32-bit displacement * follows the ModRM byte. */ if (!X86_MODRM_MOD(insn->modrm.value) && regno == 5) return -EDOM; if (X86_REX_B(insn->rex_prefix.value)) regno += 8; break; case REG_TYPE_INDEX: regno = X86_SIB_INDEX(insn->sib.value); if (X86_REX_X(insn->rex_prefix.value)) regno += 8; /* * If ModRM.mod != 3 and SIB.index = 4 the scale*index * portion of the address computation is null. This is * true only if REX.X is 0. In such a case, the SIB index * is used in the address computation. */ if (X86_MODRM_MOD(insn->modrm.value) != 3 && regno == 4) return -EDOM; break; case REG_TYPE_BASE: regno = X86_SIB_BASE(insn->sib.value); /* * If ModRM.mod is 0 and SIB.base == 5, the base of the * register-indirect addressing is 0. In this case, a * 32-bit displacement follows the SIB byte. */ if (!X86_MODRM_MOD(insn->modrm.value) && regno == 5) return -EDOM; if (X86_REX_B(insn->rex_prefix.value)) regno += 8; break; default: pr_err_ratelimited("invalid register type: %d\n", type); return -EINVAL; } if (regno >= nr_registers) { WARN_ONCE(1, "decoded an instruction with an invalid register"); return -EINVAL; } return regoff[regno]; }
0
[ "CWE-416", "CWE-362" ]
linux
de9f869616dd95e95c00bdd6b0fcd3421e8a4323
24,742,853,381,212,667,000,000,000,000,000,000,000
88
x86/insn-eval: Fix use-after-free access to LDT entry get_desc() computes a pointer into the LDT while holding a lock that protects the LDT from being freed, but then drops the lock and returns the (now potentially dangling) pointer to its caller. Fix it by giving the caller a copy of the LDT entry instead. Fixes: 670f928ba09b ("x86/insn-eval: Add utility function to get segment descriptor") Cc: [email protected] Signed-off-by: Jann Horn <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
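A minimal user-space sketch of the shape of this fix — copy the entry out while the protecting lock is held, rather than returning a pointer that dangles once the lock drops. struct desc, table_lock and get_desc_copy are hypothetical names, not the kernel's:

#include <pthread.h>
#include <stdbool.h>
#include <string.h>

struct desc { unsigned long base; unsigned long limit; };

static struct desc *table;       /* may be freed/replaced by another thread */
static size_t table_len;
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Safe shape: the caller gets a snapshot that stays valid after unlock. */
static bool get_desc_copy(size_t idx, struct desc *out)
{
    bool ok = false;

    pthread_mutex_lock(&table_lock);
    if (table != NULL && idx < table_len) {
        memcpy(out, &table[idx], sizeof(*out)); /* copy while protected */
        ok = true;
    }
    pthread_mutex_unlock(&table_lock);
    return ok;                   /* no pointer into 'table' escapes */
}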
static void TranslateSFWMarker(unsigned char *marker) { switch (marker[1]) { case 0xc8: marker[1]=0xd8; break; /* soi */ case 0xd0: marker[1]=0xe0; break; /* app */ case 0xcb: marker[1]=0xdb; break; /* dqt */ case 0xa0: marker[1]=0xc0; break; /* sof */ case 0xa4: marker[1]=0xc4; break; /* sof */ case 0xca: marker[1]=0xda; break; /* sos */ case 0xc9: marker[1]=0xd9; break; /* eoi */ default: break; } }
0
[ "CWE-119" ]
ImageMagick
d4145e664aea3752ca6d3bf1ee825352b595dab5
259,880,778,978,260,800,000,000,000,000,000,000,000
14
https://github.com/ImageMagick/ImageMagick/issues/682
decompilePUSHDUP (SWF_ACTION *act) { SanityCheck(SWF_PUSHDUP, act->SWF_ACTIONRECORD.ActionCode == SWFACTION_PUSHDUP, "not a PUSHDUP") pushdup(); }
0
[ "CWE-119", "CWE-125" ]
libming
da9d86eab55cbf608d5c916b8b690f5b76bca462
262,854,836,694,167,770,000,000,000,000,000,000,000
7
decompileAction: Prevent heap buffer overflow and underflow when using OpCode
int strlen16 (unsigned short *s) { int len = 0; while (*s) { s++; len++; } return len; }
0
[ "CWE-119", "CWE-787" ]
libgd
77f619d48259383628c3ec4654b1ad578e9eb40e
124,696,469,421,988,060,000,000,000,000,000,000,000
9
fix #215 gdImageFillToBorder stack-overflow when invalid color is used
static inline void rb_event_discard(struct ring_buffer_event *event) { if (extended_time(event)) event = skip_time_extend(event); /* array[0] holds the actual length for the discarded event */ event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE; event->type_len = RINGBUF_TYPE_PADDING; /* time delta must be non zero */ if (!event->time_delta) event->time_delta = 1; }
0
[ "CWE-362" ]
linux
bbeb97464eefc65f506084fd9f18f21653e01137
165,085,246,883,141,400,000,000,000,000,000,000,000
12
tracing: Fix race in trace_open and buffer resize call The following race can occur if trace_open and a resize of the cpu buffer run in parallel on different cpus: CPUX runs ring_buffer_resize (atomic_read(&buffer->resize_disabled), rb_update_pages, remove/insert pages) while CPUY runs tracing_open -> tracing_reset_online_cpus -> ring_buffer_reset_cpu -> rb_reset_cpu, resetting pointers. This race can cause a data abort or sometimes an infinite loop in rb_remove_pages and rb_insert_pages while checking pages for sanity. Take the buffer lock to fix this. Link: https://lkml.kernel.org/r/[email protected] Cc: [email protected] Fixes: b23d7a5f4a07a ("ring-buffer: speed up buffer resets by avoiding synchronize_rcu for each CPU") Signed-off-by: Gaurav Kohli <[email protected]> Signed-off-by: Steven Rostedt (VMware) <[email protected]>
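The locking discipline the fix adds can be shown with a small pthread analogue: reset and resize serialize on the same per-buffer mutex, so neither can observe the other mid-update. All names here are user-space stand-ins, not the ring-buffer code itself:

#include <pthread.h>

struct ring_buffer_stub {
    pthread_mutex_t mutex;   /* assumed initialized with pthread_mutex_init() */
    int nr_pages;
    int head, tail;
};

static void rb_reset(struct ring_buffer_stub *b)
{
    pthread_mutex_lock(&b->mutex);   /* the lock this path was missing */
    b->head = 0;
    b->tail = 0;
    pthread_mutex_unlock(&b->mutex);
}

static void rb_resize(struct ring_buffer_stub *b, int nr_pages)
{
    pthread_mutex_lock(&b->mutex);
    b->nr_pages = nr_pages;          /* pages removed/inserted atomically */
    pthread_mutex_unlock(&b->mutex);
}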
run (const gchar *name, gint nparams, const GimpParam *param, gint *nreturn_vals, GimpParam **return_vals) { static GimpParam values[2]; GimpRunMode run_mode; GimpPDBStatusType status = GIMP_PDB_SUCCESS; gint32 image_ID; gint32 drawable_ID; GimpExportReturn export = GIMP_EXPORT_CANCEL; GError *error = NULL; INIT_I18N (); gegl_init (NULL, NULL); run_mode = param[0].data.d_int32; *nreturn_vals = 1; *return_vals = values; values[0].type = GIMP_PDB_STATUS; values[0].data.d_status = GIMP_PDB_EXECUTION_ERROR; if (strcmp (name, LOAD_PROC) == 0) { image_ID = load_image (g_file_new_for_uri (param[1].data.d_string), &error); if (image_ID != -1) { *nreturn_vals = 2; values[1].type = GIMP_PDB_IMAGE; values[1].data.d_image = image_ID; } else { status = GIMP_PDB_EXECUTION_ERROR; } } else if (strcmp (name, SAVE_PROC) == 0) { GFile *file; GimpParasite *parasite; gint32 orig_image_ID; image_ID = param[1].data.d_int32; drawable_ID = param[2].data.d_int32; file = g_file_new_for_uri (param[3].data.d_string); orig_image_ID = image_ID; switch (run_mode) { case GIMP_RUN_INTERACTIVE: case GIMP_RUN_WITH_LAST_VALS: gimp_ui_init (PLUG_IN_BINARY, FALSE); export = gimp_export_image (&image_ID, &drawable_ID, "GBR", GIMP_EXPORT_CAN_HANDLE_GRAY | GIMP_EXPORT_CAN_HANDLE_RGB | GIMP_EXPORT_CAN_HANDLE_INDEXED | GIMP_EXPORT_CAN_HANDLE_ALPHA); if (export == GIMP_EXPORT_CANCEL) { values[0].data.d_status = GIMP_PDB_CANCEL; return; } /* Possibly retrieve data */ gimp_get_data (SAVE_PROC, &info); parasite = gimp_image_get_parasite (orig_image_ID, "gimp-brush-name"); if (parasite) { strncpy (info.description, gimp_parasite_data (parasite), MIN (sizeof (info.description), gimp_parasite_data_size (parasite))); info.description[sizeof (info.description) - 1] = '\0'; gimp_parasite_free (parasite); } else { gchar *name = g_path_get_basename (gimp_file_get_utf8_name (file)); if (g_str_has_suffix (name, ".gbr")) name[strlen (name) - 4] = '\0'; if (strlen (name)) { strncpy (info.description, name, sizeof (info.description)); info.description[sizeof (info.description) - 1] = '\0'; } g_free (name); } break; default: break; } switch (run_mode) { case GIMP_RUN_INTERACTIVE: if (! save_dialog ()) status = GIMP_PDB_CANCEL; break; case GIMP_RUN_NONINTERACTIVE: if (nparams != 7) { status = GIMP_PDB_CALLING_ERROR; } else { info.spacing = (param[5].data.d_int32); strncpy (info.description, param[6].data.d_string, sizeof (info.description)); info.description[sizeof (info.description) - 1] = '\0'; } break; default: break; } if (status == GIMP_PDB_SUCCESS) { if (save_image (file, image_ID, drawable_ID, &error)) { gimp_set_data (SAVE_PROC, &info, sizeof (info)); } else { status = GIMP_PDB_EXECUTION_ERROR; } } if (export == GIMP_EXPORT_EXPORT) gimp_image_delete (image_ID); if (strlen (info.description)) { GimpParasite *parasite; parasite = gimp_parasite_new ("gimp-brush-name", GIMP_PARASITE_PERSISTENT, strlen (info.description) + 1, info.description); gimp_image_attach_parasite (orig_image_ID, parasite); gimp_parasite_free (parasite); } else { gimp_image_detach_parasite (orig_image_ID, "gimp-brush-name"); } } else { status = GIMP_PDB_CALLING_ERROR; } if (status != GIMP_PDB_SUCCESS && error) { *nreturn_vals = 2; values[1].type = GIMP_PDB_STRING; values[1].data.d_string = error->message; } values[0].data.d_status = status; }
0
[ "CWE-125" ]
gimp
06d24a79af94837d615d0024916bb95a01bf3c59
324,154,281,999,626,700,000,000,000,000,000,000,000
177
Bug 790784 - (CVE-2017-17784) heap overread in gbr parser / load_image. We were assuming the input name was well formed, hence was nul-terminated. As any data coming from external input, this has to be thorougly checked.
void CertDecoder::GetName(NameType nt) { if (source_.GetError().What()) return; SHA sha; word32 length = GetSequence(); // length of all distinguished names if (length >= ASN_NAME_MAX) return; if (source_.IsLeft(length) == false) return; length += source_.get_index(); char* ptr; char* buf_end; if (nt == ISSUER) { ptr = issuer_; buf_end = ptr + sizeof(issuer_) - 1; // 1 byte for trailing 0 } else { ptr = subject_; buf_end = ptr + sizeof(subject_) - 1; // 1 byte for trailing 0 } while (source_.get_index() < length) { GetSet(); if (source_.GetError().What() == SET_E) { source_.SetError(NO_ERROR_E); // extensions may only have sequence source_.prev(); } GetSequence(); byte b = source_.next(); if (b != OBJECT_IDENTIFIER) { source_.SetError(OBJECT_ID_E); return; } word32 oidSz = GetLength(source_); if (source_.IsLeft(oidSz) == false) return; byte joint[2]; if (source_.IsLeft(sizeof(joint)) == false) return; memcpy(joint, source_.get_current(), sizeof(joint)); // v1 name types if (joint[0] == 0x55 && joint[1] == 0x04) { source_.advance(2); byte id = source_.next(); b = source_.next(); // strType word32 strLen = GetLength(source_); if (source_.IsLeft(strLen) == false) return; switch (id) { case COMMON_NAME: if (!(ptr = AddTag(ptr, buf_end, "/CN=", 4, strLen))) return; break; case SUR_NAME: if (!(ptr = AddTag(ptr, buf_end, "/SN=", 4, strLen))) return; break; case COUNTRY_NAME: if (!(ptr = AddTag(ptr, buf_end, "/C=", 3, strLen))) return; break; case LOCALITY_NAME: if (!(ptr = AddTag(ptr, buf_end, "/L=", 3, strLen))) return; break; case STATE_NAME: if (!(ptr = AddTag(ptr, buf_end, "/ST=", 4, strLen))) return; break; case ORG_NAME: if (!(ptr = AddTag(ptr, buf_end, "/O=", 3, strLen))) return; break; case ORGUNIT_NAME: if (!(ptr = AddTag(ptr, buf_end, "/OU=", 4, strLen))) return; break; } sha.Update(source_.get_current(), strLen); source_.advance(strLen); } else { bool email = false; if (joint[0] == 0x2a && joint[1] == 0x86) // email id hdr email = true; source_.advance(oidSz + 1); word32 length = GetLength(source_); if (source_.IsLeft(length) == false) return; if (email) { if (!(ptr = AddTag(ptr, buf_end, "/emailAddress=", 14, length))) return; } source_.advance(length); } } *ptr = 0; if (nt == ISSUER) sha.Final(issuerHash_); else sha.Final(subjectHash_); }
1
[ "CWE-254" ]
mysql-server
e7061f7e5a96c66cb2e0bf46bec7f6ff35801a69
232,338,863,828,165,000,000,000,000,000,000,000,000
113
Bug #22738607: YASSL FUNCTION X509_NAME_GET_INDEX_BY_NID IS NOT WORKING AS EXPECTED.
static int bad_inode_mknod (struct inode *dir, struct dentry *dentry, int mode, dev_t rdev) { return -EIO; }
0
[]
linux-2.6
be6aab0e9fa6d3c6d75aa1e38ac972d8b4ee82b8
151,512,264,163,497,950,000,000,000,000,000,000,000
5
[PATCH] fix memory corruption from misinterpreted bad_inode_ops return values CVE-2006-5753 is for a case where an inode can be marked bad, switching the ops to bad_inode_ops, which are all connected as: static int return_EIO(void) { return -EIO; } #define EIO_ERROR ((void *) (return_EIO)) static struct inode_operations bad_inode_ops = { .create = bad_inode_create ...etc... The problem here is that the void cast causes return types to not be promoted, and for ops such as listxattr which expect more than 32 bits of return value, the 32-bit -EIO is interpreted as a large positive 64-bit number, i.e. 0x00000000fffffffa instead of 0xfffffffa. This goes particularly badly when the return value is taken as a number of bytes to copy into, say, a user's buffer for example... I originally had coded up the fix by creating a return_EIO_<TYPE> macro for each return type, like this: static int return_EIO_int(void) { return -EIO; } #define EIO_ERROR_INT ((void *) (return_EIO_int)) static struct inode_operations bad_inode_ops = { .create = EIO_ERROR_INT, ...etc... but Al felt that it was probably better to create an EIO-returner for each actual op signature. Since so few ops share a signature, I just went ahead & created an EIO function for each individual file & inode op that returns a value. Signed-off-by: Eric Sandeen <[email protected]> Cc: Al Viro <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
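The message's point about return-type promotion is easy to demonstrate: give each signature its own -EIO stub so the compiler sign-extends correctly, instead of casting one int-returning function to every pointer type. This is a self-contained sketch, not the kernel's bad_inode_ops:

#include <errno.h>
#include <stdio.h>
#include <sys/types.h>

static int bad_create(void *dir, void *dentry, int mode)
{
    (void)dir; (void)dentry; (void)mode;
    return -EIO;                       /* 32-bit return, correctly typed */
}

static ssize_t bad_listxattr(void *dentry, char *list, size_t size)
{
    (void)dentry; (void)list; (void)size;
    return -EIO;                       /* 64-bit return, sign-extended */
}

struct inode_ops {
    int     (*create)(void *, void *, int);
    ssize_t (*listxattr)(void *, char *, size_t);
};

static const struct inode_ops bad_ops = {
    .create    = bad_create,           /* no (void *) cast, no misread */
    .listxattr = bad_listxattr,
};

int main(void)
{
    printf("create    -> %d\n",  bad_ops.create(NULL, NULL, 0));
    printf("listxattr -> %zd\n", bad_ops.listxattr(NULL, NULL, 0));
    return 0;                          /* prints -5 twice, never 4294967291 */
}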
xmlNodePtr xmlXPathNextDescendant(xmlXPathParserContextPtr ctxt, xmlNodePtr cur) { if ((ctxt == NULL) || (ctxt->context == NULL)) return(NULL); if (cur == NULL) { if (ctxt->context->node == NULL) return(NULL); if ((ctxt->context->node->type == XML_ATTRIBUTE_NODE) || (ctxt->context->node->type == XML_NAMESPACE_DECL)) return(NULL); if (ctxt->context->node == (xmlNodePtr) ctxt->context->doc) return(ctxt->context->doc->children); return(ctxt->context->node->children); } if (cur->type == XML_NAMESPACE_DECL) return(NULL); if (cur->children != NULL) { /* * Do not descend on entity declarations */ if (cur->children->type != XML_ENTITY_DECL) { cur = cur->children; /* * Skip DTDs */ if (cur->type != XML_DTD_NODE) return(cur); } } if (cur == ctxt->context->node) return(NULL); while (cur->next != NULL) { cur = cur->next; if ((cur->type != XML_ENTITY_DECL) && (cur->type != XML_DTD_NODE)) return(cur); } do { cur = cur->parent; if (cur == NULL) break; if (cur == ctxt->context->node) return(NULL); if (cur->next != NULL) { cur = cur->next; return(cur); } } while (cur != NULL); return(cur); }
0
[]
libxml2
03c6723043775122313f107695066e5744189a08
76,374,820,783,237,480,000,000,000,000,000,000,000
50
Handling of XPath function arguments in error case The XPath engine tries to guarantee that every XPath function can pop 'nargs' non-NULL values off the stack. libxslt, for example, relies on this assumption. But the check isn't thorough enough if there are errors during the evaluation of arguments. This can lead to segfaults: https://mail.gnome.org/archives/xslt/2013-December/msg00005.html This commit makes the handling of function arguments more robust. * Bail out early when evaluation of XPath function arguments fails. * Make sure that there are 'nargs' arguments in the current call frame.
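The guarantee described — a function sees exactly nargs non-NULL values on the stack, and evaluation bails at the first failed argument — can be captured in a small checker. This is an illustrative engine fragment, not libxml2's xpath.c:

#include <stdbool.h>
#include <stddef.h>

#define MAX_STACK 16

struct ctx {
    void *stack[MAX_STACK];
    int top;        /* number of valid entries */
    int error;      /* set if an earlier argument failed */
};

static bool check_args(struct ctx *c, int nargs)
{
    if (c->error)
        return false;               /* earlier arg failed: bail out early */
    if (c->top < nargs)
        return false;               /* fewer values than promised */
    for (int i = 0; i < nargs; i++)
        if (c->stack[c->top - 1 - i] == NULL)
            return false;           /* never hand a NULL to the function */
    return true;
}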
static int check_reqid(struct xfrm_policy *xp, int dir, int count, void *ptr) { int i; u32 reqid = *(u32*)ptr; for (i=0; i<xp->xfrm_nr; i++) { if (xp->xfrm_vec[i].reqid == reqid) return -EEXIST; } return 0; }
0
[]
linux
096f41d3a8fcbb8dde7f71379b1ca85fe213eded
23,343,429,592,050,593,000,000,000,000,000,000,000
11
af_key: Fix sadb_x_ipsecrequest parsing The parsing of sadb_x_ipsecrequest is broken in a number of ways. First of all we're not verifying sadb_x_ipsecrequest_len. This is needed when the structure carries addresses at the end. Worse we don't even look at the length when we parse those optional addresses. The migration code had similar parsing code that's better but it also has some deficiencies. The length is overcounted first of all as it includes the header itself. It also fails to check the length before dereferencing the sa_family field. This patch fixes those problems in parse_sockaddr_pair and then uses it in parse_ipsecrequest. Reported-by: Andrey Konovalov <[email protected]> Signed-off-by: Herbert Xu <[email protected]> Signed-off-by: Steffen Klassert <[email protected]>
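A hedged sketch of the parsing order the commit calls for: validate the advertised TLV length against the buffer, and against the minimum size of any trailing address, before dereferencing anything. The struct layout is simplified and hypothetical:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct ipsecrequest_hdr {
    uint16_t len;        /* total length of header + optional addresses */
    uint16_t proto;
};

struct sockaddr_stub { uint16_t sa_family; char sa_data[14]; };

static bool parse_ipsecrequest(const uint8_t *buf, size_t buflen)
{
    const struct ipsecrequest_hdr *h = (const void *)buf;

    if (buflen < sizeof(*h))
        return false;                      /* header itself must fit */
    if (h->len < sizeof(*h) || h->len > buflen)
        return false;                      /* advertised length must fit too */

    size_t rest = h->len - sizeof(*h);
    if (rest != 0 && rest < sizeof(struct sockaddr_stub))
        return false;                      /* don't read sa_family past end */
    return true;
}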
vol_print(netdissect_options *ndo, register const u_char *bp, int length) { int vol_op; if (length <= (int)sizeof(struct rx_header)) return; if (ndo->ndo_snapend - bp + 1 <= (int)(sizeof(struct rx_header) + sizeof(int32_t))) { goto trunc; } /* * Print out the afs call we're invoking. The table used here was * gleaned from volser/volint.xg */ vol_op = EXTRACT_32BITS(bp + sizeof(struct rx_header)); ND_PRINT((ndo, " vol call %s", tok2str(vol_req, "op#%d", vol_op))); bp += sizeof(struct rx_header) + 4; switch (vol_op) { case 100: /* Create volume */ ND_PRINT((ndo, " partition")); UINTOUT(); ND_PRINT((ndo, " name")); STROUT(AFSNAMEMAX); ND_PRINT((ndo, " type")); UINTOUT(); ND_PRINT((ndo, " parent")); UINTOUT(); break; case 101: /* Delete volume */ case 107: /* Get flags */ ND_PRINT((ndo, " trans")); UINTOUT(); break; case 102: /* Restore */ ND_PRINT((ndo, " totrans")); UINTOUT(); ND_PRINT((ndo, " flags")); UINTOUT(); break; case 103: /* Forward */ ND_PRINT((ndo, " fromtrans")); UINTOUT(); ND_PRINT((ndo, " fromdate")); DATEOUT(); DESTSERVEROUT(); ND_PRINT((ndo, " desttrans")); INTOUT(); break; case 104: /* End trans */ ND_PRINT((ndo, " trans")); UINTOUT(); break; case 105: /* Clone */ ND_PRINT((ndo, " trans")); UINTOUT(); ND_PRINT((ndo, " purgevol")); UINTOUT(); ND_PRINT((ndo, " newtype")); UINTOUT(); ND_PRINT((ndo, " newname")); STROUT(AFSNAMEMAX); break; case 106: /* Set flags */ ND_PRINT((ndo, " trans")); UINTOUT(); ND_PRINT((ndo, " flags")); UINTOUT(); break; case 108: /* Trans create */ ND_PRINT((ndo, " vol")); UINTOUT(); ND_PRINT((ndo, " partition")); UINTOUT(); ND_PRINT((ndo, " flags")); UINTOUT(); break; case 109: /* Dump */ case 655537: /* Get size */ ND_PRINT((ndo, " fromtrans")); UINTOUT(); ND_PRINT((ndo, " fromdate")); DATEOUT(); break; case 110: /* Get n-th volume */ ND_PRINT((ndo, " index")); UINTOUT(); break; case 111: /* Set forwarding */ ND_PRINT((ndo, " tid")); UINTOUT(); ND_PRINT((ndo, " newsite")); UINTOUT(); break; case 112: /* Get name */ case 113: /* Get status */ ND_PRINT((ndo, " tid")); break; case 114: /* Signal restore */ ND_PRINT((ndo, " name")); STROUT(AFSNAMEMAX); ND_PRINT((ndo, " type")); UINTOUT(); ND_PRINT((ndo, " pid")); UINTOUT(); ND_PRINT((ndo, " cloneid")); UINTOUT(); break; case 116: /* List volumes */ ND_PRINT((ndo, " partition")); UINTOUT(); ND_PRINT((ndo, " flags")); UINTOUT(); break; case 117: /* Set id types */ ND_PRINT((ndo, " tid")); UINTOUT(); ND_PRINT((ndo, " name")); STROUT(AFSNAMEMAX); ND_PRINT((ndo, " type")); UINTOUT(); ND_PRINT((ndo, " pid")); UINTOUT(); ND_PRINT((ndo, " clone")); UINTOUT(); ND_PRINT((ndo, " backup")); UINTOUT(); break; case 119: /* Partition info */ ND_PRINT((ndo, " name")); STROUT(AFSNAMEMAX); break; case 120: /* Reclone */ ND_PRINT((ndo, " tid")); UINTOUT(); break; case 121: /* List one volume */ case 122: /* Nuke volume */ case 124: /* Extended List volumes */ case 125: /* Extended List one volume */ case 65536: /* Convert RO to RW volume */ ND_PRINT((ndo, " partid")); UINTOUT(); ND_PRINT((ndo, " volid")); UINTOUT(); break; case 123: /* Set date */ ND_PRINT((ndo, " tid")); UINTOUT(); ND_PRINT((ndo, " date")); DATEOUT(); break; case 126: /* Set info */ ND_PRINT((ndo, " tid")); UINTOUT(); break; case 128: /* Forward multiple */ ND_PRINT((ndo, " fromtrans")); UINTOUT(); ND_PRINT((ndo, " fromdate")); DATEOUT(); { unsigned long i, j; ND_TCHECK2(bp[0], 4); j = EXTRACT_32BITS(bp); bp += sizeof(int32_t); for (i = 0; i < j; i++) { DESTSERVEROUT(); if (i != j - 1) ND_PRINT((ndo, ",")); } if (j == 0) ND_PRINT((ndo, " <none!>")); } 
break; case 65538: /* Dump version 2 */ ND_PRINT((ndo, " fromtrans")); UINTOUT(); ND_PRINT((ndo, " fromdate")); DATEOUT(); ND_PRINT((ndo, " flags")); UINTOUT(); break; default: ; } return; trunc: ND_PRINT((ndo, " [|vol]")); }
0
[ "CWE-125" ]
tcpdump
c24922e692a52121e853a84ead6b9337f4c08a94
302,540,800,248,122,500,000,000,000,000,000,000,000
196
(for 4.9.3) CVE-2018-14466/Rx: fix an over-read bug In rx_cache_insert() and rx_cache_find() properly read the serviceId field of the rx_header structure as a 16-bit integer. When those functions tried to read 32 bits the extra 16 bits could be outside of the bounds checked in rx_print() for the rx_header structure, as serviceId is the last field in that structure. This fixes a buffer over-read discovered by Bhargava Shastry, SecT/TU Berlin. Add a test using the capture file supplied by the reporter(s).
Mat_PrintNumber(enum matio_types type, void *data) { switch ( type ) { case MAT_T_DOUBLE: printf("%g",*(double*)data); break; case MAT_T_SINGLE: printf("%g",*(float*)data); break; #ifdef HAVE_MAT_INT64_T case MAT_T_INT64: #if HAVE_INTTYPES_H printf("%" PRIi64,*(mat_int64_t*)data); #elif defined(_MSC_VER) && _MSC_VER >= 1200 printf("%I64i",*(mat_int64_t*)data); #elif defined(HAVE_LONG_LONG_INT) printf("%lld",(long long)(*(mat_int64_t*)data)); #else printf("%ld",(long)(*(mat_int64_t*)data)); #endif break; #endif #ifdef HAVE_MAT_UINT64_T case MAT_T_UINT64: #if HAVE_INTTYPES_H printf("%" PRIu64,*(mat_uint64_t*)data); #elif defined(_MSC_VER) && _MSC_VER >= 1200 printf("%I64u",*(mat_uint64_t*)data); #elif defined(HAVE_UNSIGNED_LONG_LONG_INT) printf("%llu",(unsigned long long)(*(mat_uint64_t*)data)); #else printf("%lu",(unsigned long)(*(mat_uint64_t*)data)); #endif break; #endif case MAT_T_INT32: printf("%d",*(mat_int32_t*)data); break; case MAT_T_UINT32: printf("%u",*(mat_uint32_t*)data); break; case MAT_T_INT16: printf("%hd",*(mat_int16_t*)data); break; case MAT_T_UINT16: printf("%hu",*(mat_uint16_t*)data); break; case MAT_T_INT8: printf("%hhd",*(mat_int8_t*)data); break; case MAT_T_UINT8: printf("%hhu",*(mat_uint8_t*)data); break; default: break; } }
0
[ "CWE-401" ]
matio
a47b7cd3aca70e9a0bddf8146eb4ab0cbd19c2c3
312,635,775,520,514,640,000,000,000,000,000,000,000
57
Fix memory leak. As reported by https://github.com/tbeu/matio/issues/131
static void iucv_sock_kill(struct sock *sk) { if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) return; iucv_sock_unlink(&iucv_sk_list, sk); sock_set_flag(sk, SOCK_DEAD); sock_put(sk); }
0
[ "CWE-20", "CWE-269" ]
linux
f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
285,768,480,939,036,670,000,000,000,000,000,000,000
9
net: rework recvmsg handler msg_name and msg_namelen logic This patch now always passes msg->msg_namelen as 0. recvmsg handlers must set msg_namelen to the proper size <= sizeof(struct sockaddr_storage) to return msg_name to the user. This prevents numerous uninitialized memory leaks we had in the recvmsg handlers and makes it harder for new code to accidentally leak uninitialized memory. Optimize for the case recvfrom is called with NULL as address. We don't need to copy the address at all, so set it to NULL before invoking the recvmsg handler. We can do so, because all the recvmsg handlers must cope with the case a plain read() is called on them. read() also sets msg_name to NULL. Also document these changes in include/linux/net.h as suggested by David Miller. Changes since RFC: Set msg->msg_name = NULL if user specified a NULL in msg_name but had a non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't affect sendto as it would bail out earlier while trying to copy-in the address. It also more naturally reflects the logic by the callers of verify_iovec. With this change in place I could remove " if (!uaddr || msg_sys->msg_namelen == 0) msg->msg_name = NULL ". This change does not alter the user visible error logic as we ignore msg_namelen as long as msg_name is NULL. Also remove two unnecessary curly brackets in ___sys_recvmsg and change comments to netdev style. Cc: David Miller <[email protected]> Suggested-by: Eric Dumazet <[email protected]> Signed-off-by: Hannes Frederic Sowa <[email protected]> Signed-off-by: David S. Miller <[email protected]>
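The reworked contract reads roughly as below in user-space terms: the caller hands in msg_namelen of zero (and possibly a NULL msg_name), and only a handler that really fills in an address sets the length. Types are simplified stand-ins, not the kernel's:

#include <string.h>

struct msghdr_stub { void *msg_name; int msg_namelen; };
struct sockaddr_stub { unsigned short family; char data[14]; };

static void recvmsg_handler(struct msghdr_stub *msg,
                            const struct sockaddr_stub *peer)
{
    msg->msg_namelen = 0;                  /* default: nothing reported */
    if (msg->msg_name != NULL && peer != NULL) {
        memcpy(msg->msg_name, peer, sizeof(*peer));
        msg->msg_namelen = sizeof(*peer);  /* set only when really filled */
    }
}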
static int of_dev_node_match(struct device *dev, const void *data) { int ret = 0; if (dev->parent) ret = dev->parent->of_node == data; return ret == 0 ? dev->of_node == data : ret; }
0
[ "CWE-401" ]
linux
895a5e96dbd6386c8e78e5b78e067dcc67b7f0ab
174,958,355,182,136,830,000,000,000,000,000,000,000
9
net-sysfs: Fix mem leak in netdev_register_kobject syzkaller report this: BUG: memory leak unreferenced object 0xffff88837a71a500 (size 256): comm "syz-executor.2", pid 9770, jiffies 4297825125 (age 17.843s) hex dump (first 32 bytes): 00 00 00 00 ad 4e ad de ff ff ff ff 00 00 00 00 .....N.......... ff ff ff ff ff ff ff ff 20 c0 ef 86 ff ff ff ff ........ ....... backtrace: [<00000000db12624b>] netdev_register_kobject+0x124/0x2e0 net/core/net-sysfs.c:1751 [<00000000dc49a994>] register_netdevice+0xcc1/0x1270 net/core/dev.c:8516 [<00000000e5f3fea0>] tun_set_iff drivers/net/tun.c:2649 [inline] [<00000000e5f3fea0>] __tun_chr_ioctl+0x2218/0x3d20 drivers/net/tun.c:2883 [<000000001b8ac127>] vfs_ioctl fs/ioctl.c:46 [inline] [<000000001b8ac127>] do_vfs_ioctl+0x1a5/0x10e0 fs/ioctl.c:690 [<0000000079b269f8>] ksys_ioctl+0x89/0xa0 fs/ioctl.c:705 [<00000000de649beb>] __do_sys_ioctl fs/ioctl.c:712 [inline] [<00000000de649beb>] __se_sys_ioctl fs/ioctl.c:710 [inline] [<00000000de649beb>] __x64_sys_ioctl+0x74/0xb0 fs/ioctl.c:710 [<000000007ebded1e>] do_syscall_64+0xc8/0x580 arch/x86/entry/common.c:290 [<00000000db315d36>] entry_SYSCALL_64_after_hwframe+0x49/0xbe [<00000000115be9bb>] 0xffffffffffffffff It should call kset_unregister to free 'dev->queues_kset' in error path of register_queue_kobjects, otherwise will cause a mem leak. Reported-by: Hulk Robot <[email protected]> Fixes: 1d24eb4815d1 ("xps: Transmit Packet Steering") Signed-off-by: YueHaibing <[email protected]> Signed-off-by: David S. Miller <[email protected]>
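The leak is the classic missing unwind; a compact sketch of the corrected error path, with user-space stand-ins for kset_create()/kset_unregister():

#include <stdlib.h>

struct kset_stub { int dummy; };

static struct kset_stub *kset_create(void)
{
    return calloc(1, sizeof(struct kset_stub));
}

static void kset_unregister(struct kset_stub *k) { free(k); }

static int create_rx_queues(void) { return -1; /* pretend this fails */ }

static int register_queue_kobjects(struct kset_stub **out)
{
    struct kset_stub *queues = kset_create();
    if (!queues)
        return -12;                /* -ENOMEM */

    if (create_rx_queues() < 0) {
        kset_unregister(queues);   /* the unwind the original code lacked */
        return -1;
    }
    *out = queues;
    return 0;
}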
bool WebContents::SendIPCMessageToFrame(bool internal, bool send_to_all, int32_t frame_id, const std::string& channel, v8::Local<v8::Value> args) { blink::CloneableMessage message; if (!gin::ConvertFromV8(isolate(), args, &message)) { isolate()->ThrowException(v8::Exception::Error( gin::StringToV8(isolate(), "Failed to serialize arguments"))); return false; } auto frames = web_contents()->GetAllFrames(); auto iter = std::find_if(frames.begin(), frames.end(), [frame_id](auto* f) { return f->GetRoutingID() == frame_id; }); if (iter == frames.end()) return false; if (!(*iter)->IsRenderFrameLive()) return false; mojo::AssociatedRemote<mojom::ElectronRenderer> electron_renderer; (*iter)->GetRemoteAssociatedInterfaces()->GetInterface(&electron_renderer); electron_renderer->Message(internal, send_to_all, channel, std::move(message), 0 /* sender_id */); return true; }
0
[ "CWE-284", "CWE-693" ]
electron
18613925610ba319da7f497b6deed85ad712c59b
227,033,684,529,676,100,000,000,000,000,000,000,000
26
refactor: wire will-navigate up to a navigation throttle instead of OpenURL (#25108) * refactor: wire will-navigate up to a navigation throttle instead of OpenURL (#25065) * refactor: wire will-navigate up to a navigation throttle instead of OpenURL * spec: add test for x-site _top navigation * chore: old code be old
int snd_seq_queue_alloc(int client, int locked, unsigned int info_flags) { struct snd_seq_queue *q; q = queue_new(client, locked); if (q == NULL) return -ENOMEM; q->info_flags = info_flags; if (queue_list_add(q) < 0) { queue_delete(q); return -ENOMEM; } snd_seq_queue_use(q->queue, client, 1); /* use this queue */ return q->queue; }
0
[ "CWE-362" ]
linux
3567eb6af614dac436c4b16a8d426f9faed639b3
147,462,164,742,929,540,000,000,000,000,000,000,000
15
ALSA: seq: Fix race at timer setup and close ALSA sequencer code has an open race between the timer setup ioctl and the close of the client. This was triggered by syzkaller fuzzer, and a use-after-free was caught there as a result. This patch papers over it by adding a proper queue->timer_mutex lock around the timer-related calls in the relevant code path. Reported-by: Dmitry Vyukov <[email protected]> Tested-by: Dmitry Vyukov <[email protected]> Cc: <[email protected]> Signed-off-by: Takashi Iwai <[email protected]>
DEFUN (clear_ip_bgp_peer_vpnv4_soft, clear_ip_bgp_peer_vpnv4_soft_cmd, "clear ip bgp A.B.C.D vpnv4 unicast soft", CLEAR_STR IP_STR BGP_STR "BGP neighbor address to clear\n" "Address family\n" "Address Family Modifier\n" "Soft reconfig\n") { return bgp_clear_vty (vty, NULL, AFI_IP, SAFI_MPLS_VPN, clear_peer, BGP_CLEAR_SOFT_BOTH, argv[0]); }
0
[ "CWE-125" ]
frr
6d58272b4cf96f0daa846210dd2104877900f921
261,760,078,787,053,940,000,000,000,000,000,000,000
14
[bgpd] cleanup, compact and consolidate capability parsing code 2007-07-26 Paul Jakma <[email protected]> * (general) Clean up and compact capability parsing slightly. Consolidate validation of length and logging of generic TLV, and memcpy of capability data, thus removing such from cap specific code (not always present or correct). * bgp_open.h: Add structures for the generic capability TLV header and for the data formats of the various specific capabilities we support. Hence remove the badly named, or else misdefined, struct capability. * bgp_open.c: (bgp_capability_vty_out) Use struct capability_mp_data. Do the length checks *before* memcpy()'ing based on that length (stored capability - should have been validated anyway on input, but..). (bgp_afi_safi_valid_indices) new function to validate (afi,safi) which is about to be used as index into arrays, consolidates several instances of same, at least one of which appeared to be incomplete.. (bgp_capability_mp) Much condensed. (bgp_capability_orf_entry) New, process one ORF entry (bgp_capability_orf) Condensed. Fixed to process all ORF entries. (bgp_capability_restart) Condensed, and fixed to use a cap-specific type, rather than abusing capability_mp. (struct message capcode_str) added to aid generic logging. (size_t cap_minsizes[]) added to aid generic validation of capability length field. (bgp_capability_parse) Generic logging and validation of TLV consolidated here. Code compacted as much as possible. * bgp_packet.c: (bgp_open_receive) Capability parsers now use streams, so no more need here to manually fudge the input stream getp. (bgp_capability_msg_parse) use struct capability_mp_data. Validate lengths /before/ memcpy. Use bgp_afi_safi_valid_indices. (bgp_capability_receive) Exported for use by test harness. * bgp_vty.c: (bgp_show_summary) fix conversion warning (bgp_show_peer) ditto * bgp_debug.h: Fix storage 'extern' after type 'const'. * lib/log.c: (mes_lookup) warning about code not being in same-number array slot should be debug, not warning. E.g. BGP has several discontiguous number spaces, allocating from different parts of a space is not uncommon (e.g. IANA assigned versus vendor-assigned code points in some number space).
xmlXIncludeErr(xmlXIncludeCtxtPtr ctxt, xmlNodePtr node, int error, const char *msg, const xmlChar *extra) { if (ctxt != NULL) ctxt->nbErrors++; __xmlRaiseError(NULL, NULL, NULL, ctxt, node, XML_FROM_XINCLUDE, error, XML_ERR_ERROR, NULL, 0, (const char *) extra, NULL, NULL, 0, 0, msg, (const char *) extra); }
0
[ "CWE-416" ]
libxml2
1098c30a040e72a4654968547f415be4e4c40fe7
44,096,514,450,161,370,000,000,000,000,000,000,000
10
Fix use-after-free with `xmllint --xinclude --dropdtd` The --dropdtd option can leave dangling pointers in entity reference nodes. Make sure to skip these nodes when processing XIncludes. This also avoids scanning entity declarations and even modifying them inadvertently during XInclude processing. Move from a block list to an allow list approach to avoid descending into other node types that can't contain elements. Fixes #237.
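The allow-list idea can be sketched independently of libxml2: descend only into node kinds known to contain elements, so entity declarations and DTD internals are never scanned or modified. Enum and struct names are illustrative:

#include <stdbool.h>
#include <stddef.h>

enum node_type { NODE_ELEMENT, NODE_TEXT, NODE_ENTITY_DECL, NODE_DTD,
                 NODE_COMMENT };

struct node {
    enum node_type type;
    struct node *children;
    struct node *next;
};

static bool may_descend(const struct node *n)
{
    return n->type == NODE_ELEMENT;   /* allow list: element nodes only */
}

static size_t count_elements(const struct node *n)
{
    size_t count = 0;
    for (; n; n = n->next) {
        if (n->type == NODE_ELEMENT)
            count++;
        if (may_descend(n))           /* never touches entity/DTD internals */
            count += count_elements(n->children);
    }
    return count;
}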
static void create_certinfo(struct curl_certinfo *ci, zval *listcode TSRMLS_DC) { int i; if(ci) { zval *certhash = NULL; for(i=0; i<ci->num_of_certs; i++) { struct curl_slist *slist; MAKE_STD_ZVAL(certhash); array_init(certhash); for(slist = ci->certinfo[i]; slist; slist = slist->next) { int len; char s[64]; char *tmp; strncpy(s, slist->data, 64); tmp = memchr(s, ':', 64); if(tmp) { *tmp = '\0'; len = strlen(s); if(!strcmp(s, "Subject") || !strcmp(s, "Issuer")) { zval *hash; MAKE_STD_ZVAL(hash); array_init(hash); split_certinfo(&slist->data[len+1], hash); add_assoc_zval(certhash, s, hash); } else { add_assoc_string(certhash, s, &slist->data[len+1], 1); } } else { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Could not extract hash key from certificate info"); } } add_next_index_zval(listcode, certhash); } } }
0
[]
php-src
0ea75af9be8a40836951fc89f723dd5390b8b46f
327,770,592,671,061,100,000,000,000,000,000,000,000
40
Fixed bug #69316 (Use-after-free in php_curl related to CURLOPT_FILE/_INFILE/_WRITEHEADER)
ATTypedTableRecursion(List **wqueue, Relation rel, AlterTableCmd *cmd, LOCKMODE lockmode) { ListCell *child; List *children; Assert(rel->rd_rel->relkind == RELKIND_COMPOSITE_TYPE); children = find_typed_table_dependencies(rel->rd_rel->reltype, RelationGetRelationName(rel), cmd->behavior); foreach(child, children) { Oid childrelid = lfirst_oid(child); Relation childrel; childrel = relation_open(childrelid, lockmode); CheckTableNotInUse(childrel, "ALTER TABLE"); ATPrepCmd(wqueue, childrel, cmd, true, true, lockmode); relation_close(childrel, NoLock); } }
0
[ "CWE-362" ]
postgres
5f173040e324f6c2eebb90d86cf1b0cdb5890f0a
111,328,673,523,312,220,000,000,000,000,000,000,000
23
Avoid repeated name lookups during table and index DDL. If the name lookups come to different conclusions due to concurrent activity, we might perform some parts of the DDL on a different table than other parts. At least in the case of CREATE INDEX, this can be used to cause the permissions checks to be performed against a different table than the index creation, allowing for a privilege escalation attack. This changes the calling convention for DefineIndex, CreateTrigger, transformIndexStmt, transformAlterTableStmt, CheckIndexCompatible (in 9.2 and newer), and AlterTable (in 9.1 and older). In addition, CheckRelationOwnership is removed in 9.2 and newer and the calling convention is changed in older branches. A field has also been added to the Constraint node (FkConstraint in 8.4). Third-party code calling these functions or using the Constraint node will require updating. Report by Andres Freund. Patch by Robert Haas and Andres Freund, reviewed by Tom Lane. Security: CVE-2014-0062
int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, int (*getfrag)(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, int length) { int frg_cnt = skb_shinfo(skb)->nr_frags; int copy; int offset = 0; int ret; struct page_frag *pfrag = &current->task_frag; do { /* Return error if we don't have space for new frag */ if (frg_cnt >= MAX_SKB_FRAGS) return -EMSGSIZE; if (!sk_page_frag_refill(sk, pfrag)) return -ENOMEM; /* copy the user data to page */ copy = min_t(int, length, pfrag->size - pfrag->offset); ret = getfrag(from, page_address(pfrag->page) + pfrag->offset, offset, copy, 0, skb); if (ret < 0) return -EFAULT; /* copy was successful so update the size parameters */ skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset, copy); frg_cnt++; pfrag->offset += copy; get_page(pfrag->page); skb->truesize += copy; atomic_add(copy, &sk->sk_wmem_alloc); skb->len += copy; skb->data_len += copy; offset += copy; length -= copy; } while (length > 0); return 0; }
0
[ "CWE-703", "CWE-125" ]
linux
8605330aac5a5785630aec8f64378a54891937cc
40,780,009,290,012,727,000,000,000,000,000,000,000
45
tcp: fix SCM_TIMESTAMPING_OPT_STATS for normal skbs __sock_recv_timestamp can be called for both normal skbs (for receive timestamps) and for skbs on the error queue (for transmit timestamps). Commit 1c885808e456 (tcp: SOF_TIMESTAMPING_OPT_STATS option for SO_TIMESTAMPING) assumes any skb passed to __sock_recv_timestamp are from the error queue, containing OPT_STATS in the content of the skb. This results in accessing invalid memory or generating junk data. To fix this, set skb->pkt_type to PACKET_OUTGOING for packets on the error queue. This is safe because on the receive path on local sockets skb->pkt_type is never set to PACKET_OUTGOING. With that, copy OPT_STATS from a packet, only if its pkt_type is PACKET_OUTGOING. Fixes: 1c885808e456 ("tcp: SOF_TIMESTAMPING_OPT_STATS option for SO_TIMESTAMPING") Reported-by: JongHwan Kim <[email protected]> Signed-off-by: Soheil Hassas Yeganeh <[email protected]> Signed-off-by: Eric Dumazet <[email protected]> Signed-off-by: Willem de Bruijn <[email protected]> Signed-off-by: David S. Miller <[email protected]>
flow_unwildcard_tp_ports(const struct flow *flow, struct flow_wildcards *wc) { if (flow->nw_proto != IPPROTO_ICMP) { memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src); memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst); } else { wc->masks.tp_src = htons(0xff); wc->masks.tp_dst = htons(0xff); } }
0
[ "CWE-400" ]
ovs
79349cbab0b2a755140eedb91833ad2760520a83
296,946,637,387,549,100,000,000,000,000,000,000,000
10
flow: Support extra padding length. Although not required, padding can be optionally added until the packet length is MTU bytes. A packet with extra padding currently fails sanity checks. Vulnerability: CVE-2020-35498 Fixes: fa8d9001a624 ("miniflow_extract: Properly handle small IP packets.") Reported-by: Joakim Hindersson <[email protected]> Acked-by: Ilya Maximets <[email protected]> Signed-off-by: Flavio Leitner <[email protected]> Signed-off-by: Ilya Maximets <[email protected]>
static int __init init_hw_perf_events(void) { u64 dfr = read_cpuid(ID_AA64DFR0_EL1); switch ((dfr >> 8) & 0xf) { case 0x1: /* PMUv3 */ cpu_pmu = armv8_pmuv3_pmu_init(); break; } if (cpu_pmu) { pr_info("enabled with %s PMU driver, %d counters available\n", cpu_pmu->name, cpu_pmu->num_events); cpu_pmu_init(cpu_pmu); armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW); } else { pr_info("no hardware support available\n"); } return 0; }
0
[ "CWE-284", "CWE-264" ]
linux
8fff105e13041e49b82f92eef034f363a6b1c071
294,034,899,134,002,670,000,000,000,000,000,000,000
21
arm64: perf: reject groups spanning multiple HW PMUs The perf core implicitly rejects events spanning multiple HW PMUs, as in these cases the event->ctx will differ. However this validation is performed after pmu::event_init() is called in perf_init_event(), and thus pmu::event_init() may be called with a group leader from a different HW PMU. The ARM64 PMU driver does not take this fact into account, and when validating groups assumes that it can call to_arm_pmu(event->pmu) for any HW event. When the event in question is from another HW PMU this is wrong, and results in dereferencing garbage. This patch updates the ARM64 PMU driver to first test for and reject events from other PMUs, moving the to_arm_pmu and related logic after this test. Fixes a crash triggered by perf_fuzzer on Linux-4.0-rc2, with a CCI PMU present: Bad mode in Synchronous Abort handler detected, code 0x86000006 -- IABT (current EL) CPU: 0 PID: 1371 Comm: perf_fuzzer Not tainted 3.19.0+ #249 Hardware name: V2F-1XV7 Cortex-A53x2 SMM (DT) task: ffffffc07c73a280 ti: ffffffc07b0a0000 task.ti: ffffffc07b0a0000 PC is at 0x0 LR is at validate_event+0x90/0xa8 pc : [<0000000000000000>] lr : [<ffffffc000090228>] pstate: 00000145 sp : ffffffc07b0a3ba0 [< (null)>] (null) [<ffffffc0000907d8>] armpmu_event_init+0x174/0x3cc [<ffffffc00015d870>] perf_try_init_event+0x34/0x70 [<ffffffc000164094>] perf_init_event+0xe0/0x10c [<ffffffc000164348>] perf_event_alloc+0x288/0x358 [<ffffffc000164c5c>] SyS_perf_event_open+0x464/0x98c Code: bad PC value Also cleans up the code to use the arm_pmu only when we know that we are dealing with an arm pmu event. Cc: Will Deacon <[email protected]> Acked-by: Mark Rutland <[email protected]> Acked-by: Peter Ziljstra (Intel) <[email protected]> Signed-off-by: Suzuki K. Poulose <[email protected]> Signed-off-by: Will Deacon <[email protected]>
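The validation-order fix boils down to: check that the event belongs to this PMU before the container_of-style downcast. A self-contained sketch with illustrative types, not the arm_pmu driver itself:

#include <stdbool.h>
#include <stddef.h>

struct pmu { int id; };
struct perf_event { struct pmu *pmu; };
struct arm_pmu { struct pmu pmu; int num_events; };

static struct pmu *this_pmu;   /* the PMU this driver instance owns */

static struct arm_pmu *to_arm_pmu(struct pmu *p)
{
    return (struct arm_pmu *)((char *)p - offsetof(struct arm_pmu, pmu));
}

static bool validate_event(struct perf_event *event)
{
    if (event->pmu != this_pmu)
        return false;                   /* foreign PMU: never downcast it */
    struct arm_pmu *apmu = to_arm_pmu(event->pmu);  /* now known-safe */
    return apmu->num_events > 0;
}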
static void runPcapLoop(u_int16_t thread_id) { if((!shutdown_app) && (ndpi_thread_info[thread_id].workflow->pcap_handle != NULL)) if(pcap_loop(ndpi_thread_info[thread_id].workflow->pcap_handle, -1, &ndpi_process_packet, (u_char*)&thread_id) < 0) printf("Error while reading pcap file: '%s'\n", pcap_geterr(ndpi_thread_info[thread_id].workflow->pcap_handle)); }
0
[ "CWE-125" ]
nDPI
b7e666e465f138ae48ab81976726e67deed12701
204,961,100,344,487,930,000,000,000,000,000,000,000
5
Added fix to avoid potential heap buffer overflow in H.323 dissector. Modified HTTP report information to make it closer to the HTTP field names.
int _modbus_receive_msg(modbus_t *ctx, uint8_t *msg, msg_type_t msg_type) { int rc; fd_set rset; struct timeval tv; struct timeval *p_tv; int length_to_read; int msg_length = 0; _step_t step; if (ctx->debug) { if (msg_type == MSG_INDICATION) { printf("Waiting for an indication...\n"); } else { printf("Waiting for a confirmation...\n"); } } /* Add a file descriptor to the set */ FD_ZERO(&rset); FD_SET(ctx->s, &rset); /* We need to analyse the message step by step. At the first step, we want * to reach the function code because all packets contain this * information. */ step = _STEP_FUNCTION; length_to_read = ctx->backend->header_length + 1; if (msg_type == MSG_INDICATION) { /* Wait for a message, we don't know when the message will be * received */ if (ctx->indication_timeout.tv_sec == 0 && ctx->indication_timeout.tv_usec == 0) { /* By default, the indication timeout isn't set */ p_tv = NULL; } else { /* Wait for an indication (name of a received request by a server, see schema) */ tv.tv_sec = ctx->indication_timeout.tv_sec; tv.tv_usec = ctx->indication_timeout.tv_usec; p_tv = &tv; } } else { tv.tv_sec = ctx->response_timeout.tv_sec; tv.tv_usec = ctx->response_timeout.tv_usec; p_tv = &tv; } while (length_to_read != 0) { rc = ctx->backend->select(ctx, &rset, p_tv, length_to_read); if (rc == -1) { _error_print(ctx, "select"); if (ctx->error_recovery & MODBUS_ERROR_RECOVERY_LINK) { int saved_errno = errno; if (errno == ETIMEDOUT) { _sleep_response_timeout(ctx); modbus_flush(ctx); } else if (errno == EBADF) { modbus_close(ctx); modbus_connect(ctx); } errno = saved_errno; } return -1; } rc = ctx->backend->recv(ctx, msg + msg_length, length_to_read); if (rc == 0) { errno = ECONNRESET; rc = -1; } if (rc == -1) { _error_print(ctx, "read"); if ((ctx->error_recovery & MODBUS_ERROR_RECOVERY_LINK) && (errno == ECONNRESET || errno == ECONNREFUSED || errno == EBADF)) { int saved_errno = errno; modbus_close(ctx); modbus_connect(ctx); /* Could be removed by previous calls */ errno = saved_errno; } return -1; } /* Display the hex code of each character received */ if (ctx->debug) { int i; for (i=0; i < rc; i++) printf("<%.2X>", msg[msg_length + i]); } /* Sums bytes received */ msg_length += rc; /* Computes remaining bytes */ length_to_read -= rc; if (length_to_read == 0) { switch (step) { case _STEP_FUNCTION: /* Function code position */ length_to_read = compute_meta_length_after_function( msg[ctx->backend->header_length], msg_type); if (length_to_read != 0) { step = _STEP_META; break; } /* else switches straight to the next step */ case _STEP_META: length_to_read = compute_data_length_after_meta( ctx, msg, msg_type); if ((msg_length + length_to_read) > (int)ctx->backend->max_adu_length) { errno = EMBBADDATA; _error_print(ctx, "too many data"); return -1; } step = _STEP_DATA; break; default: break; } } if (length_to_read > 0 && (ctx->byte_timeout.tv_sec > 0 || ctx->byte_timeout.tv_usec > 0)) { /* If there is no character in the buffer, the allowed timeout interval between two consecutive bytes is defined by byte_timeout */ tv.tv_sec = ctx->byte_timeout.tv_sec; tv.tv_usec = ctx->byte_timeout.tv_usec; p_tv = &tv; } /* else timeout isn't set again, the full response must be read before expiration of response timeout (for CONFIRMATION only) */ } if (ctx->debug) printf("\n"); return ctx->backend->check_integrity(ctx, msg, msg_length); }
0
[ "CWE-125" ]
libmodbus
5ccdf5ef79d742640355d1132fa9e2abc7fbaefc
137,090,617,354,726,460,000,000,000,000,000,000,000
141
Fix VD-1301 and VD-1302 vulnerabilities This patch was contributed by Maor Vermucht and Or Peles from VDOO Connected Trust.
static void ctcp_msg_dcc_chat(IRC_SERVER_REC *server, const char *data, const char *nick, const char *addr, const char *target, CHAT_DCC_REC *chat) { CHAT_DCC_REC *dcc; char **params; int paramcount; int passive, autoallow = FALSE; /* CHAT <unused> <address> <port> */ /* CHAT <unused> <address> 0 <id> (DCC CHAT passive protocol) */ params = g_strsplit(data, " ", -1); paramcount = g_strv_length(params); if (paramcount < 3) { g_strfreev(params); return; } passive = paramcount == 4 && g_strcmp0(params[2], "0") == 0; dcc = DCC_CHAT(dcc_find_request(DCC_CHAT_TYPE, nick, NULL)); if (dcc != NULL) { if (dcc_is_listening(dcc)) { /* we requested dcc chat, they requested dcc chat from us .. allow it. */ dcc_destroy(DCC(dcc)); autoallow = TRUE; } else if (!dcc_is_passive(dcc)) { /* we already have one dcc chat request from this nick, remove it. */ dcc_destroy(DCC(dcc)); } else if (passive) { if (dcc->pasv_id != atoi(params[3])) { /* IDs don't match! */ dcc_destroy(DCC(dcc)); } else { /* IDs are ok! Update address and port and connect! */ dcc->target = g_strdup(target); dcc->port = atoi(params[2]); dcc_str2ip(params[1], &dcc->addr); net_ip2host(&dcc->addr, dcc->addrstr); dcc_chat_connect(dcc); g_strfreev(params); return; } } } dcc = dcc_chat_create(server, chat, nick, params[0]); dcc->target = g_strdup(target); dcc->port = atoi(params[2]); if (passive) dcc->pasv_id = atoi(params[3]); dcc_str2ip(params[1], &dcc->addr); net_ip2host(&dcc->addr, dcc->addrstr); signal_emit("dcc request", 2, dcc, addr); if (autoallow || DCC_CHAT_AUTOACCEPT(dcc, server, nick, addr)) { if (passive) { /* Passive DCC... let's set up a listening socket and send reply back */ dcc_chat_passive(dcc); } else { dcc_chat_connect(dcc); } } g_strfreev(params); }
1
[ "CWE-416" ]
irssi
43e44d553d44e313003cee87e6ea5e24d68b84a1
270,273,807,209,694,340,000,000,000,000,000,000,000
73
Merge branch 'security' into 'master': Security. Closes GL#12, GL#13, GL#14, GL#15, GL#16. See merge request irssi/irssi!23
proto_tree_add_bitmask_list(proto_tree *tree, tvbuff_t *tvb, const guint offset, const int len, int * const *fields, const guint encoding) { guint64 value; if (tree) { value = get_uint64_value(tree, tvb, offset, len, encoding); proto_item_add_bitmask_tree(NULL, tvb, offset, len, -1, fields, BMT_NO_APPEND, FALSE, TRUE, tree, value); } }
0
[ "CWE-401" ]
wireshark
a9fc769d7bb4b491efb61c699d57c9f35269d871
129,199,440,931,788,640,000,000,000,000,000,000,000
11
epan: Fix a memory leak. Make sure _proto_tree_add_bits_ret_val allocates a bits array using the packet scope, otherwise we leak memory. Fixes #17032.
bool checkInboundFrameLimits() override { return true; }
0
[ "CWE-400" ]
envoy
0e49a495826ea9e29134c1bd54fdeb31a034f40c
173,282,190,688,612,380,000,000,000,000,000,000,000
1
http/2: add stats and stream flush timeout (#139) This commit adds a new stream flush timeout to guard against a remote server that does not open window once an entire stream has been buffered for flushing. Additional stats have also been added to better understand the codecs view of active streams as well as amount of data buffered. Signed-off-by: Matt Klein <[email protected]>
static int raw_send_hdrinc(struct sock *sk, void *from, size_t length, struct rtable **rtp, unsigned int flags) { struct inet_sock *inet = inet_sk(sk); struct net *net = sock_net(sk); struct iphdr *iph; struct sk_buff *skb; unsigned int iphlen; int err; struct rtable *rt = *rtp; if (length > rt->dst.dev->mtu) { ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport, rt->dst.dev->mtu); return -EMSGSIZE; } if (flags&MSG_PROBE) goto out; skb = sock_alloc_send_skb(sk, length + LL_ALLOCATED_SPACE(rt->dst.dev) + 15, flags & MSG_DONTWAIT, &err); if (skb == NULL) goto error; skb_reserve(skb, LL_RESERVED_SPACE(rt->dst.dev)); skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; skb_dst_set(skb, &rt->dst); *rtp = NULL; skb_reset_network_header(skb); iph = ip_hdr(skb); skb_put(skb, length); skb->ip_summed = CHECKSUM_NONE; skb->transport_header = skb->network_header; err = -EFAULT; if (memcpy_fromiovecend((void *)iph, from, 0, length)) goto error_free; iphlen = iph->ihl * 4; /* * We don't want to modify the ip header, but we do need to * be sure that it won't cause problems later along the network * stack. Specifically we want to make sure that iph->ihl is a * sane value. If ihl points beyond the length of the buffer passed * in, reject the frame as invalid */ err = -EINVAL; if (iphlen > length) goto error_free; if (iphlen >= sizeof(*iph)) { if (!iph->saddr) iph->saddr = rt->rt_src; iph->check = 0; iph->tot_len = htons(length); if (!iph->id) ip_select_ident(iph, &rt->dst, NULL); iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); } if (iph->protocol == IPPROTO_ICMP) icmp_out_count(net, ((struct icmphdr *) skb_transport_header(skb))->type); err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL, rt->dst.dev, dst_output); if (err > 0) err = net_xmit_errno(err); if (err) goto error; out: return 0; error_free: kfree_skb(skb); error: IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS); if (err == -ENOBUFS && !inet->recverr) err = 0; return err; }
0
[ "CWE-362" ]
linux-2.6
f6d8bd051c391c1c0458a30b2a7abcd939329259
126,397,512,742,644,180,000,000,000,000,000,000,000
87
inet: add RCU protection to inet->opt We lack proper synchronization to manipulate inet->opt ip_options. Problem is ip_make_skb() calls ip_setup_cork() and ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options), without any protection against another thread manipulating inet->opt. Another thread can change inet->opt pointer and free old one under us. Use RCU to protect inet->opt (changed to inet->inet_opt). Instead of handling atomic refcounts, just copy ip_options when necessary, to avoid cache line dirtying. We can't insert an rcu_head in struct ip_options since it's included in skb->cb[], so this patch is large because I had to introduce a new ip_options_rcu structure. Signed-off-by: Eric Dumazet <[email protected]> Cc: Herbert Xu <[email protected]> Signed-off-by: David S. Miller <[email protected]>
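The discipline the patch enforces — readers take a private copy of the options while protected, writers swap the pointer and free the old block afterwards — can be sketched in user space. The kernel uses RCU; a rwlock stands in for it here, and all names are hypothetical:

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

struct ip_options_stub { unsigned char optlen; unsigned char data[40]; };

static struct ip_options_stub *inet_opt;   /* shared, swappable */
static pthread_rwlock_t opt_lock = PTHREAD_RWLOCK_INITIALIZER;

static bool opt_copy(struct ip_options_stub *dst)
{
    bool have = false;
    pthread_rwlock_rdlock(&opt_lock);
    if (inet_opt) {
        memcpy(dst, inet_opt, sizeof(*dst));  /* private copy, no shared ptr */
        have = true;
    }
    pthread_rwlock_unlock(&opt_lock);
    return have;
}

static void opt_replace(struct ip_options_stub *newopt)
{
    pthread_rwlock_wrlock(&opt_lock);
    struct ip_options_stub *old = inet_opt;
    inet_opt = newopt;
    pthread_rwlock_unlock(&opt_lock);
    free(old);                                /* safe: no reader holds it */
}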
static inline void ModulateLCHab(const double percent_luma, const double percent_chroma,const double percent_hue,double *red, double *green,double *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. */ ConvertRGBToLCHab(*red,*green,*blue,&luma,&chroma,&hue); luma*=0.01*percent_luma; chroma*=0.01*percent_chroma; hue+=fmod((percent_hue-100.0),200.0)/200.0; ConvertLCHabToRGB(luma,chroma,hue,red,green,blue); }
0
[ "CWE-401", "CWE-119", "CWE-787" ]
ImageMagick
1cc6f0ccc92c20c7cab6c4a7335daf29c91f0d8e
229,342,055,710,180,000,000,000,000,000,000,000,000
18
https://github.com/ImageMagick/ImageMagick/issues/663 https://github.com/ImageMagick/ImageMagick/issues/655
static void complete_format_list(THEME_SEARCH_REC *rec, const char *key, GList **list) { FORMAT_REC *formats; int n, len; formats = g_hash_table_lookup(default_formats, rec->name); len = strlen(key); for (n = 1; formats[n].def != NULL; n++) { const char *item = formats[n].tag; if (item != NULL && g_ascii_strncasecmp(item, key, len) == 0) *list = g_list_append(*list, g_strdup(item)); } }
0
[ "CWE-416" ]
irssi
43e44d553d44e313003cee87e6ea5e24d68b84a1
57,606,392,263,691,610,000,000,000,000,000,000,000
15
Merge branch 'security' into 'master': Security. Closes GL#12, GL#13, GL#14, GL#15, GL#16. See merge request irssi/irssi!23
MagickExport size_t ConcatenateMagickString(char *destination, const char *source,const size_t length) { register char *q; register const char *p; register size_t i; size_t count; assert(destination != (char *) NULL); assert(source != (const char *) NULL); assert(length >= 1); p=source; q=destination; i=length; while ((i-- != 0) && (*q != '\0')) q++; count=(size_t) (q-destination); i=length-count; if (i == 0) return(count+strlen(p)); while (*p != '\0') { if (i != 1) { *q++=(*p); i--; } p++; } *q='\0'; return(count+(p-source)); }
0
[ "CWE-190" ]
ImageMagick
be90a5395695f0d19479a5d46b06c678be7f7927
185,848,109,702,951,320,000,000,000,000,000,000,000
39
https://github.com/ImageMagick/ImageMagick/issues/1721
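ConcatenateMagickString above follows the BSD strlcat contract: it always NUL-terminates (given length >= 1) and returns the length the full concatenation would have needed, so a return value >= the buffer size signals truncation. A minimal usage sketch, assuming you link against MagickCore (or any strlcat-compatible definition of the prototype declared below):

```c
/* Usage sketch of the strlcat-style contract: detect truncation from the
 * return value. Prototype declared locally here for illustration; real
 * callers include the MagickCore headers instead. */
#include <stdio.h>
#include <stddef.h>

size_t ConcatenateMagickString(char *destination, const char *source,
                               const size_t length);

int main(void)
{
    char path[16] = "/tmp/";
    size_t need = ConcatenateMagickString(path, "very-long-image-name.png",
                                          sizeof(path));
    if (need >= sizeof(path))
        fprintf(stderr, "truncated: wanted %zu bytes, have %zu\n",
                need + 1, sizeof(path));
    else
        printf("path=%s\n", path);
    return 0;
}
```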
vte_sequence_handler_normal_keypad (VteTerminal *terminal, GValueArray *params) { _vte_debug_print(VTE_DEBUG_KEYBOARD, "Leaving application keypad mode.\n"); terminal->pvt->keypad_mode = VTE_KEYMODE_NORMAL; }
0
[]
vte
58bc3a942f198a1a8788553ca72c19d7c1702b74
153,926,881,585,871,470,000,000,000,000,000,000,000
6
fix bug #548272 svn path=/trunk/; revision=2365
size_t HTTPSession::sendCertificateRequest( std::unique_ptr<folly::IOBuf> certificateRequestContext, std::vector<fizz::Extension> extensions) { // Check if both sending and receiving peer have advertised valid // SETTINGS_HTTP_CERT_AUTH setting. Otherwise, the frames for secondary // authentication should not be sent. auto ingressSettings = codec_->getIngressSettings(); auto egressSettings = codec_->getEgressSettings(); if (ingressSettings && egressSettings) { if (ingressSettings->getSetting(SettingsId::SETTINGS_HTTP_CERT_AUTH, 0) == 0 || egressSettings->getSetting(SettingsId::SETTINGS_HTTP_CERT_AUTH, 0) == 0) { VLOG(4) << "Secondary certificate authentication is not supported."; return 0; } } auto authRequest = secondAuthManager_->createAuthRequest( std::move(certificateRequestContext), std::move(extensions)); auto encodedSize = codec_->generateCertificateRequest( writeBuf_, authRequest.first, std::move(authRequest.second)); if (encodedSize > 0) { scheduleWrite(); } else { VLOG(4) << "Failed to generate CERTIFICATE_REQUEST frame."; } return encodedSize; }
0
[ "CWE-20" ]
proxygen
0600ebe59c3e82cd012def77ca9ca1918da74a71
93,771,738,606,624,570,000,000,000,000,000,000,000
28
Check that a secondary auth manager is set before dereferencing. Summary: CVE-2018-6343 Reviewed By: mingtaoy Differential Revision: D12994423 fbshipit-source-id: 9229ec11da8085f1fa153595e8e5353e19d06fb7
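The fix named in the message is a guard against calling through an optional component that was never configured. A hedged, generic C sketch of that shape; the names here are illustrative stand-ins, not proxygen's C++ types:

```c
/* Generic shape of the CVE-2018-6343 fix: never call through an optional
 * component without checking it was set. Illustrative names only. */
#include <stdio.h>
#include <stddef.h>

struct auth_manager {
    int (*create_request)(void);
};

static size_t send_certificate_request(const struct auth_manager *mgr)
{
    if (mgr == NULL) {                 /* the missing guard */
        fprintf(stderr, "secondary auth manager not set\n");
        return 0;
    }
    return (size_t)mgr->create_request();
}

static int dummy_request(void) { return 42; }

int main(void)
{
    struct auth_manager mgr = { dummy_request };
    printf("%zu\n", send_certificate_request(&mgr));  /* 42 */
    printf("%zu\n", send_certificate_request(NULL));  /* 0, no crash */
    return 0;
}
```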
static bool cgroup_devices_has_allow_or_deny(struct cgfs_data *d, char *v, bool for_allow) { char *path; FILE *devices_list; char *line = NULL; size_t sz = 0; bool ret = !for_allow; const char *parts[3] = { NULL, "devices.list", NULL }; // XXX FIXME if users could use something other than 'lxc.devices.deny = a'. // not sure they ever do, but they *could* // right now, I'm assuming they do NOT if (!for_allow && strcmp(v, "a") != 0 && strcmp(v, "a *:* rwm") != 0) return false; parts[0] = (const char *)lxc_cgroup_get_hierarchy_abs_path_data("devices", d); if (!parts[0]) return false; path = lxc_string_join("/", parts, false); if (!path) { free((void *)parts[0]); return false; } devices_list = fopen_cloexec(path, "r"); if (!devices_list) { free(path); return false; } while (getline(&line, &sz, devices_list) != -1) { size_t len = strlen(line); if (len > 0 && line[len-1] == '\n') line[len-1] = '\0'; if (strcmp(line, "a *:* rwm") == 0) { ret = for_allow; goto out; } else if (for_allow && strcmp(line, v) == 0) { ret = true; goto out; } } out: fclose(devices_list); free(line); free(path); return ret; }
0
[ "CWE-59", "CWE-61" ]
lxc
592fd47a6245508b79fe6ac819fe6d3b2c1289be
15,350,415,716,152,786,000,000,000,000,000,000,000
54
CVE-2015-1335: Protect container mounts against symlinks

When a container starts up, lxc sets up the container's initial fstree by doing a bunch of mounting, guided by the container configuration file. The container config is owned by the admin or user on the host, so we do not try to guard against bad entries. However, since the mount target is in the container, it's possible that the container admin could divert the mount with symbolic links. This could bypass proper container startup (i.e. confinement of a root-owned container by the restrictive apparmor policy, by diverting the required write to /proc/self/attr/current), or bypass the (path-based) apparmor policy by diverting, say, /proc to /mnt in the container.

To prevent this:
1. do not allow mounts to paths containing symbolic links
2. do not allow bind mounts from relative paths containing symbolic links.

Details: Define safe_mount, which ensures that the container has not inserted any symbolic links into any mount targets for mounts to be done during container setup. The host's mount path may contain symbolic links. As it is under the control of the administrator, that's ok. So safe_mount begins the check for symbolic links after the rootfs->mount, by opening that directory. It opens each directory along the path using openat() relative to the parent directory using O_NOFOLLOW. When the target is reached, it mounts onto /proc/self/fd/<targetfd>.

Use safe_mount() in mount_entry(), when mounting container proc, and when needed. In particular, safe_mount() need not be used in any case where:
1. the mount is done in the container's namespace
2. the mount is for the container's rootfs
3. the mount is relative to a tmpfs or proc/sysfs which we have just safe_mount()ed ourselves

Since we were using proc/net as a temporary placeholder for /proc/sys/net during container startup, and proc/net is a symbolic link, use proc/tty instead.

Update the lxc.container.conf manpage with details about the new restrictions. Finally, add a testcase to test some symbolic link possibilities.

Reported-by: Roman Fiedler
Signed-off-by: Serge Hallyn <[email protected]>
Acked-by: Stéphane Graber <[email protected]>
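The safe_mount algorithm described above walks the target path one component at a time with openat() and O_NOFOLLOW, then mounts onto the stable /proc/self/fd/<fd> alias so no symlink can divert the target. A simplified, Linux-only sketch of the walk follows; this is an illustration, not lxc's actual safe_mount, and a privileged caller would finish it with the real mount(2) call:

```c
/* Sketch: open a path with no symlink component, then derive the
 * /proc/self/fd alias a mount would target. Simplified vs. lxc. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Open `path` relative to dirfd, refusing any symlink component
 * (openat fails with ELOOP on symlinks when O_NOFOLLOW is set and
 * O_PATH is not). Returns an fd to the final component, or -1. */
static int open_without_symlinks(int dirfd, char *path)
{
    int fd = dup(dirfd);
    for (char *comp = strtok(path, "/"); comp && fd >= 0;
         comp = strtok(NULL, "/")) {
        int next = openat(fd, comp, O_RDONLY | O_NOFOLLOW | O_CLOEXEC);
        close(fd);
        fd = next;                 /* -1 here means symlink or error */
    }
    return fd;
}

int main(void)
{
    char target[] = "usr/share";   /* mutable copy for strtok() */
    int rootfd = open("/", O_RDONLY | O_DIRECTORY | O_CLOEXEC);
    int fd = open_without_symlinks(rootfd, target);
    close(rootfd);
    if (fd < 0) {
        perror("refusing path (symlink or missing component)");
        return 1;
    }
    /* A privileged caller would now mount onto this alias instead of
     * the raw path, so the checked target cannot be swapped. */
    char proc_path[64];
    snprintf(proc_path, sizeof(proc_path), "/proc/self/fd/%d", fd);
    printf("safe mount target alias: %s\n", proc_path);
    close(fd);
    return 0;
}
```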
bool LEX::save_prep_leaf_tables() { if (!thd->save_prep_leaf_list) return FALSE; Query_arena *arena= thd->stmt_arena, backup; arena= thd->activate_stmt_arena_if_needed(&backup); //It is used for DETETE/UPDATE so top level has only one SELECT DBUG_ASSERT(first_select_lex()->next_select() == NULL); bool res= first_select_lex()->save_prep_leaf_tables(thd); if (arena) thd->restore_active_arena(arena, &backup); if (res) return TRUE; thd->save_prep_leaf_list= FALSE; return FALSE; }
0
[ "CWE-703" ]
server
39feab3cd31b5414aa9b428eaba915c251ac34a2
59,096,478,763,029,750,000,000,000,000,000,000,000
20
MDEV-26412 Server crash in Item_field::fix_outer_field for INSERT SELECT

If an INSERT/REPLACE SELECT statement contained an ON expression in the top level select and this expression used a subquery with a column reference that could not be resolved, then an attempt to resolve this reference as an outer reference caused a crash of the server. This happened because the outer context field in the Name_resolution_context structure was not set to NULL for such references. Rather, it pointed to the first element in the select_stack.

Note that starting from 10.4 we cannot use the SELECT_LEX::outer_select() method when parsing a SELECT construct.

Approved by Oleksandr Byelkin <[email protected]>
DEFUN (show_bgp_memory, show_bgp_memory_cmd, "show bgp memory", SHOW_STR BGP_STR "Global BGP memory statistics\n") { char memstrbuf[MTYPE_MEMSTR_LEN]; unsigned long count; /* RIB related usage stats */ count = mtype_stats_alloc (MTYPE_BGP_NODE); vty_out (vty, "%ld RIB nodes, using %s of memory%s", count, mtype_memstr (memstrbuf, sizeof (memstrbuf), count * sizeof (struct bgp_node)), VTY_NEWLINE); count = mtype_stats_alloc (MTYPE_BGP_ROUTE); vty_out (vty, "%ld BGP routes, using %s of memory%s", count, mtype_memstr (memstrbuf, sizeof (memstrbuf), count * sizeof (struct bgp_info)), VTY_NEWLINE); if ((count = mtype_stats_alloc (MTYPE_BGP_ROUTE_EXTRA))) vty_out (vty, "%ld BGP route ancillaries, using %s of memory%s", count, mtype_memstr (memstrbuf, sizeof (memstrbuf), count * sizeof (struct bgp_info_extra)), VTY_NEWLINE); if ((count = mtype_stats_alloc (MTYPE_BGP_STATIC))) vty_out (vty, "%ld Static routes, using %s of memory%s", count, mtype_memstr (memstrbuf, sizeof (memstrbuf), count * sizeof (struct bgp_static)), VTY_NEWLINE); /* Adj-In/Out */ if ((count = mtype_stats_alloc (MTYPE_BGP_ADJ_IN))) vty_out (vty, "%ld Adj-In entries, using %s of memory%s", count, mtype_memstr (memstrbuf, sizeof (memstrbuf), count * sizeof (struct bgp_adj_in)), VTY_NEWLINE); if ((count = mtype_stats_alloc (MTYPE_BGP_ADJ_OUT))) vty_out (vty, "%ld Adj-Out entries, using %s of memory%s", count, mtype_memstr (memstrbuf, sizeof (memstrbuf), count * sizeof (struct bgp_adj_out)), VTY_NEWLINE); if ((count = mtype_stats_alloc (MTYPE_BGP_NEXTHOP_CACHE))) vty_out (vty, "%ld Nexthop cache entries, using %s of memory%s", count, mtype_memstr (memstrbuf, sizeof (memstrbuf), count * sizeof (struct bgp_nexthop_cache)), VTY_NEWLINE); if ((count = mtype_stats_alloc (MTYPE_BGP_DAMP_INFO))) vty_out (vty, "%ld Dampening entries, using %s of memory%s", count, mtype_memstr (memstrbuf, sizeof (memstrbuf), count * sizeof (struct bgp_damp_info)), VTY_NEWLINE); /* Attributes */ count = attr_count(); vty_out (vty, "%ld BGP attributes, using %s of memory%s", count, mtype_memstr (memstrbuf, sizeof (memstrbuf), count * sizeof(struct attr)), VTY_NEWLINE); if ((count = mtype_stats_alloc (MTYPE_ATTR_EXTRA))) vty_out (vty, "%ld BGP extra attributes, using %s of memory%s", count, mtype_memstr (memstrbuf, sizeof (memstrbuf), count * sizeof(struct attr_extra)), VTY_NEWLINE); if ((count = attr_unknown_count())) vty_out (vty, "%ld unknown attributes%s", count, VTY_NEWLINE); /* AS_PATH attributes */ count = aspath_count (); vty_out (vty, "%ld BGP AS-PATH entries, using %s of memory%s", count, mtype_memstr (memstrbuf, sizeof (memstrbuf), count * sizeof (struct aspath)), VTY_NEWLINE); count = mtype_stats_alloc (MTYPE_AS_SEG); vty_out (vty, "%ld BGP AS-PATH segments, using %s of memory%s", count, mtype_memstr (memstrbuf, sizeof (memstrbuf), count * sizeof (struct assegment)), VTY_NEWLINE); /* Other attributes */ if ((count = community_count ())) vty_out (vty, "%ld BGP community entries, using %s of memory%s", count, mtype_memstr (memstrbuf, sizeof (memstrbuf), count * sizeof (struct community)), VTY_NEWLINE); if ((count = mtype_stats_alloc (MTYPE_ECOMMUNITY))) vty_out (vty, "%ld BGP community entries, using %s of memory%s", count, mtype_memstr (memstrbuf, sizeof (memstrbuf), count * sizeof (struct ecommunity)), VTY_NEWLINE); if ((count = mtype_stats_alloc (MTYPE_CLUSTER))) vty_out (vty, "%ld Cluster lists, using %s of memory%s", count, mtype_memstr (memstrbuf, sizeof (memstrbuf), count * sizeof (struct cluster_list)), VTY_NEWLINE); /* Peer related usage */ 
count = mtype_stats_alloc (MTYPE_BGP_PEER); vty_out (vty, "%ld peers, using %s of memory%s", count, mtype_memstr (memstrbuf, sizeof (memstrbuf), count * sizeof (struct peer)), VTY_NEWLINE); if ((count = mtype_stats_alloc (MTYPE_PEER_GROUP))) vty_out (vty, "%ld peer groups, using %s of memory%s", count, mtype_memstr (memstrbuf, sizeof (memstrbuf), count * sizeof (struct peer_group)), VTY_NEWLINE); /* Other */ if ((count = mtype_stats_alloc (MTYPE_HASH))) vty_out (vty, "%ld hash tables, using %s of memory%s", count, mtype_memstr (memstrbuf, sizeof (memstrbuf), count * sizeof (struct hash)), VTY_NEWLINE); if ((count = mtype_stats_alloc (MTYPE_HASH_BACKET))) vty_out (vty, "%ld hash buckets, using %s of memory%s", count, mtype_memstr (memstrbuf, sizeof (memstrbuf), count * sizeof (struct hash_backet)), VTY_NEWLINE); if ((count = mtype_stats_alloc (MTYPE_BGP_REGEXP))) vty_out (vty, "%ld compiled regexes, using %s of memory%s", count, mtype_memstr (memstrbuf, sizeof (memstrbuf), count * sizeof (regex_t)), VTY_NEWLINE); return CMD_SUCCESS; }
0
[ "CWE-125" ]
frr
6d58272b4cf96f0daa846210dd2104877900f921
79,615,494,172,040,730,000,000,000,000,000,000,000
135
[bgpd] cleanup, compact and consolidate capability parsing code

2007-07-26 Paul Jakma <[email protected]>

* (general) Clean up and compact capability parsing slightly. Consolidate validation of length and logging of generic TLV, and memcpy of capability data, thus removing such from cap-specific code (not always present or correct).
* bgp_open.h: Add structures for the generic capability TLV header and for the data formats of the various specific capabilities we support. Hence remove the badly named, or else misdefined, struct capability.
* bgp_open.c: (bgp_capability_vty_out) Use struct capability_mp_data. Do the length checks *before* memcpy()'ing based on that length (stored capability - should have been validated anyway on input, but..).
  (bgp_afi_safi_valid_indices) new function to validate (afi,safi) which is about to be used as index into arrays, consolidates several instances of same, at least one of which appeared to be incomplete..
  (bgp_capability_mp) Much condensed.
  (bgp_capability_orf_entry) New, process one ORF entry
  (bgp_capability_orf) Condensed. Fixed to process all ORF entries.
  (bgp_capability_restart) Condensed, and fixed to use a cap-specific type, rather than abusing capability_mp.
  (struct message capcode_str) added to aid generic logging.
  (size_t cap_minsizes[]) added to aid generic validation of capability length field.
  (bgp_capability_parse) Generic logging and validation of TLV consolidated here. Code compacted as much as possible.
* bgp_packet.c: (bgp_open_receive) Capability parsers now use streams, so no more need here to manually fudge the input stream getp.
  (bgp_capability_msg_parse) use struct capability_mp_data. Validate lengths /before/ memcpy. Use bgp_afi_safi_valid_indices.
  (bgp_capability_receive) Exported for use by test harness.
* bgp_vty.c: (bgp_show_summary) fix conversion warning
  (bgp_show_peer) ditto
* bgp_debug.h: Fix storage 'extern' after type 'const'.
* lib/log.c: (mes_lookup) warning about code not being in the same-number array slot should be debug, not warning. E.g. BGP has several discontiguous number spaces, allocating from different parts of a space is not uncommon (e.g. IANA-assigned versus vendor-assigned code points in some number space).
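The consolidation this changelog describes boils down to one generic loop that validates each capability TLV's length, against both the remaining buffer and a per-code minimum-size table, before any memcpy. A hedged sketch of that pattern; the capability codes and sizes below are illustrative, not bgpd's actual cap_minsizes[] values:

```c
/* Sketch: generic TLV walk with length validation *before* memcpy,
 * mirroring the cap_minsizes[] idea from the changelog. Codes and
 * minimum sizes here are invented for illustration. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum { CAP_MP = 1, CAP_RESTART = 64, CAP_MAX = 128 };

static const size_t cap_minsizes[CAP_MAX] = {
    [CAP_MP]      = 4,   /* afi(2) + reserved(1) + safi(1) */
    [CAP_RESTART] = 2,   /* restart flags/time */
};

static int parse_capabilities(const uint8_t *buf, size_t len)
{
    size_t off = 0;
    while (off + 2 <= len) {               /* need code + length header */
        uint8_t code = buf[off], clen = buf[off + 1];
        off += 2;
        if (off + clen > len) {            /* TLV runs past the buffer */
            fprintf(stderr, "cap %u: truncated TLV\n", code);
            return -1;
        }
        if (code < CAP_MAX && cap_minsizes[code] != 0 &&
            clen < cap_minsizes[code]) {   /* body too small for its type */
            fprintf(stderr, "cap %u: length %u below minimum %zu\n",
                    code, clen, cap_minsizes[code]);
            return -1;
        }
        /* Only now is it safe to memcpy clen bytes of capability data. */
        uint8_t data[255];
        memcpy(data, buf + off, clen);
        printf("cap %u ok, %u byte(s)\n", code, clen);
        off += clen;
    }
    return 0;
}

int main(void)
{
    const uint8_t open_caps[] = { CAP_MP, 4, 0, 1, 0, 1, CAP_RESTART, 2, 0, 0 };
    return parse_capabilities(open_caps, sizeof(open_caps)) ? 1 : 0;
}
```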
void md5_update(MD5CTX c, const void *data, unsigned long len) { MD5_Update(c, data, len); }
0
[ "CWE-310" ]
libssh
e99246246b4061f7e71463f8806b9dcad65affa0
320,578,162,222,556,530,000,000,000,000,000,000,000
3
security: fix for vulnerability CVE-2014-0017

When accepting a new connection, a forking server based on libssh forks and the child process handles the request. The RAND_bytes() function of openssl doesn't reset its state after the fork, but simply adds the current process id (getpid) to the PRNG state, which is not guaranteed to be unique. This can cause several children to end up with the same PRNG state, which is a security issue.
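The mitigation implied by the message is to explicitly mix fresh, child-specific entropy into the PRNG after fork() instead of trusting getpid() alone. A minimal sketch using OpenSSL's public RAND API (link with -lcrypto); note that modern OpenSSL (1.1.1+) reseeds on fork by itself, so this matters mainly for the older versions the CVE targets:

```c
/* Sketch: reseed the child's PRNG after fork() so siblings do not
 * share PRNG state, per CVE-2014-0017. */
#include <openssl/rand.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static void reseed_child_prng(void)
{
    pid_t pid = getpid();
    RAND_poll();                      /* pull fresh entropy from the OS */
    RAND_seed(&pid, sizeof(pid));     /* plus something child-specific */
}

int main(void)
{
    pid_t child = fork();
    if (child == 0) {                 /* forked worker, as in the CVE */
        reseed_child_prng();
        unsigned char buf[8];
        if (RAND_bytes(buf, sizeof(buf)) == 1)
            printf("child %d: fresh random bytes drawn\n", (int)getpid());
        return 0;
    }
    if (child > 0)
        wait(NULL);
    return child > 0 ? 0 : 1;
}
```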
void sqlite3Pragma( Parse *pParse, Token *pId1, /* First part of [schema.]id field */ Token *pId2, /* Second part of [schema.]id field, or NULL */ Token *pValue, /* Token for <value>, or NULL */ int minusFlag /* True if a '-' sign preceded <value> */ ){ char *zLeft = 0; /* Nul-terminated UTF-8 string <id> */ char *zRight = 0; /* Nul-terminated UTF-8 string <value>, or NULL */ const char *zDb = 0; /* The database name */ Token *pId; /* Pointer to <id> token */ char *aFcntl[4]; /* Argument to SQLITE_FCNTL_PRAGMA */ int iDb; /* Database index for <database> */ int rc; /* return value form SQLITE_FCNTL_PRAGMA */ sqlite3 *db = pParse->db; /* The database connection */ Db *pDb; /* The specific database being pragmaed */ Vdbe *v = sqlite3GetVdbe(pParse); /* Prepared statement */ const PragmaName *pPragma; /* The pragma */ if( v==0 ) return; sqlite3VdbeRunOnlyOnce(v); pParse->nMem = 2; /* Interpret the [schema.] part of the pragma statement. iDb is the ** index of the database this pragma is being applied to in db.aDb[]. */ iDb = sqlite3TwoPartName(pParse, pId1, pId2, &pId); if( iDb<0 ) return; pDb = &db->aDb[iDb]; /* If the temp database has been explicitly named as part of the ** pragma, make sure it is open. */ if( iDb==1 && sqlite3OpenTempDatabase(pParse) ){ return; } zLeft = sqlite3NameFromToken(db, pId); if( !zLeft ) return; if( minusFlag ){ zRight = sqlite3MPrintf(db, "-%T", pValue); }else{ zRight = sqlite3NameFromToken(db, pValue); } assert( pId2 ); zDb = pId2->n>0 ? pDb->zDbSName : 0; if( sqlite3AuthCheck(pParse, SQLITE_PRAGMA, zLeft, zRight, zDb) ){ goto pragma_out; } /* Send an SQLITE_FCNTL_PRAGMA file-control to the underlying VFS ** connection. If it returns SQLITE_OK, then assume that the VFS ** handled the pragma and generate a no-op prepared statement. ** ** IMPLEMENTATION-OF: R-12238-55120 Whenever a PRAGMA statement is parsed, ** an SQLITE_FCNTL_PRAGMA file control is sent to the open sqlite3_file ** object corresponding to the database file to which the pragma ** statement refers. ** ** IMPLEMENTATION-OF: R-29875-31678 The argument to the SQLITE_FCNTL_PRAGMA ** file control is an array of pointers to strings (char**) in which the ** second element of the array is the name of the pragma and the third ** element is the argument to the pragma or NULL if the pragma has no ** argument. 
*/ aFcntl[0] = 0; aFcntl[1] = zLeft; aFcntl[2] = zRight; aFcntl[3] = 0; db->busyHandler.nBusy = 0; rc = sqlite3_file_control(db, zDb, SQLITE_FCNTL_PRAGMA, (void*)aFcntl); if( rc==SQLITE_OK ){ sqlite3VdbeSetNumCols(v, 1); sqlite3VdbeSetColName(v, 0, COLNAME_NAME, aFcntl[0], SQLITE_TRANSIENT); returnSingleText(v, aFcntl[0]); sqlite3_free(aFcntl[0]); goto pragma_out; } if( rc!=SQLITE_NOTFOUND ){ if( aFcntl[0] ){ sqlite3ErrorMsg(pParse, "%s", aFcntl[0]); sqlite3_free(aFcntl[0]); } pParse->nErr++; pParse->rc = rc; goto pragma_out; } /* Locate the pragma in the lookup table */ pPragma = pragmaLocate(zLeft); if( pPragma==0 ) goto pragma_out; /* Make sure the database schema is loaded if the pragma requires that */ if( (pPragma->mPragFlg & PragFlg_NeedSchema)!=0 ){ if( sqlite3ReadSchema(pParse) ) goto pragma_out; } /* Register the result column names for pragmas that return results */ if( (pPragma->mPragFlg & PragFlg_NoColumns)==0 && ((pPragma->mPragFlg & PragFlg_NoColumns1)==0 || zRight==0) ){ setPragmaResultColumnNames(v, pPragma); } /* Jump to the appropriate pragma handler */ switch( pPragma->ePragTyp ){ #if !defined(SQLITE_OMIT_PAGER_PRAGMAS) && !defined(SQLITE_OMIT_DEPRECATED) /* ** PRAGMA [schema.]default_cache_size ** PRAGMA [schema.]default_cache_size=N ** ** The first form reports the current persistent setting for the ** page cache size. The value returned is the maximum number of ** pages in the page cache. The second form sets both the current ** page cache size value and the persistent page cache size value ** stored in the database file. ** ** Older versions of SQLite would set the default cache size to a ** negative number to indicate synchronous=OFF. These days, synchronous ** is always on by default regardless of the sign of the default cache ** size. But continue to take the absolute value of the default cache ** size of historical compatibility. */ case PragTyp_DEFAULT_CACHE_SIZE: { static const int iLn = VDBE_OFFSET_LINENO(2); static const VdbeOpList getCacheSize[] = { { OP_Transaction, 0, 0, 0}, /* 0 */ { OP_ReadCookie, 0, 1, BTREE_DEFAULT_CACHE_SIZE}, /* 1 */ { OP_IfPos, 1, 8, 0}, { OP_Integer, 0, 2, 0}, { OP_Subtract, 1, 2, 1}, { OP_IfPos, 1, 8, 0}, { OP_Integer, 0, 1, 0}, /* 6 */ { OP_Noop, 0, 0, 0}, { OP_ResultRow, 1, 1, 0}, }; VdbeOp *aOp; sqlite3VdbeUsesBtree(v, iDb); if( !zRight ){ pParse->nMem += 2; sqlite3VdbeVerifyNoMallocRequired(v, ArraySize(getCacheSize)); aOp = sqlite3VdbeAddOpList(v, ArraySize(getCacheSize), getCacheSize, iLn); if( ONLY_IF_REALLOC_STRESS(aOp==0) ) break; aOp[0].p1 = iDb; aOp[1].p1 = iDb; aOp[6].p1 = SQLITE_DEFAULT_CACHE_SIZE; }else{ int size = sqlite3AbsInt32(sqlite3Atoi(zRight)); sqlite3BeginWriteOperation(pParse, 0, iDb); sqlite3VdbeAddOp3(v, OP_SetCookie, iDb, BTREE_DEFAULT_CACHE_SIZE, size); assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); pDb->pSchema->cache_size = size; sqlite3BtreeSetCacheSize(pDb->pBt, pDb->pSchema->cache_size); } break; } #endif /* !SQLITE_OMIT_PAGER_PRAGMAS && !SQLITE_OMIT_DEPRECATED */ #if !defined(SQLITE_OMIT_PAGER_PRAGMAS) /* ** PRAGMA [schema.]page_size ** PRAGMA [schema.]page_size=N ** ** The first form reports the current setting for the ** database page size in bytes. The second form sets the ** database page size value. The value can only be set if ** the database has not yet been created. */ case PragTyp_PAGE_SIZE: { Btree *pBt = pDb->pBt; assert( pBt!=0 ); if( !zRight ){ int size = ALWAYS(pBt) ? 
sqlite3BtreeGetPageSize(pBt) : 0; returnSingleInt(v, size); }else{ /* Malloc may fail when setting the page-size, as there is an internal ** buffer that the pager module resizes using sqlite3_realloc(). */ db->nextPagesize = sqlite3Atoi(zRight); if( SQLITE_NOMEM==sqlite3BtreeSetPageSize(pBt, db->nextPagesize,-1,0) ){ sqlite3OomFault(db); } } break; } /* ** PRAGMA [schema.]secure_delete ** PRAGMA [schema.]secure_delete=ON/OFF/FAST ** ** The first form reports the current setting for the ** secure_delete flag. The second form changes the secure_delete ** flag setting and reports the new value. */ case PragTyp_SECURE_DELETE: { Btree *pBt = pDb->pBt; int b = -1; assert( pBt!=0 ); if( zRight ){ if( sqlite3_stricmp(zRight, "fast")==0 ){ b = 2; }else{ b = sqlite3GetBoolean(zRight, 0); } } if( pId2->n==0 && b>=0 ){ int ii; for(ii=0; ii<db->nDb; ii++){ sqlite3BtreeSecureDelete(db->aDb[ii].pBt, b); } } b = sqlite3BtreeSecureDelete(pBt, b); returnSingleInt(v, b); break; } /* ** PRAGMA [schema.]max_page_count ** PRAGMA [schema.]max_page_count=N ** ** The first form reports the current setting for the ** maximum number of pages in the database file. The ** second form attempts to change this setting. Both ** forms return the current setting. ** ** The absolute value of N is used. This is undocumented and might ** change. The only purpose is to provide an easy way to test ** the sqlite3AbsInt32() function. ** ** PRAGMA [schema.]page_count ** ** Return the number of pages in the specified database. */ case PragTyp_PAGE_COUNT: { int iReg; sqlite3CodeVerifySchema(pParse, iDb); iReg = ++pParse->nMem; if( sqlite3Tolower(zLeft[0])=='p' ){ sqlite3VdbeAddOp2(v, OP_Pagecount, iDb, iReg); }else{ sqlite3VdbeAddOp3(v, OP_MaxPgcnt, iDb, iReg, sqlite3AbsInt32(sqlite3Atoi(zRight))); } sqlite3VdbeAddOp2(v, OP_ResultRow, iReg, 1); break; } /* ** PRAGMA [schema.]locking_mode ** PRAGMA [schema.]locking_mode = (normal|exclusive) */ case PragTyp_LOCKING_MODE: { const char *zRet = "normal"; int eMode = getLockingMode(zRight); if( pId2->n==0 && eMode==PAGER_LOCKINGMODE_QUERY ){ /* Simple "PRAGMA locking_mode;" statement. This is a query for ** the current default locking mode (which may be different to ** the locking-mode of the main database). */ eMode = db->dfltLockMode; }else{ Pager *pPager; if( pId2->n==0 ){ /* This indicates that no database name was specified as part ** of the PRAGMA command. In this case the locking-mode must be ** set on all attached databases, as well as the main db file. ** ** Also, the sqlite3.dfltLockMode variable is set so that ** any subsequently attached databases also use the specified ** locking mode. 
*/ int ii; assert(pDb==&db->aDb[0]); for(ii=2; ii<db->nDb; ii++){ pPager = sqlite3BtreePager(db->aDb[ii].pBt); sqlite3PagerLockingMode(pPager, eMode); } db->dfltLockMode = (u8)eMode; } pPager = sqlite3BtreePager(pDb->pBt); eMode = sqlite3PagerLockingMode(pPager, eMode); } assert( eMode==PAGER_LOCKINGMODE_NORMAL || eMode==PAGER_LOCKINGMODE_EXCLUSIVE ); if( eMode==PAGER_LOCKINGMODE_EXCLUSIVE ){ zRet = "exclusive"; } returnSingleText(v, zRet); break; } /* ** PRAGMA [schema.]journal_mode ** PRAGMA [schema.]journal_mode = ** (delete|persist|off|truncate|memory|wal|off) */ case PragTyp_JOURNAL_MODE: { int eMode; /* One of the PAGER_JOURNALMODE_XXX symbols */ int ii; /* Loop counter */ if( zRight==0 ){ /* If there is no "=MODE" part of the pragma, do a query for the ** current mode */ eMode = PAGER_JOURNALMODE_QUERY; }else{ const char *zMode; int n = sqlite3Strlen30(zRight); for(eMode=0; (zMode = sqlite3JournalModename(eMode))!=0; eMode++){ if( sqlite3StrNICmp(zRight, zMode, n)==0 ) break; } if( !zMode ){ /* If the "=MODE" part does not match any known journal mode, ** then do a query */ eMode = PAGER_JOURNALMODE_QUERY; } if( eMode==PAGER_JOURNALMODE_OFF && (db->flags & SQLITE_Defensive)!=0 ){ /* Do not allow journal-mode "OFF" in defensive since the database ** can become corrupted using ordinary SQL when the journal is off */ eMode = PAGER_JOURNALMODE_QUERY; } } if( eMode==PAGER_JOURNALMODE_QUERY && pId2->n==0 ){ /* Convert "PRAGMA journal_mode" into "PRAGMA main.journal_mode" */ iDb = 0; pId2->n = 1; } for(ii=db->nDb-1; ii>=0; ii--){ if( db->aDb[ii].pBt && (ii==iDb || pId2->n==0) ){ sqlite3VdbeUsesBtree(v, ii); sqlite3VdbeAddOp3(v, OP_JournalMode, ii, 1, eMode); } } sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1); break; } /* ** PRAGMA [schema.]journal_size_limit ** PRAGMA [schema.]journal_size_limit=N ** ** Get or set the size limit on rollback journal files. */ case PragTyp_JOURNAL_SIZE_LIMIT: { Pager *pPager = sqlite3BtreePager(pDb->pBt); i64 iLimit = -2; if( zRight ){ sqlite3DecOrHexToI64(zRight, &iLimit); if( iLimit<-1 ) iLimit = -1; } iLimit = sqlite3PagerJournalSizeLimit(pPager, iLimit); returnSingleInt(v, iLimit); break; } #endif /* SQLITE_OMIT_PAGER_PRAGMAS */ /* ** PRAGMA [schema.]auto_vacuum ** PRAGMA [schema.]auto_vacuum=N ** ** Get or set the value of the database 'auto-vacuum' parameter. ** The value is one of: 0 NONE 1 FULL 2 INCREMENTAL */ #ifndef SQLITE_OMIT_AUTOVACUUM case PragTyp_AUTO_VACUUM: { Btree *pBt = pDb->pBt; assert( pBt!=0 ); if( !zRight ){ returnSingleInt(v, sqlite3BtreeGetAutoVacuum(pBt)); }else{ int eAuto = getAutoVacuum(zRight); assert( eAuto>=0 && eAuto<=2 ); db->nextAutovac = (u8)eAuto; /* Call SetAutoVacuum() to set initialize the internal auto and ** incr-vacuum flags. This is required in case this connection ** creates the database file. It is important that it is created ** as an auto-vacuum capable db. */ rc = sqlite3BtreeSetAutoVacuum(pBt, eAuto); if( rc==SQLITE_OK && (eAuto==1 || eAuto==2) ){ /* When setting the auto_vacuum mode to either "full" or ** "incremental", write the value of meta[6] in the database ** file. Before writing to meta[6], check that meta[3] indicates ** that this really is an auto-vacuum capable database. 
*/ static const int iLn = VDBE_OFFSET_LINENO(2); static const VdbeOpList setMeta6[] = { { OP_Transaction, 0, 1, 0}, /* 0 */ { OP_ReadCookie, 0, 1, BTREE_LARGEST_ROOT_PAGE}, { OP_If, 1, 0, 0}, /* 2 */ { OP_Halt, SQLITE_OK, OE_Abort, 0}, /* 3 */ { OP_SetCookie, 0, BTREE_INCR_VACUUM, 0}, /* 4 */ }; VdbeOp *aOp; int iAddr = sqlite3VdbeCurrentAddr(v); sqlite3VdbeVerifyNoMallocRequired(v, ArraySize(setMeta6)); aOp = sqlite3VdbeAddOpList(v, ArraySize(setMeta6), setMeta6, iLn); if( ONLY_IF_REALLOC_STRESS(aOp==0) ) break; aOp[0].p1 = iDb; aOp[1].p1 = iDb; aOp[2].p2 = iAddr+4; aOp[4].p1 = iDb; aOp[4].p3 = eAuto - 1; sqlite3VdbeUsesBtree(v, iDb); } } break; } #endif /* ** PRAGMA [schema.]incremental_vacuum(N) ** ** Do N steps of incremental vacuuming on a database. */ #ifndef SQLITE_OMIT_AUTOVACUUM case PragTyp_INCREMENTAL_VACUUM: { int iLimit, addr; if( zRight==0 || !sqlite3GetInt32(zRight, &iLimit) || iLimit<=0 ){ iLimit = 0x7fffffff; } sqlite3BeginWriteOperation(pParse, 0, iDb); sqlite3VdbeAddOp2(v, OP_Integer, iLimit, 1); addr = sqlite3VdbeAddOp1(v, OP_IncrVacuum, iDb); VdbeCoverage(v); sqlite3VdbeAddOp1(v, OP_ResultRow, 1); sqlite3VdbeAddOp2(v, OP_AddImm, 1, -1); sqlite3VdbeAddOp2(v, OP_IfPos, 1, addr); VdbeCoverage(v); sqlite3VdbeJumpHere(v, addr); break; } #endif #ifndef SQLITE_OMIT_PAGER_PRAGMAS /* ** PRAGMA [schema.]cache_size ** PRAGMA [schema.]cache_size=N ** ** The first form reports the current local setting for the ** page cache size. The second form sets the local ** page cache size value. If N is positive then that is the ** number of pages in the cache. If N is negative, then the ** number of pages is adjusted so that the cache uses -N kibibytes ** of memory. */ case PragTyp_CACHE_SIZE: { assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); if( !zRight ){ returnSingleInt(v, pDb->pSchema->cache_size); }else{ int size = sqlite3Atoi(zRight); pDb->pSchema->cache_size = size; sqlite3BtreeSetCacheSize(pDb->pBt, pDb->pSchema->cache_size); } break; } /* ** PRAGMA [schema.]cache_spill ** PRAGMA cache_spill=BOOLEAN ** PRAGMA [schema.]cache_spill=N ** ** The first form reports the current local setting for the ** page cache spill size. The second form turns cache spill on ** or off. When turnning cache spill on, the size is set to the ** current cache_size. The third form sets a spill size that ** may be different form the cache size. ** If N is positive then that is the ** number of pages in the cache. If N is negative, then the ** number of pages is adjusted so that the cache uses -N kibibytes ** of memory. ** ** If the number of cache_spill pages is less then the number of ** cache_size pages, no spilling occurs until the page count exceeds ** the number of cache_size pages. ** ** The cache_spill=BOOLEAN setting applies to all attached schemas, ** not just the schema specified. */ case PragTyp_CACHE_SPILL: { assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); if( !zRight ){ returnSingleInt(v, (db->flags & SQLITE_CacheSpill)==0 ? 0 : sqlite3BtreeSetSpillSize(pDb->pBt,0)); }else{ int size = 1; if( sqlite3GetInt32(zRight, &size) ){ sqlite3BtreeSetSpillSize(pDb->pBt, size); } if( sqlite3GetBoolean(zRight, size!=0) ){ db->flags |= SQLITE_CacheSpill; }else{ db->flags &= ~(u64)SQLITE_CacheSpill; } setAllPagerFlags(db); } break; } /* ** PRAGMA [schema.]mmap_size(N) ** ** Used to set mapping size limit. The mapping size limit is ** used to limit the aggregate size of all memory mapped regions of the ** database file. If this parameter is set to zero, then memory mapping ** is not used at all. 
If N is negative, then the default memory map ** limit determined by sqlite3_config(SQLITE_CONFIG_MMAP_SIZE) is set. ** The parameter N is measured in bytes. ** ** This value is advisory. The underlying VFS is free to memory map ** as little or as much as it wants. Except, if N is set to 0 then the ** upper layers will never invoke the xFetch interfaces to the VFS. */ case PragTyp_MMAP_SIZE: { sqlite3_int64 sz; #if SQLITE_MAX_MMAP_SIZE>0 assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); if( zRight ){ int ii; sqlite3DecOrHexToI64(zRight, &sz); if( sz<0 ) sz = sqlite3GlobalConfig.szMmap; if( pId2->n==0 ) db->szMmap = sz; for(ii=db->nDb-1; ii>=0; ii--){ if( db->aDb[ii].pBt && (ii==iDb || pId2->n==0) ){ sqlite3BtreeSetMmapLimit(db->aDb[ii].pBt, sz); } } } sz = -1; rc = sqlite3_file_control(db, zDb, SQLITE_FCNTL_MMAP_SIZE, &sz); #else sz = 0; rc = SQLITE_OK; #endif if( rc==SQLITE_OK ){ returnSingleInt(v, sz); }else if( rc!=SQLITE_NOTFOUND ){ pParse->nErr++; pParse->rc = rc; } break; } /* ** PRAGMA temp_store ** PRAGMA temp_store = "default"|"memory"|"file" ** ** Return or set the local value of the temp_store flag. Changing ** the local value does not make changes to the disk file and the default ** value will be restored the next time the database is opened. ** ** Note that it is possible for the library compile-time options to ** override this setting */ case PragTyp_TEMP_STORE: { if( !zRight ){ returnSingleInt(v, db->temp_store); }else{ changeTempStorage(pParse, zRight); } break; } /* ** PRAGMA temp_store_directory ** PRAGMA temp_store_directory = ""|"directory_name" ** ** Return or set the local value of the temp_store_directory flag. Changing ** the value sets a specific directory to be used for temporary files. ** Setting to a null string reverts to the default temporary directory search. ** If temporary directory is changed, then invalidateTempStorage. ** */ case PragTyp_TEMP_STORE_DIRECTORY: { if( !zRight ){ returnSingleText(v, sqlite3_temp_directory); }else{ #ifndef SQLITE_OMIT_WSD if( zRight[0] ){ int res; rc = sqlite3OsAccess(db->pVfs, zRight, SQLITE_ACCESS_READWRITE, &res); if( rc!=SQLITE_OK || res==0 ){ sqlite3ErrorMsg(pParse, "not a writable directory"); goto pragma_out; } } if( SQLITE_TEMP_STORE==0 || (SQLITE_TEMP_STORE==1 && db->temp_store<=1) || (SQLITE_TEMP_STORE==2 && db->temp_store==1) ){ invalidateTempStorage(pParse); } sqlite3_free(sqlite3_temp_directory); if( zRight[0] ){ sqlite3_temp_directory = sqlite3_mprintf("%s", zRight); }else{ sqlite3_temp_directory = 0; } #endif /* SQLITE_OMIT_WSD */ } break; } #if SQLITE_OS_WIN /* ** PRAGMA data_store_directory ** PRAGMA data_store_directory = ""|"directory_name" ** ** Return or set the local value of the data_store_directory flag. Changing ** the value sets a specific directory to be used for database files that ** were specified with a relative pathname. Setting to a null string reverts ** to the default database directory, which for database files specified with ** a relative path will probably be based on the current directory for the ** process. Database file specified with an absolute path are not impacted ** by this setting, regardless of its value. 
** */ case PragTyp_DATA_STORE_DIRECTORY: { if( !zRight ){ returnSingleText(v, sqlite3_data_directory); }else{ #ifndef SQLITE_OMIT_WSD if( zRight[0] ){ int res; rc = sqlite3OsAccess(db->pVfs, zRight, SQLITE_ACCESS_READWRITE, &res); if( rc!=SQLITE_OK || res==0 ){ sqlite3ErrorMsg(pParse, "not a writable directory"); goto pragma_out; } } sqlite3_free(sqlite3_data_directory); if( zRight[0] ){ sqlite3_data_directory = sqlite3_mprintf("%s", zRight); }else{ sqlite3_data_directory = 0; } #endif /* SQLITE_OMIT_WSD */ } break; } #endif #if SQLITE_ENABLE_LOCKING_STYLE /* ** PRAGMA [schema.]lock_proxy_file ** PRAGMA [schema.]lock_proxy_file = ":auto:"|"lock_file_path" ** ** Return or set the value of the lock_proxy_file flag. Changing ** the value sets a specific file to be used for database access locks. ** */ case PragTyp_LOCK_PROXY_FILE: { if( !zRight ){ Pager *pPager = sqlite3BtreePager(pDb->pBt); char *proxy_file_path = NULL; sqlite3_file *pFile = sqlite3PagerFile(pPager); sqlite3OsFileControlHint(pFile, SQLITE_GET_LOCKPROXYFILE, &proxy_file_path); returnSingleText(v, proxy_file_path); }else{ Pager *pPager = sqlite3BtreePager(pDb->pBt); sqlite3_file *pFile = sqlite3PagerFile(pPager); int res; if( zRight[0] ){ res=sqlite3OsFileControl(pFile, SQLITE_SET_LOCKPROXYFILE, zRight); } else { res=sqlite3OsFileControl(pFile, SQLITE_SET_LOCKPROXYFILE, NULL); } if( res!=SQLITE_OK ){ sqlite3ErrorMsg(pParse, "failed to set lock proxy file"); goto pragma_out; } } break; } #endif /* SQLITE_ENABLE_LOCKING_STYLE */ /* ** PRAGMA [schema.]synchronous ** PRAGMA [schema.]synchronous=OFF|ON|NORMAL|FULL|EXTRA ** ** Return or set the local value of the synchronous flag. Changing ** the local value does not make changes to the disk file and the ** default value will be restored the next time the database is ** opened. */ case PragTyp_SYNCHRONOUS: { if( !zRight ){ returnSingleInt(v, pDb->safety_level-1); }else{ if( !db->autoCommit ){ sqlite3ErrorMsg(pParse, "Safety level may not be changed inside a transaction"); }else if( iDb!=1 ){ int iLevel = (getSafetyLevel(zRight,0,1)+1) & PAGER_SYNCHRONOUS_MASK; if( iLevel==0 ) iLevel = 1; pDb->safety_level = iLevel; pDb->bSyncSet = 1; setAllPagerFlags(db); } } break; } #endif /* SQLITE_OMIT_PAGER_PRAGMAS */ #ifndef SQLITE_OMIT_FLAG_PRAGMAS case PragTyp_FLAG: { if( zRight==0 ){ setPragmaResultColumnNames(v, pPragma); returnSingleInt(v, (db->flags & pPragma->iArg)!=0 ); }else{ u64 mask = pPragma->iArg; /* Mask of bits to set or clear. */ if( db->autoCommit==0 ){ /* Foreign key support may not be enabled or disabled while not ** in auto-commit mode. */ mask &= ~(SQLITE_ForeignKeys); } #if SQLITE_USER_AUTHENTICATION if( db->auth.authLevel==UAUTH_User ){ /* Do not allow non-admin users to modify the schema arbitrarily */ mask &= ~(SQLITE_WriteSchema); } #endif if( sqlite3GetBoolean(zRight, 0) ){ db->flags |= mask; }else{ db->flags &= ~mask; if( mask==SQLITE_DeferFKs ) db->nDeferredImmCons = 0; } /* Many of the flag-pragmas modify the code generated by the SQL ** compiler (eg. count_changes). So add an opcode to expire all ** compiled SQL statements after modifying a pragma value. */ sqlite3VdbeAddOp0(v, OP_Expire); setAllPagerFlags(db); } break; } #endif /* SQLITE_OMIT_FLAG_PRAGMAS */ #ifndef SQLITE_OMIT_SCHEMA_PRAGMAS /* ** PRAGMA table_info(<table>) ** ** Return a single row for each column of the named table. The columns of ** the returned data set are: ** ** cid: Column id (numbered from left to right, starting at 0) ** name: Column name ** type: Column declaration type. 
** notnull: True if 'NOT NULL' is part of column declaration ** dflt_value: The default value for the column, if any. ** pk: Non-zero for PK fields. */ case PragTyp_TABLE_INFO: if( zRight ){ Table *pTab; pTab = sqlite3LocateTable(pParse, LOCATE_NOERR, zRight, zDb); if( pTab ){ int iTabDb = sqlite3SchemaToIndex(db, pTab->pSchema); int i, k; int nHidden = 0; Column *pCol; Index *pPk = sqlite3PrimaryKeyIndex(pTab); pParse->nMem = 7; sqlite3CodeVerifySchema(pParse, iTabDb); sqlite3ViewGetColumnNames(pParse, pTab); for(i=0, pCol=pTab->aCol; i<pTab->nCol; i++, pCol++){ int isHidden = 0; if( pCol->colFlags & COLFLAG_NOINSERT ){ if( pPragma->iArg==0 ){ nHidden++; continue; } if( pCol->colFlags & COLFLAG_VIRTUAL ){ isHidden = 2; /* GENERATED ALWAYS AS ... VIRTUAL */ }else if( pCol->colFlags & COLFLAG_STORED ){ isHidden = 3; /* GENERATED ALWAYS AS ... STORED */ }else{ assert( pCol->colFlags & COLFLAG_HIDDEN ); isHidden = 1; /* HIDDEN */ } } if( (pCol->colFlags & COLFLAG_PRIMKEY)==0 ){ k = 0; }else if( pPk==0 ){ k = 1; }else{ for(k=1; k<=pTab->nCol && pPk->aiColumn[k-1]!=i; k++){} } assert( pCol->pDflt==0 || pCol->pDflt->op==TK_SPAN || isHidden>=2 ); sqlite3VdbeMultiLoad(v, 1, pPragma->iArg ? "issisii" : "issisi", i-nHidden, pCol->zName, sqlite3ColumnType(pCol,""), pCol->notNull ? 1 : 0, pCol->pDflt && isHidden<2 ? pCol->pDflt->u.zToken : 0, k, isHidden); } } } break; #ifdef SQLITE_DEBUG case PragTyp_STATS: { Index *pIdx; HashElem *i; pParse->nMem = 5; sqlite3CodeVerifySchema(pParse, iDb); for(i=sqliteHashFirst(&pDb->pSchema->tblHash); i; i=sqliteHashNext(i)){ Table *pTab = sqliteHashData(i); sqlite3VdbeMultiLoad(v, 1, "ssiii", pTab->zName, 0, pTab->szTabRow, pTab->nRowLogEst, pTab->tabFlags); for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ sqlite3VdbeMultiLoad(v, 2, "siiiX", pIdx->zName, pIdx->szIdxRow, pIdx->aiRowLogEst[0], pIdx->hasStat1); sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 5); } } } break; #endif case PragTyp_INDEX_INFO: if( zRight ){ Index *pIdx; Table *pTab; pIdx = sqlite3FindIndex(db, zRight, zDb); if( pIdx==0 ){ /* If there is no index named zRight, check to see if there is a ** WITHOUT ROWID table named zRight, and if there is, show the ** structure of the PRIMARY KEY index for that table. */ pTab = sqlite3LocateTable(pParse, LOCATE_NOERR, zRight, zDb); if( pTab && !HasRowid(pTab) ){ pIdx = sqlite3PrimaryKeyIndex(pTab); } } if( pIdx ){ int iIdxDb = sqlite3SchemaToIndex(db, pIdx->pSchema); int i; int mx; if( pPragma->iArg ){ /* PRAGMA index_xinfo (newer version with more rows and columns) */ mx = pIdx->nColumn; pParse->nMem = 6; }else{ /* PRAGMA index_info (legacy version) */ mx = pIdx->nKeyCol; pParse->nMem = 3; } pTab = pIdx->pTable; sqlite3CodeVerifySchema(pParse, iIdxDb); assert( pParse->nMem<=pPragma->nPragCName ); for(i=0; i<mx; i++){ i16 cnum = pIdx->aiColumn[i]; sqlite3VdbeMultiLoad(v, 1, "iisX", i, cnum, cnum<0 ? 
0 : pTab->aCol[cnum].zName); if( pPragma->iArg ){ sqlite3VdbeMultiLoad(v, 4, "isiX", pIdx->aSortOrder[i], pIdx->azColl[i], i<pIdx->nKeyCol); } sqlite3VdbeAddOp2(v, OP_ResultRow, 1, pParse->nMem); } } } break; case PragTyp_INDEX_LIST: if( zRight ){ Index *pIdx; Table *pTab; int i; pTab = sqlite3FindTable(db, zRight, zDb); if( pTab ){ int iTabDb = sqlite3SchemaToIndex(db, pTab->pSchema); pParse->nMem = 5; sqlite3CodeVerifySchema(pParse, iTabDb); for(pIdx=pTab->pIndex, i=0; pIdx; pIdx=pIdx->pNext, i++){ const char *azOrigin[] = { "c", "u", "pk" }; sqlite3VdbeMultiLoad(v, 1, "isisi", i, pIdx->zName, IsUniqueIndex(pIdx), azOrigin[pIdx->idxType], pIdx->pPartIdxWhere!=0); } } } break; case PragTyp_DATABASE_LIST: { int i; pParse->nMem = 3; for(i=0; i<db->nDb; i++){ if( db->aDb[i].pBt==0 ) continue; assert( db->aDb[i].zDbSName!=0 ); sqlite3VdbeMultiLoad(v, 1, "iss", i, db->aDb[i].zDbSName, sqlite3BtreeGetFilename(db->aDb[i].pBt)); } } break; case PragTyp_COLLATION_LIST: { int i = 0; HashElem *p; pParse->nMem = 2; for(p=sqliteHashFirst(&db->aCollSeq); p; p=sqliteHashNext(p)){ CollSeq *pColl = (CollSeq *)sqliteHashData(p); sqlite3VdbeMultiLoad(v, 1, "is", i++, pColl->zName); } } break; #ifndef SQLITE_OMIT_INTROSPECTION_PRAGMAS case PragTyp_FUNCTION_LIST: { int i; HashElem *j; FuncDef *p; pParse->nMem = 2; for(i=0; i<SQLITE_FUNC_HASH_SZ; i++){ for(p=sqlite3BuiltinFunctions.a[i]; p; p=p->u.pHash ){ if( p->funcFlags & SQLITE_FUNC_INTERNAL ) continue; sqlite3VdbeMultiLoad(v, 1, "si", p->zName, 1); } } for(j=sqliteHashFirst(&db->aFunc); j; j=sqliteHashNext(j)){ p = (FuncDef*)sqliteHashData(j); sqlite3VdbeMultiLoad(v, 1, "si", p->zName, 0); } } break; #ifndef SQLITE_OMIT_VIRTUALTABLE case PragTyp_MODULE_LIST: { HashElem *j; pParse->nMem = 1; for(j=sqliteHashFirst(&db->aModule); j; j=sqliteHashNext(j)){ Module *pMod = (Module*)sqliteHashData(j); sqlite3VdbeMultiLoad(v, 1, "s", pMod->zName); } } break; #endif /* SQLITE_OMIT_VIRTUALTABLE */ case PragTyp_PRAGMA_LIST: { int i; for(i=0; i<ArraySize(aPragmaName); i++){ sqlite3VdbeMultiLoad(v, 1, "s", aPragmaName[i].zName); } } break; #endif /* SQLITE_INTROSPECTION_PRAGMAS */ #endif /* SQLITE_OMIT_SCHEMA_PRAGMAS */ #ifndef SQLITE_OMIT_FOREIGN_KEY case PragTyp_FOREIGN_KEY_LIST: if( zRight ){ FKey *pFK; Table *pTab; pTab = sqlite3FindTable(db, zRight, zDb); if( pTab ){ pFK = pTab->pFKey; if( pFK ){ int iTabDb = sqlite3SchemaToIndex(db, pTab->pSchema); int i = 0; pParse->nMem = 8; sqlite3CodeVerifySchema(pParse, iTabDb); while(pFK){ int j; for(j=0; j<pFK->nCol; j++){ sqlite3VdbeMultiLoad(v, 1, "iissssss", i, j, pFK->zTo, pTab->aCol[pFK->aCol[j].iFrom].zName, pFK->aCol[j].zCol, actionName(pFK->aAction[1]), /* ON UPDATE */ actionName(pFK->aAction[0]), /* ON DELETE */ "NONE"); } ++i; pFK = pFK->pNextFrom; } } } } break; #endif /* !defined(SQLITE_OMIT_FOREIGN_KEY) */ #ifndef SQLITE_OMIT_FOREIGN_KEY #ifndef SQLITE_OMIT_TRIGGER case PragTyp_FOREIGN_KEY_CHECK: { FKey *pFK; /* A foreign key constraint */ Table *pTab; /* Child table contain "REFERENCES" keyword */ Table *pParent; /* Parent table that child points to */ Index *pIdx; /* Index in the parent table */ int i; /* Loop counter: Foreign key number for pTab */ int j; /* Loop counter: Field of the foreign key */ HashElem *k; /* Loop counter: Next table in schema */ int x; /* result variable */ int regResult; /* 3 registers to hold a result row */ int regKey; /* Register to hold key for checking the FK */ int regRow; /* Registers to hold a row from pTab */ int addrTop; /* Top of a loop checking foreign keys */ int 
addrOk; /* Jump here if the key is OK */ int *aiCols; /* child to parent column mapping */ regResult = pParse->nMem+1; pParse->nMem += 4; regKey = ++pParse->nMem; regRow = ++pParse->nMem; k = sqliteHashFirst(&db->aDb[iDb].pSchema->tblHash); while( k ){ int iTabDb; if( zRight ){ pTab = sqlite3LocateTable(pParse, 0, zRight, zDb); k = 0; }else{ pTab = (Table*)sqliteHashData(k); k = sqliteHashNext(k); } if( pTab==0 || pTab->pFKey==0 ) continue; iTabDb = sqlite3SchemaToIndex(db, pTab->pSchema); sqlite3CodeVerifySchema(pParse, iTabDb); sqlite3TableLock(pParse, iTabDb, pTab->tnum, 0, pTab->zName); if( pTab->nCol+regRow>pParse->nMem ) pParse->nMem = pTab->nCol + regRow; sqlite3OpenTable(pParse, 0, iTabDb, pTab, OP_OpenRead); sqlite3VdbeLoadString(v, regResult, pTab->zName); for(i=1, pFK=pTab->pFKey; pFK; i++, pFK=pFK->pNextFrom){ pParent = sqlite3FindTable(db, pFK->zTo, zDb); if( pParent==0 ) continue; pIdx = 0; sqlite3TableLock(pParse, iTabDb, pParent->tnum, 0, pParent->zName); x = sqlite3FkLocateIndex(pParse, pParent, pFK, &pIdx, 0); if( x==0 ){ if( pIdx==0 ){ sqlite3OpenTable(pParse, i, iTabDb, pParent, OP_OpenRead); }else{ sqlite3VdbeAddOp3(v, OP_OpenRead, i, pIdx->tnum, iTabDb); sqlite3VdbeSetP4KeyInfo(pParse, pIdx); } }else{ k = 0; break; } } assert( pParse->nErr>0 || pFK==0 ); if( pFK ) break; if( pParse->nTab<i ) pParse->nTab = i; addrTop = sqlite3VdbeAddOp1(v, OP_Rewind, 0); VdbeCoverage(v); for(i=1, pFK=pTab->pFKey; pFK; i++, pFK=pFK->pNextFrom){ pParent = sqlite3FindTable(db, pFK->zTo, zDb); pIdx = 0; aiCols = 0; if( pParent ){ x = sqlite3FkLocateIndex(pParse, pParent, pFK, &pIdx, &aiCols); assert( x==0 ); } addrOk = sqlite3VdbeMakeLabel(pParse); /* Generate code to read the child key values into registers ** regRow..regRow+n. If any of the child key values are NULL, this ** row cannot cause an FK violation. Jump directly to addrOk in ** this case. */ for(j=0; j<pFK->nCol; j++){ int iCol = aiCols ? aiCols[j] : pFK->aCol[j].iFrom; sqlite3ExprCodeGetColumnOfTable(v, pTab, 0, iCol, regRow+j); sqlite3VdbeAddOp2(v, OP_IsNull, regRow+j, addrOk); VdbeCoverage(v); } /* Generate code to query the parent index for a matching parent ** key. If a match is found, jump to addrOk. */ if( pIdx ){ sqlite3VdbeAddOp4(v, OP_MakeRecord, regRow, pFK->nCol, regKey, sqlite3IndexAffinityStr(db,pIdx), pFK->nCol); sqlite3VdbeAddOp4Int(v, OP_Found, i, addrOk, regKey, 0); VdbeCoverage(v); }else if( pParent ){ int jmp = sqlite3VdbeCurrentAddr(v)+2; sqlite3VdbeAddOp3(v, OP_SeekRowid, i, jmp, regRow); VdbeCoverage(v); sqlite3VdbeGoto(v, addrOk); assert( pFK->nCol==1 ); } /* Generate code to report an FK violation to the caller. */ if( HasRowid(pTab) ){ sqlite3VdbeAddOp2(v, OP_Rowid, 0, regResult+1); }else{ sqlite3VdbeAddOp2(v, OP_Null, 0, regResult+1); } sqlite3VdbeMultiLoad(v, regResult+2, "siX", pFK->zTo, i-1); sqlite3VdbeAddOp2(v, OP_ResultRow, regResult, 4); sqlite3VdbeResolveLabel(v, addrOk); sqlite3DbFree(db, aiCols); } sqlite3VdbeAddOp2(v, OP_Next, 0, addrTop+1); VdbeCoverage(v); sqlite3VdbeJumpHere(v, addrTop); } } break; #endif /* !defined(SQLITE_OMIT_TRIGGER) */ #endif /* !defined(SQLITE_OMIT_FOREIGN_KEY) */ #ifndef SQLITE_OMIT_CASE_SENSITIVE_LIKE_PRAGMA /* Reinstall the LIKE and GLOB functions. The variant of LIKE ** used will be case sensitive or not depending on the RHS. 
*/ case PragTyp_CASE_SENSITIVE_LIKE: { if( zRight ){ sqlite3RegisterLikeFunctions(db, sqlite3GetBoolean(zRight, 0)); } } break; #endif /* SQLITE_OMIT_CASE_SENSITIVE_LIKE_PRAGMA */ #ifndef SQLITE_INTEGRITY_CHECK_ERROR_MAX # define SQLITE_INTEGRITY_CHECK_ERROR_MAX 100 #endif #ifndef SQLITE_OMIT_INTEGRITY_CHECK /* PRAGMA integrity_check ** PRAGMA integrity_check(N) ** PRAGMA quick_check ** PRAGMA quick_check(N) ** ** Verify the integrity of the database. ** ** The "quick_check" is reduced version of ** integrity_check designed to detect most database corruption ** without the overhead of cross-checking indexes. Quick_check ** is linear time wherease integrity_check is O(NlogN). */ case PragTyp_INTEGRITY_CHECK: { int i, j, addr, mxErr; int isQuick = (sqlite3Tolower(zLeft[0])=='q'); /* If the PRAGMA command was of the form "PRAGMA <db>.integrity_check", ** then iDb is set to the index of the database identified by <db>. ** In this case, the integrity of database iDb only is verified by ** the VDBE created below. ** ** Otherwise, if the command was simply "PRAGMA integrity_check" (or ** "PRAGMA quick_check"), then iDb is set to 0. In this case, set iDb ** to -1 here, to indicate that the VDBE should verify the integrity ** of all attached databases. */ assert( iDb>=0 ); assert( iDb==0 || pId2->z ); if( pId2->z==0 ) iDb = -1; /* Initialize the VDBE program */ pParse->nMem = 6; /* Set the maximum error count */ mxErr = SQLITE_INTEGRITY_CHECK_ERROR_MAX; if( zRight ){ sqlite3GetInt32(zRight, &mxErr); if( mxErr<=0 ){ mxErr = SQLITE_INTEGRITY_CHECK_ERROR_MAX; } } sqlite3VdbeAddOp2(v, OP_Integer, mxErr-1, 1); /* reg[1] holds errors left */ /* Do an integrity check on each database file */ for(i=0; i<db->nDb; i++){ HashElem *x; /* For looping over tables in the schema */ Hash *pTbls; /* Set of all tables in the schema */ int *aRoot; /* Array of root page numbers of all btrees */ int cnt = 0; /* Number of entries in aRoot[] */ int mxIdx = 0; /* Maximum number of indexes for any table */ if( OMIT_TEMPDB && i==1 ) continue; if( iDb>=0 && i!=iDb ) continue; sqlite3CodeVerifySchema(pParse, i); /* Do an integrity check of the B-Tree ** ** Begin by finding the root pages numbers ** for all tables and indices in the database. 
*/ assert( sqlite3SchemaMutexHeld(db, i, 0) ); pTbls = &db->aDb[i].pSchema->tblHash; for(cnt=0, x=sqliteHashFirst(pTbls); x; x=sqliteHashNext(x)){ Table *pTab = sqliteHashData(x); /* Current table */ Index *pIdx; /* An index on pTab */ int nIdx; /* Number of indexes on pTab */ if( HasRowid(pTab) ) cnt++; for(nIdx=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, nIdx++){ cnt++; } if( nIdx>mxIdx ) mxIdx = nIdx; } aRoot = sqlite3DbMallocRawNN(db, sizeof(int)*(cnt+1)); if( aRoot==0 ) break; for(cnt=0, x=sqliteHashFirst(pTbls); x; x=sqliteHashNext(x)){ Table *pTab = sqliteHashData(x); Index *pIdx; if( HasRowid(pTab) ) aRoot[++cnt] = pTab->tnum; for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ aRoot[++cnt] = pIdx->tnum; } } aRoot[0] = cnt; /* Make sure sufficient number of registers have been allocated */ pParse->nMem = MAX( pParse->nMem, 8+mxIdx ); sqlite3ClearTempRegCache(pParse); /* Do the b-tree integrity checks */ sqlite3VdbeAddOp4(v, OP_IntegrityCk, 2, cnt, 1, (char*)aRoot,P4_INTARRAY); sqlite3VdbeChangeP5(v, (u8)i); addr = sqlite3VdbeAddOp1(v, OP_IsNull, 2); VdbeCoverage(v); sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, sqlite3MPrintf(db, "*** in database %s ***\n", db->aDb[i].zDbSName), P4_DYNAMIC); sqlite3VdbeAddOp3(v, OP_Concat, 2, 3, 3); integrityCheckResultRow(v); sqlite3VdbeJumpHere(v, addr); /* Make sure all the indices are constructed correctly. */ for(x=sqliteHashFirst(pTbls); x; x=sqliteHashNext(x)){ Table *pTab = sqliteHashData(x); Index *pIdx, *pPk; Index *pPrior = 0; int loopTop; int iDataCur, iIdxCur; int r1 = -1; if( pTab->tnum<1 ) continue; /* Skip VIEWs or VIRTUAL TABLEs */ pPk = HasRowid(pTab) ? 0 : sqlite3PrimaryKeyIndex(pTab); sqlite3OpenTableAndIndices(pParse, pTab, OP_OpenRead, 0, 1, 0, &iDataCur, &iIdxCur); /* reg[7] counts the number of entries in the table. 
** reg[8+i] counts the number of entries in the i-th index */ sqlite3VdbeAddOp2(v, OP_Integer, 0, 7); for(j=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, j++){ sqlite3VdbeAddOp2(v, OP_Integer, 0, 8+j); /* index entries counter */ } assert( pParse->nMem>=8+j ); assert( sqlite3NoTempsInRange(pParse,1,7+j) ); sqlite3VdbeAddOp2(v, OP_Rewind, iDataCur, 0); VdbeCoverage(v); loopTop = sqlite3VdbeAddOp2(v, OP_AddImm, 7, 1); if( !isQuick ){ /* Sanity check on record header decoding */ sqlite3VdbeAddOp3(v, OP_Column, iDataCur, pTab->nNVCol-1,3); sqlite3VdbeChangeP5(v, OPFLAG_TYPEOFARG); } /* Verify that all NOT NULL columns really are NOT NULL */ for(j=0; j<pTab->nCol; j++){ char *zErr; int jmp2; if( j==pTab->iPKey ) continue; if( pTab->aCol[j].notNull==0 ) continue; sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, j, 3); if( sqlite3VdbeGetOp(v,-1)->opcode==OP_Column ){ sqlite3VdbeChangeP5(v, OPFLAG_TYPEOFARG); } jmp2 = sqlite3VdbeAddOp1(v, OP_NotNull, 3); VdbeCoverage(v); zErr = sqlite3MPrintf(db, "NULL value in %s.%s", pTab->zName, pTab->aCol[j].zName); sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, zErr, P4_DYNAMIC); integrityCheckResultRow(v); sqlite3VdbeJumpHere(v, jmp2); } /* Verify CHECK constraints */ if( pTab->pCheck && (db->flags & SQLITE_IgnoreChecks)==0 ){ ExprList *pCheck = sqlite3ExprListDup(db, pTab->pCheck, 0); if( db->mallocFailed==0 ){ int addrCkFault = sqlite3VdbeMakeLabel(pParse); int addrCkOk = sqlite3VdbeMakeLabel(pParse); char *zErr; int k; pParse->iSelfTab = iDataCur + 1; for(k=pCheck->nExpr-1; k>0; k--){ sqlite3ExprIfFalse(pParse, pCheck->a[k].pExpr, addrCkFault, 0); } sqlite3ExprIfTrue(pParse, pCheck->a[0].pExpr, addrCkOk, SQLITE_JUMPIFNULL); sqlite3VdbeResolveLabel(v, addrCkFault); pParse->iSelfTab = 0; zErr = sqlite3MPrintf(db, "CHECK constraint failed in %s", pTab->zName); sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, zErr, P4_DYNAMIC); integrityCheckResultRow(v); sqlite3VdbeResolveLabel(v, addrCkOk); } sqlite3ExprListDelete(db, pCheck); } if( !isQuick ){ /* Omit the remaining tests for quick_check */ /* Validate index entries for the current row */ for(j=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, j++){ int jmp2, jmp3, jmp4, jmp5; int ckUniq = sqlite3VdbeMakeLabel(pParse); if( pPk==pIdx ) continue; r1 = sqlite3GenerateIndexKey(pParse, pIdx, iDataCur, 0, 0, &jmp3, pPrior, r1); pPrior = pIdx; sqlite3VdbeAddOp2(v, OP_AddImm, 8+j, 1);/* increment entry count */ /* Verify that an index entry exists for the current table row */ jmp2 = sqlite3VdbeAddOp4Int(v, OP_Found, iIdxCur+j, ckUniq, r1, pIdx->nColumn); VdbeCoverage(v); sqlite3VdbeLoadString(v, 3, "row "); sqlite3VdbeAddOp3(v, OP_Concat, 7, 3, 3); sqlite3VdbeLoadString(v, 4, " missing from index "); sqlite3VdbeAddOp3(v, OP_Concat, 4, 3, 3); jmp5 = sqlite3VdbeLoadString(v, 4, pIdx->zName); sqlite3VdbeAddOp3(v, OP_Concat, 4, 3, 3); jmp4 = integrityCheckResultRow(v); sqlite3VdbeJumpHere(v, jmp2); /* For UNIQUE indexes, verify that only one entry exists with the ** current key. 
The entry is unique if (1) any column is NULL ** or (2) the next entry has a different key */ if( IsUniqueIndex(pIdx) ){ int uniqOk = sqlite3VdbeMakeLabel(pParse); int jmp6; int kk; for(kk=0; kk<pIdx->nKeyCol; kk++){ int iCol = pIdx->aiColumn[kk]; assert( iCol!=XN_ROWID && iCol<pTab->nCol ); if( iCol>=0 && pTab->aCol[iCol].notNull ) continue; sqlite3VdbeAddOp2(v, OP_IsNull, r1+kk, uniqOk); VdbeCoverage(v); } jmp6 = sqlite3VdbeAddOp1(v, OP_Next, iIdxCur+j); VdbeCoverage(v); sqlite3VdbeGoto(v, uniqOk); sqlite3VdbeJumpHere(v, jmp6); sqlite3VdbeAddOp4Int(v, OP_IdxGT, iIdxCur+j, uniqOk, r1, pIdx->nKeyCol); VdbeCoverage(v); sqlite3VdbeLoadString(v, 3, "non-unique entry in index "); sqlite3VdbeGoto(v, jmp5); sqlite3VdbeResolveLabel(v, uniqOk); } sqlite3VdbeJumpHere(v, jmp4); sqlite3ResolvePartIdxLabel(pParse, jmp3); } } sqlite3VdbeAddOp2(v, OP_Next, iDataCur, loopTop); VdbeCoverage(v); sqlite3VdbeJumpHere(v, loopTop-1); #ifndef SQLITE_OMIT_BTREECOUNT if( !isQuick ){ sqlite3VdbeLoadString(v, 2, "wrong # of entries in index "); for(j=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, j++){ if( pPk==pIdx ) continue; sqlite3VdbeAddOp2(v, OP_Count, iIdxCur+j, 3); addr = sqlite3VdbeAddOp3(v, OP_Eq, 8+j, 0, 3); VdbeCoverage(v); sqlite3VdbeChangeP5(v, SQLITE_NOTNULL); sqlite3VdbeLoadString(v, 4, pIdx->zName); sqlite3VdbeAddOp3(v, OP_Concat, 4, 2, 3); integrityCheckResultRow(v); sqlite3VdbeJumpHere(v, addr); } } #endif /* SQLITE_OMIT_BTREECOUNT */ } } { static const int iLn = VDBE_OFFSET_LINENO(2); static const VdbeOpList endCode[] = { { OP_AddImm, 1, 0, 0}, /* 0 */ { OP_IfNotZero, 1, 4, 0}, /* 1 */ { OP_String8, 0, 3, 0}, /* 2 */ { OP_ResultRow, 3, 1, 0}, /* 3 */ { OP_Halt, 0, 0, 0}, /* 4 */ { OP_String8, 0, 3, 0}, /* 5 */ { OP_Goto, 0, 3, 0}, /* 6 */ }; VdbeOp *aOp; aOp = sqlite3VdbeAddOpList(v, ArraySize(endCode), endCode, iLn); if( aOp ){ aOp[0].p2 = 1-mxErr; aOp[2].p4type = P4_STATIC; aOp[2].p4.z = "ok"; aOp[5].p4type = P4_STATIC; aOp[5].p4.z = (char*)sqlite3ErrStr(SQLITE_CORRUPT); } sqlite3VdbeChangeP3(v, 0, sqlite3VdbeCurrentAddr(v)-2); } } break; #endif /* SQLITE_OMIT_INTEGRITY_CHECK */ #ifndef SQLITE_OMIT_UTF16 /* ** PRAGMA encoding ** PRAGMA encoding = "utf-8"|"utf-16"|"utf-16le"|"utf-16be" ** ** In its first form, this pragma returns the encoding of the main ** database. If the database is not initialized, it is initialized now. ** ** The second form of this pragma is a no-op if the main database file ** has not already been initialized. In this case it sets the default ** encoding that will be used for the main database file if a new file ** is created. If an existing main database file is opened, then the ** default text encoding for the existing database is used. ** ** In all cases new databases created using the ATTACH command are ** created to use the same default text encoding as the main database. If ** the main database has not been initialized and/or created when ATTACH ** is executed, this is done before the ATTACH operation. ** ** In the second form this pragma sets the text encoding to be used in ** new database files created using this database handle. 
It is only ** useful if invoked immediately after the main database i */ case PragTyp_ENCODING: { static const struct EncName { char *zName; u8 enc; } encnames[] = { { "UTF8", SQLITE_UTF8 }, { "UTF-8", SQLITE_UTF8 }, /* Must be element [1] */ { "UTF-16le", SQLITE_UTF16LE }, /* Must be element [2] */ { "UTF-16be", SQLITE_UTF16BE }, /* Must be element [3] */ { "UTF16le", SQLITE_UTF16LE }, { "UTF16be", SQLITE_UTF16BE }, { "UTF-16", 0 }, /* SQLITE_UTF16NATIVE */ { "UTF16", 0 }, /* SQLITE_UTF16NATIVE */ { 0, 0 } }; const struct EncName *pEnc; if( !zRight ){ /* "PRAGMA encoding" */ if( sqlite3ReadSchema(pParse) ) goto pragma_out; assert( encnames[SQLITE_UTF8].enc==SQLITE_UTF8 ); assert( encnames[SQLITE_UTF16LE].enc==SQLITE_UTF16LE ); assert( encnames[SQLITE_UTF16BE].enc==SQLITE_UTF16BE ); returnSingleText(v, encnames[ENC(pParse->db)].zName); }else{ /* "PRAGMA encoding = XXX" */ /* Only change the value of sqlite.enc if the database handle is not ** initialized. If the main database exists, the new sqlite.enc value ** will be overwritten when the schema is next loaded. If it does not ** already exists, it will be created to use the new encoding value. */ if( !(DbHasProperty(db, 0, DB_SchemaLoaded)) || DbHasProperty(db, 0, DB_Empty) ){ for(pEnc=&encnames[0]; pEnc->zName; pEnc++){ if( 0==sqlite3StrICmp(zRight, pEnc->zName) ){ SCHEMA_ENC(db) = ENC(db) = pEnc->enc ? pEnc->enc : SQLITE_UTF16NATIVE; break; } } if( !pEnc->zName ){ sqlite3ErrorMsg(pParse, "unsupported encoding: %s", zRight); } } } } break; #endif /* SQLITE_OMIT_UTF16 */ #ifndef SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS /* ** PRAGMA [schema.]schema_version ** PRAGMA [schema.]schema_version = <integer> ** ** PRAGMA [schema.]user_version ** PRAGMA [schema.]user_version = <integer> ** ** PRAGMA [schema.]freelist_count ** ** PRAGMA [schema.]data_version ** ** PRAGMA [schema.]application_id ** PRAGMA [schema.]application_id = <integer> ** ** The pragma's schema_version and user_version are used to set or get ** the value of the schema-version and user-version, respectively. Both ** the schema-version and the user-version are 32-bit signed integers ** stored in the database header. ** ** The schema-cookie is usually only manipulated internally by SQLite. It ** is incremented by SQLite whenever the database schema is modified (by ** creating or dropping a table or index). The schema version is used by ** SQLite each time a query is executed to ensure that the internal cache ** of the schema used when compiling the SQL query matches the schema of ** the database against which the compiled query is actually executed. ** Subverting this mechanism by using "PRAGMA schema_version" to modify ** the schema-version is potentially dangerous and may lead to program ** crashes or database corruption. Use with caution! ** ** The user-version is not used internally by SQLite. It may be used by ** applications for any purpose. 
*/ case PragTyp_HEADER_VALUE: { int iCookie = pPragma->iArg; /* Which cookie to read or write */ sqlite3VdbeUsesBtree(v, iDb); if( zRight && (pPragma->mPragFlg & PragFlg_ReadOnly)==0 ){ /* Write the specified cookie value */ static const VdbeOpList setCookie[] = { { OP_Transaction, 0, 1, 0}, /* 0 */ { OP_SetCookie, 0, 0, 0}, /* 1 */ }; VdbeOp *aOp; sqlite3VdbeVerifyNoMallocRequired(v, ArraySize(setCookie)); aOp = sqlite3VdbeAddOpList(v, ArraySize(setCookie), setCookie, 0); if( ONLY_IF_REALLOC_STRESS(aOp==0) ) break; aOp[0].p1 = iDb; aOp[1].p1 = iDb; aOp[1].p2 = iCookie; aOp[1].p3 = sqlite3Atoi(zRight); }else{ /* Read the specified cookie value */ static const VdbeOpList readCookie[] = { { OP_Transaction, 0, 0, 0}, /* 0 */ { OP_ReadCookie, 0, 1, 0}, /* 1 */ { OP_ResultRow, 1, 1, 0} }; VdbeOp *aOp; sqlite3VdbeVerifyNoMallocRequired(v, ArraySize(readCookie)); aOp = sqlite3VdbeAddOpList(v, ArraySize(readCookie),readCookie,0); if( ONLY_IF_REALLOC_STRESS(aOp==0) ) break; aOp[0].p1 = iDb; aOp[1].p1 = iDb; aOp[1].p3 = iCookie; sqlite3VdbeReusable(v); } } break; #endif /* SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS */ #ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS /* ** PRAGMA compile_options ** ** Return the names of all compile-time options used in this build, ** one option per row. */ case PragTyp_COMPILE_OPTIONS: { int i = 0; const char *zOpt; pParse->nMem = 1; while( (zOpt = sqlite3_compileoption_get(i++))!=0 ){ sqlite3VdbeLoadString(v, 1, zOpt); sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1); } sqlite3VdbeReusable(v); } break; #endif /* SQLITE_OMIT_COMPILEOPTION_DIAGS */ #ifndef SQLITE_OMIT_WAL /* ** PRAGMA [schema.]wal_checkpoint = passive|full|restart|truncate ** ** Checkpoint the database. */ case PragTyp_WAL_CHECKPOINT: { int iBt = (pId2->z?iDb:SQLITE_MAX_ATTACHED); int eMode = SQLITE_CHECKPOINT_PASSIVE; if( zRight ){ if( sqlite3StrICmp(zRight, "full")==0 ){ eMode = SQLITE_CHECKPOINT_FULL; }else if( sqlite3StrICmp(zRight, "restart")==0 ){ eMode = SQLITE_CHECKPOINT_RESTART; }else if( sqlite3StrICmp(zRight, "truncate")==0 ){ eMode = SQLITE_CHECKPOINT_TRUNCATE; } } pParse->nMem = 3; sqlite3VdbeAddOp3(v, OP_Checkpoint, iBt, eMode, 1); sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 3); } break; /* ** PRAGMA wal_autocheckpoint ** PRAGMA wal_autocheckpoint = N ** ** Configure a database connection to automatically checkpoint a database ** after accumulating N frames in the log. Or query for the current value ** of N. */ case PragTyp_WAL_AUTOCHECKPOINT: { if( zRight ){ sqlite3_wal_autocheckpoint(db, sqlite3Atoi(zRight)); } returnSingleInt(v, db->xWalCallback==sqlite3WalDefaultHook ? SQLITE_PTR_TO_INT(db->pWalArg) : 0); } break; #endif /* ** PRAGMA shrink_memory ** ** IMPLEMENTATION-OF: R-23445-46109 This pragma causes the database ** connection on which it is invoked to free up as much memory as it ** can, by calling sqlite3_db_release_memory(). */ case PragTyp_SHRINK_MEMORY: { sqlite3_db_release_memory(db); break; } /* ** PRAGMA optimize ** PRAGMA optimize(MASK) ** PRAGMA schema.optimize ** PRAGMA schema.optimize(MASK) ** ** Attempt to optimize the database. All schemas are optimized in the first ** two forms, and only the specified schema is optimized in the latter two. ** ** The details of optimizations performed by this pragma are expected ** to change and improve over time. Applications should anticipate that ** this pragma will perform new optimizations in future releases. ** ** The optional argument is a bitmask of optimizations to perform: ** ** 0x0001 Debugging mode. 
Do not actually perform any optimizations ** but instead return one line of text for each optimization ** that would have been done. Off by default. ** ** 0x0002 Run ANALYZE on tables that might benefit. On by default. ** See below for additional information. ** ** 0x0004 (Not yet implemented) Record usage and performance ** information from the current session in the ** database file so that it will be available to "optimize" ** pragmas run by future database connections. ** ** 0x0008 (Not yet implemented) Create indexes that might have ** been helpful to recent queries ** ** The default MASK is and always shall be 0xfffe. 0xfffe means perform all ** of the optimizations listed above except Debug Mode, including new ** optimizations that have not yet been invented. If new optimizations are ** ever added that should be off by default, those off-by-default ** optimizations will have bitmasks of 0x10000 or larger. ** ** DETERMINATION OF WHEN TO RUN ANALYZE ** ** In the current implementation, a table is analyzed if only if all of ** the following are true: ** ** (1) MASK bit 0x02 is set. ** ** (2) The query planner used sqlite_stat1-style statistics for one or ** more indexes of the table at some point during the lifetime of ** the current connection. ** ** (3) One or more indexes of the table are currently unanalyzed OR ** the number of rows in the table has increased by 25 times or more ** since the last time ANALYZE was run. ** ** The rules for when tables are analyzed are likely to change in ** future releases. */ case PragTyp_OPTIMIZE: { int iDbLast; /* Loop termination point for the schema loop */ int iTabCur; /* Cursor for a table whose size needs checking */ HashElem *k; /* Loop over tables of a schema */ Schema *pSchema; /* The current schema */ Table *pTab; /* A table in the schema */ Index *pIdx; /* An index of the table */ LogEst szThreshold; /* Size threshold above which reanalysis is needd */ char *zSubSql; /* SQL statement for the OP_SqlExec opcode */ u32 opMask; /* Mask of operations to perform */ if( zRight ){ opMask = (u32)sqlite3Atoi(zRight); if( (opMask & 0x02)==0 ) break; }else{ opMask = 0xfffe; } iTabCur = pParse->nTab++; for(iDbLast = zDb?iDb:db->nDb-1; iDb<=iDbLast; iDb++){ if( iDb==1 ) continue; sqlite3CodeVerifySchema(pParse, iDb); pSchema = db->aDb[iDb].pSchema; for(k=sqliteHashFirst(&pSchema->tblHash); k; k=sqliteHashNext(k)){ pTab = (Table*)sqliteHashData(k); /* If table pTab has not been used in a way that would benefit from ** having analysis statistics during the current session, then skip it. 
** This also has the effect of skipping virtual tables and views */ if( (pTab->tabFlags & TF_StatsUsed)==0 ) continue; /* Reanalyze if the table is 25 times larger than the last analysis */ szThreshold = pTab->nRowLogEst + 46; assert( sqlite3LogEst(25)==46 ); for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ if( !pIdx->hasStat1 ){ szThreshold = 0; /* Always analyze if any index lacks statistics */ break; } } if( szThreshold ){ sqlite3OpenTable(pParse, iTabCur, iDb, pTab, OP_OpenRead); sqlite3VdbeAddOp3(v, OP_IfSmaller, iTabCur, sqlite3VdbeCurrentAddr(v)+2+(opMask&1), szThreshold); VdbeCoverage(v); } zSubSql = sqlite3MPrintf(db, "ANALYZE \"%w\".\"%w\"", db->aDb[iDb].zDbSName, pTab->zName); if( opMask & 0x01 ){ int r1 = sqlite3GetTempReg(pParse); sqlite3VdbeAddOp4(v, OP_String8, 0, r1, 0, zSubSql, P4_DYNAMIC); sqlite3VdbeAddOp2(v, OP_ResultRow, r1, 1); }else{ sqlite3VdbeAddOp4(v, OP_SqlExec, 0, 0, 0, zSubSql, P4_DYNAMIC); } } } sqlite3VdbeAddOp0(v, OP_Expire); break; } /* ** PRAGMA busy_timeout ** PRAGMA busy_timeout = N ** ** Call sqlite3_busy_timeout(db, N). Return the current timeout value ** if one is set. If no busy handler or a different busy handler is set ** then 0 is returned. Setting the busy_timeout to 0 or negative ** disables the timeout. */ /*case PragTyp_BUSY_TIMEOUT*/ default: { assert( pPragma->ePragTyp==PragTyp_BUSY_TIMEOUT ); if( zRight ){ sqlite3_busy_timeout(db, sqlite3Atoi(zRight)); } returnSingleInt(v, db->busyTimeout); break; } /* ** PRAGMA soft_heap_limit ** PRAGMA soft_heap_limit = N ** ** IMPLEMENTATION-OF: R-26343-45930 This pragma invokes the ** sqlite3_soft_heap_limit64() interface with the argument N, if N is ** specified and is a non-negative integer. ** IMPLEMENTATION-OF: R-64451-07163 The soft_heap_limit pragma always ** returns the same integer that would be returned by the ** sqlite3_soft_heap_limit64(-1) C-language function. */ case PragTyp_SOFT_HEAP_LIMIT: { sqlite3_int64 N; if( zRight && sqlite3DecOrHexToI64(zRight, &N)==SQLITE_OK ){ sqlite3_soft_heap_limit64(N); } returnSingleInt(v, sqlite3_soft_heap_limit64(-1)); break; } /* ** PRAGMA hard_heap_limit ** PRAGMA hard_heap_limit = N ** ** Invoke sqlite3_hard_heap_limit64() to query or set the hard heap ** limit. The hard heap limit can be activated or lowered by this ** pragma, but not raised or deactivated. Only the ** sqlite3_hard_heap_limit64() C-language API can raise or deactivate ** the hard heap limit. This allows an application to set a heap limit ** constraint that cannot be relaxed by an untrusted SQL script. */ case PragTyp_HARD_HEAP_LIMIT: { sqlite3_int64 N; if( zRight && sqlite3DecOrHexToI64(zRight, &N)==SQLITE_OK ){ sqlite3_int64 iPrior = sqlite3_hard_heap_limit64(-1); if( N>0 && (iPrior==0 || iPrior>N) ) sqlite3_hard_heap_limit64(N); } returnSingleInt(v, sqlite3_hard_heap_limit64(-1)); break; } /* ** PRAGMA threads ** PRAGMA threads = N ** ** Configure the maximum number of worker threads. Return the new ** maximum, which might be less than requested. 
*/ case PragTyp_THREADS: { sqlite3_int64 N; if( zRight && sqlite3DecOrHexToI64(zRight, &N)==SQLITE_OK && N>=0 ){ sqlite3_limit(db, SQLITE_LIMIT_WORKER_THREADS, (int)(N&0x7fffffff)); } returnSingleInt(v, sqlite3_limit(db, SQLITE_LIMIT_WORKER_THREADS, -1)); break; } #if defined(SQLITE_DEBUG) || defined(SQLITE_TEST) /* ** Report the current state of file logs for all databases */ case PragTyp_LOCK_STATUS: { static const char *const azLockName[] = { "unlocked", "shared", "reserved", "pending", "exclusive" }; int i; pParse->nMem = 2; for(i=0; i<db->nDb; i++){ Btree *pBt; const char *zState = "unknown"; int j; if( db->aDb[i].zDbSName==0 ) continue; pBt = db->aDb[i].pBt; if( pBt==0 || sqlite3BtreePager(pBt)==0 ){ zState = "closed"; }else if( sqlite3_file_control(db, i ? db->aDb[i].zDbSName : 0, SQLITE_FCNTL_LOCKSTATE, &j)==SQLITE_OK ){ zState = azLockName[j]; } sqlite3VdbeMultiLoad(v, 1, "ss", db->aDb[i].zDbSName, zState); } break; } #endif #ifdef SQLITE_HAS_CODEC /* Pragma iArg ** ---------- ------ ** key 0 ** rekey 1 ** hexkey 2 ** hexrekey 3 ** textkey 4 ** textrekey 5 */ case PragTyp_KEY: { if( zRight ){ char zBuf[40]; const char *zKey = zRight; int n; if( pPragma->iArg==2 || pPragma->iArg==3 ){ u8 iByte; int i; for(i=0, iByte=0; i<sizeof(zBuf)*2 && sqlite3Isxdigit(zRight[i]); i++){ iByte = (iByte<<4) + sqlite3HexToInt(zRight[i]); if( (i&1)!=0 ) zBuf[i/2] = iByte; } zKey = zBuf; n = i/2; }else{ n = pPragma->iArg<4 ? sqlite3Strlen30(zRight) : -1; } if( (pPragma->iArg & 1)==0 ){ rc = sqlite3_key_v2(db, zDb, zKey, n); }else{ rc = sqlite3_rekey_v2(db, zDb, zKey, n); } if( rc==SQLITE_OK && n!=0 ){ sqlite3VdbeSetNumCols(v, 1); sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "ok", SQLITE_STATIC); returnSingleText(v, "ok"); } } break; } #endif #if defined(SQLITE_HAS_CODEC) || defined(SQLITE_ENABLE_CEROD) case PragTyp_ACTIVATE_EXTENSIONS: if( zRight ){ #ifdef SQLITE_HAS_CODEC if( sqlite3StrNICmp(zRight, "see-", 4)==0 ){ sqlite3_activate_see(&zRight[4]); } #endif #ifdef SQLITE_ENABLE_CEROD if( sqlite3StrNICmp(zRight, "cerod-", 6)==0 ){ sqlite3_activate_cerod(&zRight[6]); } #endif } break; #endif } /* End of the PRAGMA switch */ /* The following block is a no-op unless SQLITE_DEBUG is defined. Its only ** purpose is to execute assert() statements to verify that if the ** PragFlg_NoColumns1 flag is set and the caller specified an argument ** to the PRAGMA, the implementation has not added any OP_ResultRow ** instructions to the VM. */ if( (pPragma->mPragFlg & PragFlg_NoColumns1) && zRight ){ sqlite3VdbeVerifyNoResultRow(v); } pragma_out: sqlite3DbFree(db, zLeft); sqlite3DbFree(db, zRight); }
0
[ "CWE-754" ]
sqlite
ebd70eedd5d6e6a890a670b5ee874a5eae86b4dd
150,257,316,541,417,460,000,000,000,000,000,000,000
1,905
Fix the NOT NULL verification logic in PRAGMA integrity_check so that it works for generated columns whose value is the result of a comparison operator. Ticket [bd8c280671ba44a7] FossilOrigin-Name: f3b39c71b88cb6721f443de56cdce4c08252453a5e340b00a2bd88dc10c42400
TEST_F(QuotedString_ExtractFrom_Tests, EmptyDoubleQuotedString) {
  whenInputIs("\"\"");
  resultMustBe("");
  trailingMustBe("");
}
0
[ "CWE-415", "CWE-119" ]
ArduinoJson
5e7b9ec688d79e7b16ec7064e1d37e8481a31e72
207,936,348,729,426,130,000,000,000,000,000,000,000
6
Fix buffer overflow (pull request #81)
static int copy_from_read_buf(struct tty_struct *tty,
				      unsigned char __user **b,
				      size_t *nr)
{
	struct n_tty_data *ldata = tty->disc_data;
	int retval;
	size_t n;
	bool is_eof;
	size_t head = smp_load_acquire(&ldata->commit_head);
	size_t tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1);

	retval = 0;
	n = min(head - ldata->read_tail, N_TTY_BUF_SIZE - tail);
	n = min(*nr, n);
	if (n) {
		const unsigned char *from = read_buf_addr(ldata, tail);
		retval = copy_to_user(*b, from, n);
		n -= retval;
		is_eof = n == 1 && *from == EOF_CHAR(tty);
		tty_audit_add_data(tty, from, n);
		smp_store_release(&ldata->read_tail, ldata->read_tail + n);
		/* Turn single EOF into zero-length read */
		if (L_EXTPROC(tty) && ldata->icanon && is_eof &&
		    (head == ldata->read_tail))
			n = 0;
		*b += n;
		*nr -= n;
	}
	return retval;
}
0
[ "CWE-704" ]
linux
966031f340185eddd05affcf72b740549f056348
299,378,636,736,764,170,000,000,000,000,000,000,000
31
n_tty: fix EXTPROC vs ICANON interaction with TIOCINQ (aka FIONREAD)

We added support for EXTPROC back in 2010 in commit 26df6d13406d ("tty:
Add EXTPROC support for LINEMODE") and the intent was to allow it to
override some (all?) ICANON behavior. Quoting from that original commit
message:

  There is a new bit in the termios local flag word, EXTPROC.
  When this bit is set, several aspects of the terminal driver
  are disabled. Input line editing, character echo, and mapping
  of signals are all disabled. This allows the telnetd to turn
  off these functions when in linemode, but still keep track of
  what state the user wants the terminal to be in.

but the problem turns out that "several aspects of the terminal driver
are disabled" is a bit ambiguous, and you can really confuse the n_tty
layer by setting EXTPROC and then causing some of the ICANON invariants
to no longer be maintained.

This fixes at least one such case (TIOCINQ) becoming unhappy because of
the confusion over whether ICANON really means ICANON when EXTPROC is set.

This basically makes TIOCINQ match the case of read: if EXTPROC is set,
we ignore ICANON. Also, make sure to reset the ICANON state if EXTPROC
changes, not just if ICANON changes.

Fixes: 26df6d13406d ("tty: Add EXTPROC support for LINEMODE")
Reported-by: Tetsuo Handa <[email protected]>
Reported-by: syzkaller <[email protected]>
Cc: Jiri Slaby <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
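The rule the commit settles on (EXTPROC overrides ICANON for queue accounting, just as it already does for read) is compact enough to state as a predicate. A minimal standalone sketch, with illustrative flag names rather than the kernel's real termios internals:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical flag bits standing in for the termios L_ICANON()/L_EXTPROC()
 * accessors; they are not the kernel's definitions. */
#define FLAG_ICANON  0x01u
#define FLAG_EXTPROC 0x02u

/* TIOCINQ should count raw bytes whenever EXTPROC overrides ICANON,
 * mirroring what the read path already does. */
static bool input_is_canonical(unsigned lflags)
{
    return (lflags & FLAG_ICANON) && !(lflags & FLAG_EXTPROC);
}

int main(void)
{
    printf("ICANON only:    %d\n", input_is_canonical(FLAG_ICANON));  /* 1 */
    printf("ICANON+EXTPROC: %d\n",
           input_is_canonical(FLAG_ICANON | FLAG_EXTPROC));           /* 0 */
    return 0;
}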
unsigned BytecodeModuleGenerator::getIdentifierID(StringRef str) const {
  return stringTable_.getIdentifierID(str);
}
0
[ "CWE-125", "CWE-787" ]
hermes
091835377369c8fd5917d9b87acffa721ad2a168
264,870,671,075,883,640,000,000,000,000,000,000,000
3
Correctly restore whether or not a function is an inner generator Summary: If a generator was large enough to be lazily compiled, we would lose that information when reconstituting the function's context. This meant the function was generated as a regular function instead of a generator. #utd-hermes-ignore-android Reviewed By: tmikov Differential Revision: D23580247 fbshipit-source-id: af5628bf322cbdc7c7cdfbb5f8d0756328518ea1
sequenceIsOwned(Oid seqId, char deptype, Oid *tableId, int32 *colId)
{
	bool		ret = false;
	Relation	depRel;
	ScanKeyData key[2];
	SysScanDesc scan;
	HeapTuple	tup;

	depRel = table_open(DependRelationId, AccessShareLock);

	ScanKeyInit(&key[0],
				Anum_pg_depend_classid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(RelationRelationId));
	ScanKeyInit(&key[1],
				Anum_pg_depend_objid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(seqId));

	scan = systable_beginscan(depRel, DependDependerIndexId, true,
							  NULL, 2, key);

	while (HeapTupleIsValid((tup = systable_getnext(scan))))
	{
		Form_pg_depend depform = (Form_pg_depend) GETSTRUCT(tup);

		if (depform->refclassid == RelationRelationId &&
			depform->deptype == deptype)
		{
			*tableId = depform->refobjid;
			*colId = depform->refobjsubid;
			ret = true;
			break;				/* no need to keep scanning */
		}
	}

	systable_endscan(scan);

	table_close(depRel, AccessShareLock);

	return ret;
}
0
[ "CWE-94" ]
postgres
b9b21acc766db54d8c337d508d0fe2f5bf2daab0
92,297,799,477,061,600,000,000,000,000,000,000,000
42
In extensions, don't replace objects not belonging to the extension.

Previously, if an extension script did CREATE OR REPLACE and there was
an existing object not belonging to the extension, it would overwrite
the object and adopt it into the extension. This is problematic, first
because the overwrite is probably unintentional, and second because we
didn't change the object's ownership. Thus a hostile user could create
an object in advance of an expected CREATE EXTENSION command, and would
then have ownership rights on an extension object, which could be
modified for trojan-horse-type attacks.

Hence, forbid CREATE OR REPLACE of an existing object unless it already
belongs to the extension. (Note that we've always forbidden replacing
an object that belongs to some other extension; only the behavior for
previously-free-standing objects changes here.) For the same reason,
also fail CREATE IF NOT EXISTS when there is an existing object that
doesn't belong to the extension.

Our thanks to Sven Klemm for reporting this problem.

Security: CVE-2022-2625
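The policy change reduces to a single ownership test at object-creation time. A toy model in plain C; the types and names here are hypothetical and look nothing like PostgreSQL's actual catalog code:

#include <stdbool.h>
#include <stdio.h>

typedef struct {
    bool exists;
    int  owning_extension;   /* 0 = free-standing, not owned by any extension */
} object_info;

/* CREATE OR REPLACE from an extension script may only replace an object the
 * extension already owns; adopting a pre-existing free-standing object is
 * refused, closing the ownership-hijack window described above. */
static bool replace_allowed(object_info obj, int current_extension)
{
    if (!obj.exists)
        return true;                                  /* plain CREATE */
    return obj.owning_extension == current_extension; /* no adoption */
}

int main(void)
{
    object_info planted = { true, 0 };  /* created ahead of CREATE EXTENSION */
    object_info ours    = { true, 7 };  /* already part of extension 7 */
    printf("planted: %s\n", replace_allowed(planted, 7) ? "replace" : "error");
    printf("ours:    %s\n", replace_allowed(ours, 7) ? "replace" : "error");
    return 0;
}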
static int mxf_read_primer_pack(void *arg, AVIOContext *pb, int tag, int size, UID uid, int64_t klv_offset)
{
    MXFContext *mxf = arg;
    int item_num = avio_rb32(pb);
    int item_len = avio_rb32(pb);

    if (item_len != 18) {
        avpriv_request_sample(pb, "Primer pack item length %d", item_len);
        return AVERROR_PATCHWELCOME;
    }
    if (item_num > 65536) {
        av_log(mxf->fc, AV_LOG_ERROR, "item_num %d is too large\n", item_num);
        return AVERROR_INVALIDDATA;
    }
    mxf->local_tags = av_calloc(item_num, item_len);
    if (!mxf->local_tags)
        return AVERROR(ENOMEM);
    mxf->local_tags_count = item_num;
    avio_read(pb, mxf->local_tags, item_num*item_len);
    return 0;
}
1
[ "CWE-20" ]
FFmpeg
a4e85b2e1c8d5b4bf0091157bbdeb0e457fb7b8f
151,855,178,391,715,600,000,000,000,000,000,000,000
21
avformat/mxfdec: Fix Sign error in mxf_read_primer_pack() Fixes: 20170829B.mxf Co-Author: 张洪亮(望初)" <[email protected]> Found-by: Xiaohei and Wangchu from Alibaba Security Team Signed-off-by: Michael Niedermayer <[email protected]> (cherry picked from commit 9d00fb9d70ee8c0cc7002b89318c5be00f1bbdad) Signed-off-by: Michael Niedermayer <[email protected]>
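This record is flagged vulnerable (target 1) because item_num lands in a signed int: a 32-bit count with the high bit set goes negative, slips past the size check, and poisons the item_num*item_len arithmetic. A standalone sketch of the hardened pattern, reading counts as unsigned and bounding them before any multiplication (constants mirror the ones above):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Standalone model of the hardened allocation: counts read from the file are
 * kept unsigned and bounded before any size arithmetic, so a value such as
 * 0x80000001 can never be misread as a negative int. */
static void *alloc_local_tags(uint32_t item_num, uint32_t item_len)
{
    if (item_len != 18)        /* the primer pack format requires 18 bytes */
        return NULL;
    if (item_num > 65536)      /* reject absurd counts before multiplying */
        return NULL;
    return calloc(item_num, item_len);  /* calloc re-checks the product */
}

int main(void)
{
    /* The same bit pattern that turns negative when stored in a signed int. */
    void *p = alloc_local_tags(0x80000001u, 18);
    printf("%s\n", p ? "allocated" : "rejected");   /* rejected */
    free(p);
    return 0;
}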
xmlParserInputBufferCreateFd(int fd, xmlCharEncoding enc) {
    xmlParserInputBufferPtr ret;

    if (fd < 0) return(NULL);

    ret = xmlAllocParserInputBuffer(enc);
    if (ret != NULL) {
        ret->context = (void *) (long) fd;
        ret->readcallback = xmlFdRead;
        ret->closecallback = xmlFdClose;
    }

    return(ret);
}
0
[ "CWE-134" ]
libxml2
4472c3a5a5b516aaf59b89be602fbce52756c3e9
323,558,292,573,301,600,000,000,000,000,000,000,000
14
Fix some format string warnings with possible format string vulnerability For https://bugzilla.gnome.org/show_bug.cgi?id=761029 Decorate every method in libxml2 with the appropriate LIBXML_ATTR_FORMAT(fmt,args) macro and add some cleanups following the reports.
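The decoration the commit applies is mechanical: a printf-style format attribute on each varargs logging helper, so the compiler can flag callers that pass untrusted data as the format string. A self-contained sketch; the macro imitates libxml2's LIBXML_ATTR_FORMAT but is defined locally:

#include <stdarg.h>
#include <stdio.h>

/* A format(printf) attribute lets the compiler type-check callers and warn
 * when user data is passed as the format itself. */
#if defined(__GNUC__)
# define ATTR_FORMAT(fmt, args) __attribute__((format(printf, fmt, args)))
#else
# define ATTR_FORMAT(fmt, args)
#endif

static void xml_error(const char *fmt, ...) ATTR_FORMAT(1, 2);

static void xml_error(const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
}

int main(void)
{
    const char *untrusted = "%n%n%n";
    xml_error("%s\n", untrusted);  /* safe: user data is an argument */
    /* xml_error(untrusted); */    /* would now draw -Wformat-security */
    return 0;
}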
int sqlite3VdbeAddOp0(Vdbe *p, int op){
  return sqlite3VdbeAddOp3(p, op, 0, 0, 0);
}
0
[ "CWE-755" ]
sqlite
8654186b0236d556aa85528c2573ee0b6ab71be3
2,639,822,107,341,082,000,000,000,000,000,000,000
3
When an error occurs while rewriting the parser tree for window functions in the sqlite3WindowRewrite() routine, make sure that pParse->nErr is set, and make sure that this shuts down any subsequent code generation that might depend on the transformations that were implemented. This fixes a problem discovered by the Yongheng and Rui fuzzer. FossilOrigin-Name: e2bddcd4c55ba3cbe0130332679ff4b048630d0ced9a8899982edb5a3569ba7f
void input_set_abs_params(struct input_dev *dev, unsigned int axis,
			  int min, int max, int fuzz, int flat)
{
	struct input_absinfo *absinfo;

	input_alloc_absinfo(dev);
	if (!dev->absinfo)
		return;

	absinfo = &dev->absinfo[axis];
	absinfo->minimum = min;
	absinfo->maximum = max;
	absinfo->fuzz = fuzz;
	absinfo->flat = flat;

	__set_bit(EV_ABS, dev->evbit);
	__set_bit(axis, dev->absbit);
}
0
[ "CWE-703", "CWE-787" ]
linux
cb222aed03d798fc074be55e59d9a112338ee784
53,362,303,417,859,690,000,000,000,000,000,000,000
18
Input: add safety guards to input_set_keycode() If we happen to have a garbage in input device's keycode table with values too big we'll end up doing clear_bit() with offset way outside of our bitmaps, damaging other objects within an input device or even outside of it. Let's add sanity checks to the returned old keycodes. Reported-by: [email protected] Reported-by: [email protected] Link: https://lore.kernel.org/r/20191207212757.GA245964@dtor-ws Signed-off-by: Dmitry Torokhov <[email protected]>
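The added guard is a plain range check on keycodes read back from a device keymap before they are used as bit offsets. A userspace model of the idea, simplified and not using the kernel's clear_bit() or input core types:

#include <stdbool.h>
#include <stdio.h>

#define KEY_MAX       0x2ff   /* matches the Linux input convention */
#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* An old keycode pulled from a device's keymap is range-checked before being
 * used as a bit offset, so a corrupted table can no longer clear bits outside
 * the keybit bitmap. */
static bool release_old_keycode(unsigned int old_keycode,
                                unsigned long *keybit, unsigned int nbits)
{
    if (old_keycode > KEY_MAX || old_keycode >= nbits)
        return false;                         /* garbage entry: refuse */
    keybit[old_keycode / BITS_PER_LONG] &=
        ~(1UL << (old_keycode % BITS_PER_LONG));
    return true;
}

int main(void)
{
    unsigned long keybit[(KEY_MAX + BITS_PER_LONG) / BITS_PER_LONG] = {0};
    printf("%d\n", release_old_keycode(30, keybit, KEY_MAX + 1));      /* 1 */
    printf("%d\n", release_old_keycode(100000, keybit, KEY_MAX + 1));  /* 0 */
    return 0;
}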
regcomp (preg, pattern, cflags)
    regex_t *_Restrict_ preg;
    const char *_Restrict_ pattern;
    int cflags;
{
  reg_errcode_t ret;
  reg_syntax_t syntax = ((cflags & REG_EXTENDED) ? RE_SYNTAX_POSIX_EXTENDED
			 : RE_SYNTAX_POSIX_BASIC);

  preg->buffer = NULL;
  preg->allocated = 0;
  preg->used = 0;

  /* Try to allocate space for the fastmap.  */
  preg->fastmap = re_malloc (char, SBC_MAX);
  if (BE (preg->fastmap == NULL, 0))
    return REG_ESPACE;

  syntax |= (cflags & REG_ICASE) ? RE_ICASE : 0;

  /* If REG_NEWLINE is set, newlines are treated differently.  */
  if (cflags & REG_NEWLINE)
    {
      /* REG_NEWLINE implies neither . nor [^...] match newline.  */
      syntax &= ~RE_DOT_NEWLINE;
      syntax |= RE_HAT_LISTS_NOT_NEWLINE;
      /* It also changes the matching behavior.  */
      preg->newline_anchor = 1;
    }
  else
    preg->newline_anchor = 0;
  preg->no_sub = !!(cflags & REG_NOSUB);
  preg->translate = NULL;

  ret = re_compile_internal (preg, pattern, strlen (pattern), syntax);

  /* POSIX doesn't distinguish between an unmatched open-group and an
     unmatched close-group: both are REG_EPAREN.  */
  if (ret == REG_ERPAREN)
    ret = REG_EPAREN;

  /* We have already checked preg->fastmap != NULL.  */
  if (BE (ret == REG_NOERROR, 1))
    /* Compute the fastmap now, since regexec cannot modify the pattern
       buffer.  This function never fails in this implementation.  */
    (void) re_compile_fastmap (preg);
  else
    {
      /* Some error occurred while compiling the expression.  */
      re_free (preg->fastmap);
      preg->fastmap = NULL;
    }

  return (int) ret;
}
0
[ "CWE-19" ]
gnulib
5513b40999149090987a0341c018d05d3eea1272
336,244,779,230,974,940,000,000,000,000,000,000,000
54
Diagnose ERE '()|\1' Problem reported by Hanno Böck in: http://bugs.gnu.org/21513 * lib/regcomp.c (parse_reg_exp): While parsing alternatives, keep track of the set of previously-completed subexpressions available before the first alternative, and restore this set just before parsing each subsequent alternative. This lets us diagnose the invalid back-reference in the ERE '()|\1'.
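The fix hinges on snapshotting which capture groups are complete before the first alternative and restoring that snapshot for each later branch, so a back-reference can only name a group completed on its own branch. A toy bitmask model of just that bookkeeping (the real parser threads this through its token state):

#include <stdint.h>
#include <stdio.h>

/* The set of subexpressions completed before the first alternative is
 * snapshotted, and every later alternative starts from that snapshot, so
 * "()|\1" sees group 1 as undefined and is rejected. */
int main(void)
{
    uint64_t completed = 0;           /* bitmask of finished capture groups */
    uint64_t before_alt = completed;  /* snapshot taken on entering '|' */

    completed |= UINT64_C(1) << 1;    /* first branch defines group 1: "()" */

    completed = before_alt;           /* restore before the second branch */
    int backref = 1;                  /* second branch contains "\1" */
    if (!(completed & (UINT64_C(1) << backref)))
        puts("REG_ESUBREG: invalid back reference");
    return 0;
}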
static int check_stack_access(struct bpf_verifier_env *env,
			      const struct bpf_reg_state *reg,
			      int off, int size)
{
	/* Stack accesses must be at a fixed offset, so that we
	 * can determine what type of data were returned. See
	 * check_stack_read().
	 */
	if (!tnum_is_const(reg->var_off)) {
		char tn_buf[48];

		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
		verbose(env, "variable stack access var_off=%s off=%d size=%d",
			tn_buf, off, size);
		return -EACCES;
	}

	if (off >= 0 || off < -MAX_BPF_STACK) {
		verbose(env, "invalid stack off=%d size=%d\n", off, size);
		return -EACCES;
	}

	return 0;
}
0
[ "CWE-703", "CWE-189" ]
linux
979d63d50c0c0f7bc537bf821e056cc9fe5abd38
275,426,000,689,158,420,000,000,000,000,000,000,000
24
bpf: prevent out of bounds speculation on pointer arithmetic Jann reported that the original commit back in b2157399cc98 ("bpf: prevent out-of-bounds speculation") was not sufficient to stop CPU from speculating out of bounds memory access: While b2157399cc98 only focussed on masking array map access for unprivileged users for tail calls and data access such that the user provided index gets sanitized from BPF program and syscall side, there is still a more generic form affected from BPF programs that applies to most maps that hold user data in relation to dynamic map access when dealing with unknown scalars or "slow" known scalars as access offset, for example: - Load a map value pointer into R6 - Load an index into R7 - Do a slow computation (e.g. with a memory dependency) that loads a limit into R8 (e.g. load the limit from a map for high latency, then mask it to make the verifier happy) - Exit if R7 >= R8 (mispredicted branch) - Load R0 = R6[R7] - Load R0 = R6[R0] For unknown scalars there are two options in the BPF verifier where we could derive knowledge from in order to guarantee safe access to the memory: i) While </>/<=/>= variants won't allow to derive any lower or upper bounds from the unknown scalar where it would be safe to add it to the map value pointer, it is possible through ==/!= test however. ii) another option is to transform the unknown scalar into a known scalar, for example, through ALU ops combination such as R &= <imm> followed by R |= <imm> or any similar combination where the original information from the unknown scalar would be destroyed entirely leaving R with a constant. The initial slow load still precedes the latter ALU ops on that register, so the CPU executes speculatively from that point. Once we have the known scalar, any compare operation would work then. A third option only involving registers with known scalars could be crafted as described in [0] where a CPU port (e.g. Slow Int unit) would be filled with many dependent computations such that the subsequent condition depending on its outcome has to wait for evaluation on its execution port and thereby executing speculatively if the speculated code can be scheduled on a different execution port, or any other form of mistraining as described in [1], for example. Given this is not limited to only unknown scalars, not only map but also stack access is affected since both is accessible for unprivileged users and could potentially be used for out of bounds access under speculation. In order to prevent any of these cases, the verifier is now sanitizing pointer arithmetic on the offset such that any out of bounds speculation would be masked in a way where the pointer arithmetic result in the destination register will stay unchanged, meaning offset masked into zero similar as in array_index_nospec() case. With regards to implementation, there are three options that were considered: i) new insn for sanitation, ii) push/pop insn and sanitation as inlined BPF, iii) reuse of ax register and sanitation as inlined BPF. Option i) has the downside that we end up using from reserved bits in the opcode space, but also that we would require each JIT to emit masking as native arch opcodes meaning mitigation would have slow adoption till everyone implements it eventually which is counter-productive. Option ii) and iii) have both in common that a temporary register is needed in order to implement the sanitation as inlined BPF since we are not allowed to modify the source register. 
While a push / pop insn in ii) would be useful to have in any case, it requires once again that every JIT needs to implement it first. While possible, amount of changes needed would also be unsuitable for a -stable patch. Therefore, the path which has fewer changes, less BPF instructions for the mitigation and does not require anything to be changed in the JITs is option iii) which this work is pursuing. The ax register is already mapped to a register in all JITs (modulo arm32 where it's mapped to stack as various other BPF registers there) and used in constant blinding for JITs-only so far. It can be reused for verifier rewrites under certain constraints. The interpreter's tmp "register" has therefore been remapped into extending the register set with hidden ax register and reusing that for a number of instructions that needed the prior temporary variable internally (e.g. div, mod). This allows for zero increase in stack space usage in the interpreter, and enables (restricted) generic use in rewrites otherwise as long as such a patchlet does not make use of these instructions. The sanitation mask is dynamic and relative to the offset the map value or stack pointer currently holds. There are various cases that need to be taken under consideration for the masking, e.g. such operation could look as follows: ptr += val or val += ptr or ptr -= val. Thus, the value to be sanitized could reside either in source or in destination register, and the limit is different depending on whether the ALU op is addition or subtraction and depending on the current known and bounded offset. The limit is derived as follows: limit := max_value_size - (smin_value + off). For subtraction: limit := umax_value + off. This holds because we do not allow any pointer arithmetic that would temporarily go out of bounds or would have an unknown value with mixed signed bounds where it is unclear at verification time whether the actual runtime value would be either negative or positive. For example, we have a derived map pointer value with constant offset and bounded one, so limit based on smin_value works because the verifier requires that statically analyzed arithmetic on the pointer must be in bounds, and thus it checks if resulting smin_value + off and umax_value + off is still within map value bounds at time of arithmetic in addition to time of access. Similarly, for the case of stack access we derive the limit as follows: MAX_BPF_STACK + off for subtraction and -off for the case of addition where off := ptr_reg->off + ptr_reg->var_off.value. Subtraction is a special case for the masking which can be in form of ptr += -val, ptr -= -val, or ptr -= val. In the first two cases where we know that the value is negative, we need to temporarily negate the value in order to do the sanitation on a positive value where we later swap the ALU op, and restore original source register if the value was in source. The sanitation of pointer arithmetic alone is still not fully sufficient as is, since a scenario like the following could happen ... PTR += 0x1000 (e.g. K-based imm) PTR -= BIG_NUMBER_WITH_SLOW_COMPARISON PTR += 0x1000 PTR -= BIG_NUMBER_WITH_SLOW_COMPARISON [...] ... which under speculation could end up as ... PTR += 0x1000 PTR -= 0 [ truncated by mitigation ] PTR += 0x1000 PTR -= 0 [ truncated by mitigation ] [...] ... and therefore still access out of bounds. To prevent such case, the verifier is also analyzing safety for potential out of bounds access under speculative execution. 
Meaning, it is also simulating pointer access under truncation. We therefore "branch off" and push the current verification state after the ALU operation with known 0 to the verification stack for later analysis. Given the current path analysis succeeded it is likely that the one under speculation can be pruned. In any case, it is also subject to existing complexity limits and therefore anything beyond this point will be rejected. In terms of pruning, it needs to be ensured that the verification state from speculative execution simulation must never prune a non-speculative execution path, therefore, we mark verifier state accordingly at the time of push_stack(). If verifier detects out of bounds access under speculative execution from one of the possible paths that includes a truncation, it will reject such program. Given we mask every reg-based pointer arithmetic for unprivileged programs, we've been looking into how it could affect real-world programs in terms of size increase. As the majority of programs are targeted for privileged-only use case, we've unconditionally enabled masking (with its alu restrictions on top of it) for privileged programs for the sake of testing in order to check i) whether they get rejected in its current form, and ii) by how much the number of instructions and size will increase. We've tested this by using Katran, Cilium and test_l4lb from the kernel selftests. For Katran we've evaluated balancer_kern.o, Cilium bpf_lxc.o and an older test object bpf_lxc_opt_-DUNKNOWN.o and l4lb we've used test_l4lb.o as well as test_l4lb_noinline.o. We found that none of the programs got rejected by the verifier with this change, and that impact is rather minimal to none. balancer_kern.o had 13,904 bytes (1,738 insns) xlated and 7,797 bytes JITed before and after the change. Most complex program in bpf_lxc.o had 30,544 bytes (3,817 insns) xlated and 18,538 bytes JITed before and after and none of the other tail call programs in bpf_lxc.o had any changes either. For the older bpf_lxc_opt_-DUNKNOWN.o object we found a small increase from 20,616 bytes (2,576 insns) and 12,536 bytes JITed before to 20,664 bytes (2,582 insns) and 12,558 bytes JITed after the change. Other programs from that object file had similar small increase. Both test_l4lb.o had no change and remained at 6,544 bytes (817 insns) xlated and 3,401 bytes JITed and for test_l4lb_noinline.o constant at 5,080 bytes (634 insns) xlated and 3,313 bytes JITed. This can be explained in that LLVM typically optimizes stack based pointer arithmetic by using K-based operations and that use of dynamic map access is not overly frequent. However, in future we may decide to optimize the algorithm further under known guarantees from branch and value speculation. Latter seems also unclear in terms of prediction heuristics that today's CPUs apply as well as whether there could be collisions in e.g. the predictor's Value History/Pattern Table for triggering out of bounds access, thus masking is performed unconditionally at this point but could be subject to relaxation later on. We were generally also brainstorming various other approaches for mitigation, but the blocker was always lack of available registers at runtime and/or overhead for runtime tracking of limits belonging to a specific pointer. Thus, we found this to be minimally intrusive under given constraints. With that in place, a simple example with sanitized access on unprivileged load at post-verification time looks as follows: # bpftool prog dump xlated id 282 [...] 
28: (79) r1 = *(u64 *)(r7 +0) 29: (79) r2 = *(u64 *)(r7 +8) 30: (57) r1 &= 15 31: (79) r3 = *(u64 *)(r0 +4608) 32: (57) r3 &= 1 33: (47) r3 |= 1 34: (2d) if r2 > r3 goto pc+19 35: (b4) (u32) r11 = (u32) 20479 | 36: (1f) r11 -= r2 | Dynamic sanitation for pointer 37: (4f) r11 |= r2 | arithmetic with registers 38: (87) r11 = -r11 | containing bounded or known 39: (c7) r11 s>>= 63 | scalars in order to prevent 40: (5f) r11 &= r2 | out of bounds speculation. 41: (0f) r4 += r11 | 42: (71) r4 = *(u8 *)(r4 +0) 43: (6f) r4 <<= r1 [...] For the case where the scalar sits in the destination register as opposed to the source register, the following code is emitted for the above example: [...] 16: (b4) (u32) r11 = (u32) 20479 17: (1f) r11 -= r2 18: (4f) r11 |= r2 19: (87) r11 = -r11 20: (c7) r11 s>>= 63 21: (5f) r2 &= r11 22: (0f) r2 += r0 23: (61) r0 = *(u32 *)(r2 +0) [...] JIT blinding example with non-conflicting use of r10: [...] d5: je 0x0000000000000106 _ d7: mov 0x0(%rax),%edi | da: mov $0xf153246,%r10d | Index load from map value and e0: xor $0xf153259,%r10 | (const blinded) mask with 0x1f. e7: and %r10,%rdi |_ ea: mov $0x2f,%r10d | f0: sub %rdi,%r10 | Sanitized addition. Both use r10 f3: or %rdi,%r10 | but do not interfere with each f6: neg %r10 | other. (Neither do these instructions f9: sar $0x3f,%r10 | interfere with the use of ax as temp fd: and %r10,%rdi | in interpreter.) 100: add %rax,%rdi |_ 103: mov 0x0(%rdi),%eax [...] Tested that it fixes Jann's reproducer, and also checked that test_verifier and test_progs suite with interpreter, JIT and JIT with hardening enabled on x86-64 and arm64 runs successfully. [0] Speculose: Analyzing the Security Implications of Speculative Execution in CPUs, Giorgi Maisuradze and Christian Rossow, https://arxiv.org/pdf/1801.04084.pdf [1] A Systematic Evaluation of Transient Execution Attacks and Defenses, Claudio Canella, Jo Van Bulck, Michael Schwarz, Moritz Lipp, Benjamin von Berg, Philipp Ortner, Frank Piessens, Dmitry Evtyushkin, Daniel Gruss, https://arxiv.org/pdf/1811.05441.pdf Fixes: b2157399cc98 ("bpf: prevent out-of-bounds speculation") Reported-by: Jann Horn <[email protected]> Signed-off-by: Daniel Borkmann <[email protected]> Acked-by: Alexei Starovoitov <[email protected]> Signed-off-by: Alexei Starovoitov <[email protected]>
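For reference, the mov/sub/or/neg/sar/and sequence quoted above can be written as straight-line C. A standalone sketch that assumes two's complement and an arithmetic right shift (which mainstream compilers provide); the verifier's bounds tracking rules out the one wrap-around edge case this toy version ignores:

#include <stdint.h>
#include <stdio.h>

/* When val <= limit the mask is all ones and val passes through unchanged;
 * otherwise the mask is zero and the offset is truncated, which is exactly
 * what a speculated out-of-bounds path then sees. */
static uint64_t sanitize_off(uint64_t val, uint64_t limit)
{
    uint64_t t = limit - val;                      /* wraps when val > limit */
    t |= val;                                      /* fold val's sign bit in */
    t = (uint64_t)0 - t;                           /* the neg step */
    uint64_t mask = (uint64_t)((int64_t)t >> 63);  /* sar: all ones or zero */
    return val & mask;
}

int main(void)
{
    printf("%llu\n", (unsigned long long)sanitize_off(5, 20479));      /* 5 */
    printf("%llu\n", (unsigned long long)sanitize_off(30000, 20479));  /* 0 */
    return 0;
}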
static int unix_mkname(struct sockaddr_un * sunaddr, int len, unsigned *hashp)
{
	if (len <= sizeof(short) || len > sizeof(*sunaddr))
		return -EINVAL;
	if (!sunaddr || sunaddr->sun_family != AF_UNIX)
		return -EINVAL;
	if (sunaddr->sun_path[0]) {
		/*
		 * This may look like an off by one error but it is a bit more
		 * subtle. 108 is the longest valid AF_UNIX path for a binding.
		 * sun_path[108] doesnt as such exist.  However in kernel space
		 * we are guaranteed that it is a valid memory location in our
		 * kernel address buffer.
		 */
		((char *)sunaddr)[len]=0;
		len = strlen(sunaddr->sun_path)+1+sizeof(short);
		return len;
	}

	*hashp = unix_hash_fold(csum_partial((char*)sunaddr, len, 0));
	return len;
}
0
[]
linux-2.6
6209344f5a3795d34b7f2c0061f49802283b6bdd
281,868,960,862,260,600,000,000,000,000,000,000,000
22
net: unix: fix inflight counting bug in garbage collector

Previously I assumed that the receive queues of candidates don't
change during the GC. This is only half true, nothing can be received
from the queues (see comment in unix_gc()), but buffers could be added
through the other half of the socket pair, which may still have file
descriptors referring to it.

This can result in inc_inflight_move_tail() erroneously increasing the
"inflight" counter for a unix socket for which dec_inflight() wasn't
previously called. This in turn can trigger the "BUG_ON(total_refs <
inflight_refs)" in a later garbage collection run.

Fix this by only manipulating the "inflight" counter for sockets which
are candidates themselves. Duplicating the file references in
unix_attach_fds() is also needed to prevent a socket becoming a
candidate for GC while the skb that contains it is not yet queued.

Reported-by: Andrea Bittau <[email protected]>
Signed-off-by: Miklos Szeredi <[email protected]>
CC: [email protected]
Signed-off-by: Linus Torvalds <[email protected]>
static const void *GetVirtualMetacontentFromCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const void
    *magick_restrict metacontent;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  metacontent=GetVirtualMetacontentFromNexus(cache_info,
    cache_info->nexus_info[id]);
  return(metacontent);
}
0
[ "CWE-119", "CWE-787" ]
ImageMagick
aecd0ada163a4d6c769cec178955d5f3e9316f2f
307,421,358,266,924,370,000,000,000,000,000,000,000
21
Set pixel cache to undefined if any resource limit is exceeded
NameValueParserStartElt(void * d, const char * name, int l)
{
	struct NameValueParserData * data = (struct NameValueParserData *)d;
	data->topelt = 1;
	if(l>63)
		l = 63;
	memcpy(data->curelt, name, l);
	data->curelt[l] = '\0';
	data->cdata = NULL;
	data->cdatalen = 0;
}
0
[ "CWE-119", "CWE-787" ]
miniupnp
7aeb624b44f86d335841242ff427433190e7168a
276,067,605,382,209,330,000,000,000,000,000,000,000
11
properly initialize data structure for SOAP parsing in ParseNameValue() topelt field was not properly initialized. should fix #268
bool blk_get_queue(struct request_queue *q)
{
	if (likely(!blk_queue_dying(q))) {
		__blk_get_queue(q);
		return true;
	}

	return false;
}
0
[ "CWE-416", "CWE-703" ]
linux
54648cf1ec2d7f4b6a71767799c45676a138ca24
64,525,138,549,774,240,000,000,000,000,000,000,000
9
block: blk_init_allocated_queue() set q->fq as NULL in the fail case We find the memory use-after-free issue in __blk_drain_queue() on the kernel 4.14. After read the latest kernel 4.18-rc6 we think it has the same problem. Memory is allocated for q->fq in the blk_init_allocated_queue(). If the elevator init function called with error return, it will run into the fail case to free the q->fq. Then the __blk_drain_queue() uses the same memory after the free of the q->fq, it will lead to the unpredictable event. The patch is to set q->fq as NULL in the fail case of blk_init_allocated_queue(). Fixes: commit 7c94e1c157a2 ("block: introduce blk_flush_queue to drive flush machinery") Cc: <[email protected]> Reviewed-by: Ming Lei <[email protected]> Reviewed-by: Bart Van Assche <[email protected]> Signed-off-by: xiao jin <[email protected]> Signed-off-by: Jens Axboe <[email protected]>
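The fix is the classic free-then-NULL discipline on an error path that shares state with a later teardown routine. A plain-C skeleton with illustrative names, not the block layer's real types:

#include <stdio.h>
#include <stdlib.h>

struct queue {
    void *fq;               /* flush machinery owned by the queue */
};

static int elevator_init_stub(void) { return -1; }  /* simulate failure */

static int queue_init(struct queue *q)
{
    q->fq = malloc(64);
    if (!q->fq)
        return -1;
    if (elevator_init_stub() != 0) {
        free(q->fq);
        q->fq = NULL;       /* the one-line fix: no stale pointer left */
        return -1;
    }
    return 0;
}

static void queue_drain(struct queue *q)
{
    if (q->fq)              /* safe: NULL after the failed init */
        free(q->fq);
}

int main(void)
{
    struct queue q = { NULL };
    if (queue_init(&q) != 0)
        puts("init failed; fq cleared");
    queue_drain(&q);        /* no use-after-free */
    return 0;
}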
char* MACH0_(get_filetype)(struct MACH0_(obj_t)* bin) {
	if (bin) {
		return MACH0_(get_filetype_from_hdr) (&bin->hdr);
	}
	return strdup ("Unknown");
}
0
[ "CWE-415", "CWE-125" ]
radare2
60208765887f5f008b3b9a883f3addc8bdb9c134
178,622,174,458,404,730,000,000,000,000,000,000,000
6
Fix #9970 - heap oobread in mach0 parser (#10026)
free_macro_sequence (void)
{
  free_pattern_buffer (&macro_sequence_buf, &macro_sequence_regs);
}
0
[]
m4
5345bb49077bfda9fabd048e563f9e7077fe335d
118,102,376,482,329,430,000,000,000,000,000,000,000
4
Minor security fix: Quote output of mkstemp. * src/builtin.c (mkstemp_helper): Produce quoted output. * doc/m4.texinfo (Mkstemp): Update the documentation and tests. * NEWS: Document this change. Signed-off-by: Eric Blake <[email protected]> (cherry picked from commit bd9900d65eb9cd5add0f107e94b513fa267495ba)
ContentSettingsObserver::ContentSettingsObserver(
    content::RenderFrame* render_frame,
    extensions::Dispatcher* extension_dispatcher,
    bool should_whitelist,
    service_manager::BinderRegistry* registry)
    : content::RenderFrameObserver(render_frame),
      content::RenderFrameObserverTracker<ContentSettingsObserver>(
          render_frame),
#if BUILDFLAG(ENABLE_EXTENSIONS)
      extension_dispatcher_(extension_dispatcher),
#endif
      content_settings_manager_(NULL),
      allow_running_insecure_content_(false),
      is_interstitial_page_(false),
      current_request_id_(0),
      should_whitelist_(should_whitelist) {
  ClearBlockedContentSettings();
  render_frame->GetWebFrame()->SetContentSettingsClient(this);

  content::RenderFrame* main_frame =
      render_frame->GetRenderView()->GetMainRenderFrame();
  // TODO(nasko): The main frame is not guaranteed to be in the same process
  // with this frame with --site-per-process. This code needs to be updated
  // to handle this case. See https://crbug.com/496670.
  if (main_frame && main_frame != render_frame) {
    // Copy all the settings from the main render frame to avoid race
    // conditions when initializing this data. See https://crbug.com/333308.
    ContentSettingsObserver* parent = ContentSettingsObserver::Get(main_frame);
    allow_running_insecure_content_ = parent->allow_running_insecure_content_;
    temporarily_allowed_plugins_ = parent->temporarily_allowed_plugins_;
    is_interstitial_page_ = parent->is_interstitial_page_;
  }
}
0
[ "CWE-20", "CWE-200" ]
muon
c18663aa171c6cdf03da3e8c70df8663645b97c4
141,154,890,066,929,500,000,000,000,000,000,000,000
33
Issue: 15232 AllowScript should use atom::ContentSettingsManager like other Allow* methods in the observer
eap_chap_response(esp, id, hash, name, namelen)
eap_state *esp;
u_char id;
u_char *hash;
char *name;
int namelen;
{
	u_char *outp;
	int msglen;

	outp = outpacket_buf;

	MAKEHEADER(outp, PPP_EAP);

	PUTCHAR(EAP_RESPONSE, outp);
	PUTCHAR(id, outp);
	esp->es_client.ea_id = id;
	msglen = EAP_HEADERLEN + 2 * sizeof (u_char) +
	    MD5_SIGNATURE_SIZE + namelen;
	PUTSHORT(msglen, outp);
	PUTCHAR(EAPT_MD5CHAP, outp);
	PUTCHAR(MD5_SIGNATURE_SIZE, outp);
	BCOPY(hash, outp, MD5_SIGNATURE_SIZE);
	INCPTR(MD5_SIGNATURE_SIZE, outp);
	if (namelen > 0) {
		BCOPY(name, outp, namelen);
	}

	output(esp->es_unit, outpacket_buf, PPP_HDRLEN + msglen);
}
0
[ "CWE-120", "CWE-787" ]
ppp
8d7970b8f3db727fe798b65f3377fe6787575426
287,013,445,080,906,230,000,000,000,000,000,000,000
30
pppd: Fix bounds check in EAP code Given that we have just checked vallen < len, it can never be the case that vallen >= len + sizeof(rhostname). This fixes the check so we actually avoid overflowing the rhostname array. Reported-by: Ilja Van Sprundel <[email protected]> Signed-off-by: Paul Mackerras <[email protected]>
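The corrected check compares the bytes remaining in the packet against the destination buffer's size before copying, replacing a condition that could never fire. A standalone restatement of the pattern, with names simplified from the real eap.c:

#include <stdio.h>
#include <string.h>

/* The peer name copied out of the packet must fit the fixed buffer, so the
 * remaining length is compared against sizeof(rhostname) before the copy. */
int main(void)
{
    char rhostname[256];
    const char *pkt = "peer.example.net";  /* bytes left after the MD5 value */
    size_t remaining = strlen(pkt);        /* len - vallen in the real code */

    if (remaining >= sizeof(rhostname)) {
        puts("EAP: trimming really long peer name");
        remaining = sizeof(rhostname) - 1;
    }
    memcpy(rhostname, pkt, remaining);
    rhostname[remaining] = '\0';
    printf("rhostname = %s\n", rhostname);
    return 0;
}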
TEST_F(QueryPlannerTest, NoBlockingSortsAllowedTest) {
    params.options = QueryPlannerParams::NO_BLOCKING_SORT;
    runQuerySortProj(BSONObj(), BSON("x" << 1), BSONObj());
    assertNumSolutions(0U);

    addIndex(BSON("x" << 1));

    runQuerySortProj(BSONObj(), BSON("x" << 1), BSONObj());
    assertNumSolutions(1U);
    assertSolutionExists(
        "{fetch: {filter: null, node: {ixscan: "
        "{filter: null, pattern: {x: 1}}}}}");
}
0
[]
mongo
ee97c0699fd55b498310996ee002328e533681a3
193,843,682,322,196,870,000,000,000,000,000,000,000
13
SERVER-36993 Fix crash due to incorrect $or pushdown for indexed $expr.
static void wake_up_sem_queue_do(struct list_head *pt)
{
	struct sem_queue *q, *t;
	int did_something;

	did_something = !list_empty(pt);
	list_for_each_entry_safe(q, t, pt, list) {
		wake_up_process(q->sleeper);
		/* q can disappear immediately after writing q->status. */
		smp_wmb();
		q->status = q->pid;
	}
	if (did_something)
		preempt_enable();
}
0
[ "CWE-703", "CWE-189" ]
linux
6062a8dc0517bce23e3c2f7d2fea5e22411269a3
189,312,899,353,525,300,000,000,000,000,000,000,000
15
ipc,sem: fine grained locking for semtimedop

Introduce finer grained locking for semtimedop, to handle the common case
of a program wanting to manipulate one semaphore from an array with
multiple semaphores.

If the call is a semop manipulating just one semaphore in an array with
multiple semaphores, only take the lock for that semaphore itself.

If the call needs to manipulate multiple semaphores, or another caller is
in a transaction that manipulates multiple semaphores, the sem_array lock
is taken, as well as all the locks for the individual semaphores.

On a 24 CPU system, performance numbers with the semop-multi test with N
threads and N semaphores, look like this:

	vanilla		Davidlohr's	Davidlohr's +	Davidlohr's +
threads			patches		rwlock patches	v3 patches
10	610652		726325		1783589		2142206
20	341570		365699		1520453		1977878
30	288102		307037		1498167		2037995
40	290714		305955		1612665		2256484
50	288620		312890		1733453		2650292
60	289987		306043		1649360		2388008
70	291298		306347		1723167		2717486
80	290948		305662		1729545		2763582
90	290996		306680		1736021		2757524
100	292243		306700		1773700		3059159

[[email protected]: do not call sem_lock when bogus sma]
[[email protected]: make refcounter atomic]
Signed-off-by: Rik van Riel <[email protected]>
Suggested-by: Linus Torvalds <[email protected]>
Acked-by: Davidlohr Bueso <[email protected]>
Cc: Chegu Vinod <[email protected]>
Cc: Jason Low <[email protected]>
Reviewed-by: Michel Lespinasse <[email protected]>
Cc: Peter Hurley <[email protected]>
Cc: Stanislav Kinsbursky <[email protected]>
Tested-by: Emmanuel Benisty <[email protected]>
Tested-by: Sedat Dilek <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
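The locking policy itself is a two-way choice made before the operation runs. A loose pthread-based model of only that choice; the real code also arbitrates against in-flight multi-semaphore transactions, which is omitted here:

#include <pthread.h>
#include <stdio.h>

/* Illustrative types, not the kernel's sem_array. */
struct sem       { pthread_mutex_t lock; int value; };
struct sem_array { pthread_mutex_t big_lock; struct sem sems[64]; };

/* A single-semaphore operation takes only that semaphore's lock; anything
 * touching several semaphores falls back to the array-wide lock. */
static pthread_mutex_t *sem_lock(struct sem_array *sma,
                                 const int *sops, int nsops)
{
    pthread_mutex_t *l = (nsops == 1) ? &sma->sems[sops[0]].lock
                                      : &sma->big_lock;
    pthread_mutex_lock(l);
    return l;
}

int main(void)
{
    static struct sem_array sma;
    pthread_mutex_init(&sma.big_lock, NULL);
    for (int i = 0; i < 64; i++)
        pthread_mutex_init(&sma.sems[i].lock, NULL);

    int op = 3;                                /* touches one semaphore */
    pthread_mutex_t *l = sem_lock(&sma, &op, 1);
    puts("took the per-semaphore lock only");
    pthread_mutex_unlock(l);
    return 0;
}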
TRIO_PUBLIC int trio_vasprintf
TRIO_ARGS3((result, format, args), char** result, TRIO_CONST char* format,
           va_list args)
{
	int status;
	trio_string_t* info;

	assert(VALID(format));

	*result = NULL;

	info = trio_xstring_duplicate("");
	if (info == NULL)
	{
		status = TRIO_ERROR_RETURN(TRIO_ENOMEM, 0);
	}
	else
	{
		status = TrioFormat(info, 0, TrioOutStreamStringDynamic,
		                    format, args, NULL, NULL);
		if (status >= 0)
		{
			trio_string_terminate(info);
			*result = trio_string_extract(info);
		}
		trio_string_destroy(info);
	}
	return status;
}
0
[ "CWE-190", "CWE-125" ]
FreeRDP
05cd9ea2290d23931f615c1b004d4b2e69074e27
141,456,625,228,045,020,000,000,000,000,000,000,000
27
Fixed TrioParse and trio_length limts. CVE-2020-4030 thanks to @antonio-morales for finding this.
static void AuthNoneStartFunc(rfbClientPtr cl)
{
    rfbClientAuthSucceeded(cl, rfbAuthNone);
}
0
[ "CWE-787" ]
turbovnc
cea98166008301e614e0d36776bf9435a536136e
103,941,062,076,019,250,000,000,000,000,000,000,000
4
Server: Fix two issues identified by ASan 1. If the TLSPlain and X509Plain security types were both disabled, then rfbOptPamAuth() would overflow the name field in the secTypes structure when testing the "none" security type, since the name of that security type has less than five characters. This issue was innocuous, since the overflow was fully contained within the secTypes structure, but the ASan error caused Xvnc to abort, which made it difficult to detect other errors. 2. If an ill-behaved RFB client sent the TurboVNC Server a fence message with more than 64 bytes, then the TurboVNC Server would try to read that message and subsequently overflow the stack before it detected that the payload was too large. This could never have occurred with any of the VNC viewers that currently support the RFB flow control extensions (TigerVNC and TurboVNC, namely.) This issue was also innocuous, since the stack overflow affected two variables (newScreens and errMsg) that were never accessed before the function returned.
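Issue 2 is the standard rule of validating a claimed length before reading the payload it describes. A minimal model of the fence-message handling, with assumed constants rather than TurboVNC's actual RFB parsing code:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FENCE_MAX 64   /* largest fence payload the protocol allows */

/* Validate the client-supplied payload length against the local buffer
 * before reading the payload. */
static int handle_fence(const uint8_t *wire, uint32_t claimed_len)
{
    uint8_t payload[FENCE_MAX];
    if (claimed_len > sizeof(payload)) {
        fprintf(stderr, "fence payload too large (%u bytes)\n", claimed_len);
        return -1;                     /* reject before touching the buffer */
    }
    memcpy(payload, wire, claimed_len);  /* stands in for the socket read */
    return 0;
}

int main(void)
{
    uint8_t wire[256] = {0};
    printf("%d\n", handle_fence(wire, 64));    /*  0 */
    printf("%d\n", handle_fence(wire, 200));   /* -1 */
    return 0;
}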
void AuthorizationSessionImpl::grantInternalAuthorization(OperationContext* opCtx) {
    grantInternalAuthorization(opCtx->getClient());
}
0
[ "CWE-613" ]
mongo
6dfb92b1299de04677d0bd2230e89a52eb01003c
108,585,660,486,847,440,000,000,000,000,000,000,000
3
SERVER-38984 Validate unique User ID on UserCache hit (cherry picked from commit e55d6e2292e5dbe2f97153251d8193d1cc89f5d7)
static bool subj_alt_hostcheck(struct Curl_easy *data,
                               const char *match_pattern,
                               size_t matchlen,
                               const char *hostname,
                               size_t hostlen,
                               const char *dispname)
{
#ifdef CURL_DISABLE_VERBOSE_STRINGS
  (void)dispname;
  (void)data;
#endif
  if(Curl_cert_hostcheck(match_pattern, matchlen, hostname, hostlen)) {
    infof(data, " subjectAltName: host \"%s\" matched cert's \"%s\"",
          dispname, match_pattern);
    return TRUE;
  }
  return FALSE;
}
0
[]
curl
139a54ed0a172adaaf1a78d6f4fff50b2c3f9e08
108,938,052,161,965,760,000,000,000,000,000,000,000
18
openssl: don't leak the SRP credentials in redirects either Follow-up to 620ea21410030 Reported-by: Harry Sintonen Closes #8751
explicit GetSessionTensorOp(OpKernelConstruction* context)
    : OpKernel(context) {}
0
[ "CWE-476", "CWE-369" ]
tensorflow
ff70c47a396ef1e3cb73c90513da4f5cb71bebba
104,316,170,475,980,350,000,000,000,000,000,000,000
2
Fix `tf.raw_ops.GetSessionTensor` and `tf.raw_ops.DeleteSessionTensor` null pointer dereferences. PiperOrigin-RevId: 368294154 Change-Id: Ie10f07a0a9a1c2b685e08153d48a0ca4b93f9fc9
bool skip( uint64_t sz ) {
    _position += sz;
    return _position < _maxLength;
}
0
[ "CWE-20" ]
mongo
6889d1658136c753998b4a408dc8d1a3ec28e3b9
78,947,693,338,962,520,000,000,000,000,000,000,000
4
SERVER-7769 - fast bson validate
static void trace_packet(const struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 const struct xt_table_info *private,
			 const struct ip6t_entry *e)
{
	const void *table_base;
	const struct ip6t_entry *root;
	const char *hookname, *chainname, *comment;
	const struct ip6t_entry *iter;
	unsigned int rulenum = 0;

	table_base = private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP6_TRACE_COMMENT_RULE];

	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
		if (get_chainname_rulenum(iter, e, hookname,
		    &chainname, &comment, &rulenum) != 0)
			break;

	nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
0
[ "CWE-200" ]
linux-2.6
6a8ab060779779de8aea92ce3337ca348f973f54
328,690,783,291,655,070,000,000,000,000,000,000,000
29
ipv6: netfilter: ip6_tables: fix infoleak to userspace Structures ip6t_replace, compat_ip6t_replace, and xt_get_revision are copied from userspace. Fields of these structs that are zero-terminated strings are not checked. When they are used as argument to a format string containing "%s" in request_module(), some sensitive information is leaked to userspace via argument of spawned modprobe process. The first bug was introduced before the git epoch; the second was introduced in 3bc3fe5e (v2.6.25-rc1); the third is introduced by 6b7d31fc (v2.6.15-rc1). To trigger the bug one should have CAP_NET_ADMIN. Signed-off-by: Vasiliy Kulikov <[email protected]> Signed-off-by: Patrick McHardy <[email protected]>
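The leak happens because a fixed-size name field copied from userspace is trusted to be NUL-terminated before being handed to a "%s" format (request_module in the kernel). The gist of the fix, modeled standalone; the constant name follows the xtables convention:

#include <stdio.h>
#include <string.h>

#define XT_TABLE_MAXNAMELEN 32

/* Prove the userspace-supplied name is NUL-terminated before it can reach a
 * "%s" format, otherwise adjacent stack bytes would be printed with it. */
static int check_table_name(const char name[XT_TABLE_MAXNAMELEN])
{
    if (memchr(name, '\0', XT_TABLE_MAXNAMELEN) == NULL)
        return -1;          /* unterminated: refuse instead of formatting */
    return 0;
}

int main(void)
{
    char good[XT_TABLE_MAXNAMELEN] = "filter";  /* zero-padded by C */
    char bad[XT_TABLE_MAXNAMELEN];
    memset(bad, 'A', sizeof(bad));              /* no terminator anywhere */
    printf("%d\n", check_table_name(good));     /*  0 */
    printf("%d\n", check_table_name(bad));      /* -1 */
    return 0;
}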
R_API RList *retrieve_all_method_access_string_and_value() {
	return retrieve_all_access_string_and_value (METHOD_ACCESS_FLAGS);
}
0
[ "CWE-125" ]
radare2
e9ce0d64faf19fa4e9c260250fbdf25e3c11e152
168,879,201,774,388,900,000,000,000,000,000,000,000
3
Fix #10498 - Fix crash in fuzzed java files (#10511)