func
stringlengths 0
484k
| target
int64 0
1
| cwe
listlengths 0
4
| project
stringclasses 799
values | commit_id
stringlengths 40
40
| hash
float64 1,215,700,430,453,689,100,000,000B
340,281,914,521,452,260,000,000,000,000B
| size
int64 1
24k
| message
stringlengths 0
13.3k
|
---|---|---|---|---|---|---|---|
//! Return stream line of a 2d or 3d vector field.
CImg<floatT> get_streamline(const float x, const float y, const float z,
const float L=256, const float dl=0.1f,
const unsigned int interpolation_type=2, const bool is_backward_tracking=false,
const bool is_oriented_only=false) const {
if (_spectrum!=2 && _spectrum!=3)
throw CImgInstanceException(_cimg_instance
"streamline(): Instance is not a 2d or 3d vector field.",
cimg_instance);
if (_spectrum==2) {
if (is_oriented_only) {
typename CImg<T>::_functor4d_streamline2d_oriented func(*this);
return streamline(func,x,y,z,L,dl,interpolation_type,is_backward_tracking,true,
0,0,0,_width - 1.0f,_height - 1.0f,0.0f);
} else {
typename CImg<T>::_functor4d_streamline2d_directed func(*this);
return streamline(func,x,y,z,L,dl,interpolation_type,is_backward_tracking,false,
0,0,0,_width - 1.0f,_height - 1.0f,0.0f);
}
}
if (is_oriented_only) {
typename CImg<T>::_functor4d_streamline3d_oriented func(*this);
return streamline(func,x,y,z,L,dl,interpolation_type,is_backward_tracking,true,
0,0,0,_width - 1.0f,_height - 1.0f,_depth - 1.0f);
}
typename CImg<T>::_functor4d_streamline3d_directed func(*this);
return streamline(func,x,y,z,L,dl,interpolation_type,is_backward_tracking,false,
0,0,0,_width - 1.0f,_height - 1.0f,_depth - 1.0f);
| 0 |
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
| 237,667,482,793,293,770,000,000,000,000,000,000,000 | 28 |
Fix other issues in 'CImg<T>::load_bmp()'.
|
nautilus_application_startup (NautilusApplication *application,
			      gboolean kill_shell,
			      gboolean no_default_window,
			      gboolean no_desktop,
			      gboolean browser_window,
			      const char *geometry,
			      char **urls)
{
	UniqueMessageData *message;

	/* Check the user's ~/.nautilus directories and post warnings
	 * if there are problems.
	 */
	if (!kill_shell && !check_required_directories (application)) {
		return;
	}

	if (kill_shell) {
		/* Ask an already-running instance (if any) to shut down;
		 * nothing else to do in this mode. */
		if (unique_app_is_running (application->unique_app)) {
			unique_app_send_message (application->unique_app,
						 UNIQUE_CLOSE, NULL);
		}
	} else {
		/* If KDE desktop is running, then force no_desktop */
		if (is_kdesktop_present ()) {
			no_desktop = TRUE;
		}

		/* The user preference can also disable desktop drawing. */
		if (!no_desktop &&
		    !eel_preferences_get_boolean (NAUTILUS_PREFERENCES_SHOW_DESKTOP)) {
			no_desktop = TRUE;
		}

		if (!no_desktop) {
			/* Either tell the running instance to draw the desktop,
			 * or open it ourselves when we are the first instance. */
			if (unique_app_is_running (application->unique_app)) {
				unique_app_send_message (application->unique_app,
							 COMMAND_START_DESKTOP, NULL);
			} else {
				nautilus_application_open_desktop (application);
			}
		}

		if (!unique_app_is_running (application->unique_app)) {
			/* First instance: finish startup and start listening for
			 * IPC messages from later invocations. */
			finish_startup (application, no_desktop);
			g_signal_connect (application->unique_app, "message-received", G_CALLBACK (message_received_cb), application);
		}

		/* Monitor the preference to show or hide the desktop */
		eel_preferences_add_callback_while_alive (NAUTILUS_PREFERENCES_SHOW_DESKTOP,
							  desktop_changed_callback,
							  application,
							  G_OBJECT (application));

		/* Monitor the preference to have the desktop */
		/* point to the Unix home folder */
		eel_preferences_add_callback_while_alive (NAUTILUS_PREFERENCES_DESKTOP_IS_HOME_DIR,
							  desktop_location_changed_callback,
							  NULL,
							  G_OBJECT (application));

		/* Create the other windows. */
		if (urls != NULL || !no_default_window) {
			if (unique_app_is_running (application->unique_app)) {
				/* Forward geometry + URIs to the running instance;
				 * the message payload is freed after sending. */
				message = unique_message_data_new ();
				_unique_message_data_set_geometry_and_uris (message, geometry, urls);
				if (browser_window) {
					unique_app_send_message (application->unique_app,
								 COMMAND_OPEN_BROWSER, message);
				} else {
					unique_app_send_message (application->unique_app,
								 UNIQUE_OPEN, message);
				}
				unique_message_data_free (message);
			} else {
				open_windows (application, NULL,
					      urls,
					      geometry,
					      browser_window);
			}
		}

		/* Load session info if available */
		nautilus_application_load_session (application);
	}
}
| 0 |
[] |
nautilus
|
1e1c916f5537eb5e4144950f291f4a3962fc2395
| 136,308,332,993,044,700,000,000,000,000,000,000,000 | 86 |
Add "interactive" argument to nautilus_file_mark_desktop_file_trusted.
2009-02-24 Alexander Larsson <[email protected]>
* libnautilus-private/nautilus-file-operations.c:
* libnautilus-private/nautilus-file-operations.h:
* libnautilus-private/nautilus-mime-actions.c:
Add "interactive" argument to
nautilus_file_mark_desktop_file_trusted.
* src/nautilus-application.c:
Mark all desktopfiles on the desktop trusted on first
run.
svn path=/trunk/; revision=15009
|
slap_modrdn2mods(
	Operation	*op,
	SlapReply	*rs )
{
	/* Turn a modrdn request into a Modifications list on op->orr_modlist:
	 * soft-add every attribute/value of the new RDN, and (when
	 * deleteoldrdn is set) delete every attribute/value of the old RDN.
	 * Returns an LDAP result code; on failure the partially built
	 * modlist is torn down. */
	int		a_cnt, d_cnt;
	LDAPRDN		old_rdn = NULL;
	LDAPRDN		new_rdn = NULL;

	assert( !BER_BVISEMPTY( &op->oq_modrdn.rs_newrdn ) );

	/* if requestDN is empty, silently reset deleteOldRDN */
	if ( BER_BVISEMPTY( &op->o_req_dn ) ) op->orr_deleteoldrdn = 0;

	/* Parse the new RDN into its attribute/value components. */
	if ( ldap_bv2rdn_x( &op->oq_modrdn.rs_newrdn, &new_rdn,
		(char **)&rs->sr_text, LDAP_DN_FORMAT_LDAP, op->o_tmpmemctx ) ) {
		Debug( LDAP_DEBUG_TRACE,
			"%s slap_modrdn2mods: can't figure out "
			"type(s)/value(s) of newrdn\n",
			op->o_log_prefix, 0, 0 );
		rs->sr_err = LDAP_INVALID_DN_SYNTAX;
		rs->sr_text = "unknown type(s)/value(s) used in RDN";
		goto done;
	}

	/* Parse the old RDN only when its values must be deleted. */
	if ( op->oq_modrdn.rs_deleteoldrdn ) {
		if ( ldap_bv2rdn_x( &op->o_req_dn, &old_rdn,
			(char **)&rs->sr_text, LDAP_DN_FORMAT_LDAP, op->o_tmpmemctx ) ) {
			Debug( LDAP_DEBUG_TRACE,
				"%s slap_modrdn2mods: can't figure out "
				"type(s)/value(s) of oldrdn\n",
				op->o_log_prefix, 0, 0 );
			rs->sr_err = LDAP_OTHER;
			rs->sr_text = "cannot parse RDN from old DN";
			goto done;
		}
	}
	rs->sr_text = NULL;

	/* Add new attribute values to the entry */
	for ( a_cnt = 0; new_rdn[a_cnt]; a_cnt++ ) {
		AttributeDescription	*desc = NULL;
		Modifications		*mod_tmp;

		rs->sr_err = slap_bv2ad( &new_rdn[a_cnt]->la_attr, &desc, &rs->sr_text );
		if ( rs->sr_err != LDAP_SUCCESS ) {
			Debug( LDAP_DEBUG_TRACE,
				"%s slap_modrdn2mods: %s: %s (new)\n",
				op->o_log_prefix,
				rs->sr_text,
				new_rdn[ a_cnt ]->la_attr.bv_val );
			goto done;
		}

		if ( !desc->ad_type->sat_equality ) {
			/* NOTE(review): rs->sr_text is still NULL when this Debug
			 * fires; the descriptive text is only set afterwards. */
			Debug( LDAP_DEBUG_TRACE,
				"%s slap_modrdn2mods: %s: %s (new)\n",
				op->o_log_prefix,
				rs->sr_text,
				new_rdn[ a_cnt ]->la_attr.bv_val );
			rs->sr_text = "naming attribute has no equality matching rule";
			rs->sr_err = LDAP_NAMING_VIOLATION;
			goto done;
		}

		/* Apply modification */
		mod_tmp = ( Modifications * )ch_malloc( sizeof( Modifications ) );
		mod_tmp->sml_desc = desc;
		BER_BVZERO( &mod_tmp->sml_type );
		mod_tmp->sml_numvals = 1;
		mod_tmp->sml_values = ( BerVarray )ch_malloc( 2 * sizeof( struct berval ) );
		ber_dupbv( &mod_tmp->sml_values[0], &new_rdn[a_cnt]->la_value );
		mod_tmp->sml_values[1].bv_val = NULL;
		if( desc->ad_type->sat_equality->smr_normalize) {
			mod_tmp->sml_nvalues = ( BerVarray )ch_malloc( 2 * sizeof( struct berval ) );
			rs->sr_err = desc->ad_type->sat_equality->smr_normalize(
				SLAP_MR_EQUALITY|SLAP_MR_VALUE_OF_ASSERTION_SYNTAX,
				desc->ad_type->sat_syntax,
				desc->ad_type->sat_equality,
				&mod_tmp->sml_values[0],
				&mod_tmp->sml_nvalues[0], NULL );
			if (rs->sr_err != LDAP_SUCCESS) {
				ch_free(mod_tmp->sml_nvalues);
				ch_free(mod_tmp->sml_values[0].bv_val);
				ch_free(mod_tmp->sml_values);
				ch_free(mod_tmp);
				goto done;
			}
			mod_tmp->sml_nvalues[1].bv_val = NULL;
		} else {
			mod_tmp->sml_nvalues = NULL;
		}
		mod_tmp->sml_op = SLAP_MOD_SOFTADD;
		mod_tmp->sml_flags = 0;
		mod_tmp->sml_next = op->orr_modlist;
		op->orr_modlist = mod_tmp;
	}

	/* Remove old rdn value if required */
	if ( op->orr_deleteoldrdn ) {
		for ( d_cnt = 0; old_rdn[d_cnt]; d_cnt++ ) {
			AttributeDescription	*desc = NULL;
			Modifications		*mod_tmp;

			rs->sr_err = slap_bv2ad( &old_rdn[d_cnt]->la_attr, &desc, &rs->sr_text );
			if ( rs->sr_err != LDAP_SUCCESS ) {
				Debug( LDAP_DEBUG_TRACE,
					"%s slap_modrdn2mods: %s: %s (old)\n",
					op->o_log_prefix,
					rs->sr_text,
					old_rdn[d_cnt]->la_attr.bv_val );
				goto done;
			}

			/* Apply modification */
			mod_tmp = ( Modifications * )ch_malloc( sizeof( Modifications ) );
			mod_tmp->sml_desc = desc;
			BER_BVZERO( &mod_tmp->sml_type );
			mod_tmp->sml_numvals = 1;
			mod_tmp->sml_values = ( BerVarray )ch_malloc( 2 * sizeof( struct berval ) );
			ber_dupbv( &mod_tmp->sml_values[0], &old_rdn[d_cnt]->la_value );
			mod_tmp->sml_values[1].bv_val = NULL;
			/* ITS#9370: unlike the new-RDN loop above, this loop did not
			 * reject attributes lacking an equality rule, so sat_equality
			 * may be NULL here.  Skip normalization in that case instead
			 * of dereferencing a NULL pointer (CWE-476); DNs without
			 * equality rules are already accepted elsewhere. */
			if( desc->ad_type->sat_equality &&
				desc->ad_type->sat_equality->smr_normalize) {
				mod_tmp->sml_nvalues = ( BerVarray )ch_malloc( 2 * sizeof( struct berval ) );
				(void) (*desc->ad_type->sat_equality->smr_normalize)(
					SLAP_MR_EQUALITY|SLAP_MR_VALUE_OF_ASSERTION_SYNTAX,
					desc->ad_type->sat_syntax,
					desc->ad_type->sat_equality,
					&mod_tmp->sml_values[0],
					&mod_tmp->sml_nvalues[0], NULL );
				mod_tmp->sml_nvalues[1].bv_val = NULL;
			} else {
				mod_tmp->sml_nvalues = NULL;
			}
			mod_tmp->sml_op = LDAP_MOD_DELETE;
			mod_tmp->sml_flags = 0;
			mod_tmp->sml_next = op->orr_modlist;
			op->orr_modlist = mod_tmp;
		}
	}

done:
	/* LDAP v2 supporting correct attribute handling. */
	if ( rs->sr_err != LDAP_SUCCESS && op->orr_modlist != NULL ) {
		Modifications *tmp;
		/* Tear down the partially built list on failure. */
		for ( ; op->orr_modlist != NULL; op->orr_modlist = tmp ) {
			tmp = op->orr_modlist->sml_next;
			ch_free( op->orr_modlist );
		}
	}

	/* Parsed RDNs were allocated from the per-op memory context. */
	if ( new_rdn != NULL ) {
		ldap_rdnfree_x( new_rdn, op->o_tmpmemctx );
	}
	if ( old_rdn != NULL ) {
		ldap_rdnfree_x( old_rdn, op->o_tmpmemctx );
	}

	return rs->sr_err;
}
| 1 |
[
"CWE-476"
] |
openldap
|
4c774220a752bf8e3284984890dc0931fe73165d
| 98,924,267,316,692,140,000,000,000,000,000,000,000 | 162 |
ITS#9370 check for equality rule on old_rdn
Just skip normalization if there's no equality rule. We accept
DNs without equality rules already.
|
/* Consume and return the next character from the scanner.
 * Returns 0 (after reporting a syntax error) at end of input or on a
 * NUL byte; otherwise advances curptr and optionally skips trailing
 * whitespace when skip_ws is enabled. */
PJ_DEF(int) pj_scan_get_char( pj_scanner *scanner )
{
    register char *s = scanner->curptr;
    int chr;

    /* Bounds check before dereferencing: both past-the-end and an
     * embedded NUL terminate scanning. */
    if (s >= scanner->end || !*s) {
	pj_scan_syntax_err(scanner);
	return 0;
    }

    chr = *s;
    ++s;
    scanner->curptr = s;
    /* Only peek at *s after PJ_SCAN_CHECK_EOF confirms it is in range. */
    if (PJ_SCAN_CHECK_EOF(s) && PJ_SCAN_IS_PROBABLY_SPACE(*s) &&
	scanner->skip_ws)
    {
	pj_scan_skip_whitespace(scanner);
    }
    return chr;
}
| 0 |
[
"CWE-125"
] |
pjproject
|
077b465c33f0aec05a49cd2ca456f9a1b112e896
| 184,006,368,736,890,300,000,000,000,000,000,000,000 | 21 |
Merge pull request from GHSA-7fw8-54cv-r7pm
|
/* Module init for the Xen block backend: validate module parameters,
 * then bring up the blkif interface and xenbus machinery.
 * Returns 0 on success or a negative errno. */
static int __init xen_blkif_init(void)
{
	int rc = 0;

	/* Only meaningful inside a Xen domain. */
	if (!xen_domain())
		return -ENODEV;

	/* Clamp an out-of-range module parameter to the supported maximum. */
	if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
		pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
			xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
		xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
	}

	/* Default the queue count to one per online CPU. */
	if (xenblk_max_queues == 0)
		xenblk_max_queues = num_online_cpus();

	rc = xen_blkif_interface_init();
	if (rc)
		goto failed_init;

	rc = xen_blkif_xenbus_init();
	if (rc)
		goto failed_init;
	/* NOTE(review): if xen_blkif_xenbus_init() fails, the resources set
	 * up by xen_blkif_interface_init() are not torn down here — verify
	 * whether a fini call is needed on this path. */

 failed_init:
	return rc;
}
| 0 |
[
"CWE-200"
] |
linux
|
089bc0143f489bd3a4578bdff5f4ca68fb26f341
| 326,995,858,249,293,300,000,000,000,000,000,000,000 | 27 |
xen-blkback: don't leak stack data via response ring
Rather than constructing a local structure instance on the stack, fill
the fields directly on the shared ring, just like other backends do.
Build on the fact that all response structure flavors are actually
identical (the old code did make this assumption too).
This is XSA-216.
Cc: [email protected]
Signed-off-by: Jan Beulich <[email protected]>
Reviewed-by: Konrad Rzeszutek Wilk <[email protected]>
Signed-off-by: Konrad Rzeszutek Wilk <[email protected]>
|
/* Emit the PDF document outline (bookmarks), either into the current
 * page stream, or — in complex HTML mode with frames — into a separate
 * "<Docname>-outline.html" file.  Returns false when there is no outline
 * or the output file cannot be created. */
bool HtmlOutputDev::dumpDocOutline(PDFDoc* doc)
{
	FILE * output = nullptr;
	bool bClose = false;   // true only when we opened a dedicated outline file

	if (!ok)
		return false;

	Outline *outline = doc->getOutline();
	if (!outline)
		return false;

	const std::vector<OutlineItem*> *outlines = outline->getItems();
	if (!outlines)
		return false;

	if (!complexMode || xml)
	{
		// Simple/XML mode: append to the current page stream.
		output = page;
	}
	else if (complexMode && !xml)
	{
		if (noframes)
		{
			output = page;
			fputs("<hr/>\n", output);
		}
		else
		{
			// Frames mode: write the outline into its own HTML file.
			GooString *str = Docname->copy();
			str->append("-outline.html");
			output = fopen(str->c_str(), "w");
			delete str;
			if (output == nullptr)
				return false;
			bClose = true;

			// Emit a standalone document header with the configured charset.
			GooString *htmlEncoding =
				HtmlOutputDev::mapEncodingToHtml(globalParams->getTextEncodingName());

			fprintf(output, "<html xmlns=\"http://www.w3.org/1999/xhtml\" " \
				"lang=\"\" xml:lang=\"\">\n" \
				"<head>\n" \
				"<title>Document Outline</title>\n" \
				"<meta http-equiv=\"Content-Type\" content=\"text/html; " \
				"charset=%s\"/>\n" \
				"</head>\n<body>\n", htmlEncoding->c_str());
			delete htmlEncoding;
		}
	}

	if (!xml)
	{
		bool done = newHtmlOutlineLevel(output, outlines);
		if (done && !complexMode)
			fputs("<hr/>\n", output);

		// Close the standalone outline file (only opened in frames mode).
		if (bClose)
		{
			fputs("</body>\n</html>\n", output);
			fclose(output);
		}
	}
	else
		newXmlOutlineLevel(output, outlines);

	return true;
}
| 0 |
[
"CWE-824"
] |
poppler
|
30c731b487190c02afff3f036736a392eb60cd9a
| 225,647,474,995,486,180,000,000,000,000,000,000,000 | 68 |
Properly initialize HtmlOutputDev::page to avoid SIGSEGV upon error exit.
Closes #742
|
/* Xenbus callback: react to backend state transitions for a netfront
 * device.  Connects the frontend when the backend enters InitWait and
 * handles the closing/closed handshake. */
static void netback_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct netfront_info *np = dev_get_drvdata(&dev->dev);
	struct net_device *netdev = np->netdev;

	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));

	/* Wake anyone waiting (e.g. module load) on a state change. */
	wake_up_all(&module_wq);

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateUnknown:
		break;

	case XenbusStateInitWait:
		/* Only connect once, from our own Initialising state. */
		if (dev->state != XenbusStateInitialising)
			break;
		if (xennet_connect(netdev) != 0)
			break;
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateConnected:
		/* Announce our (possibly new) MAC to peers, e.g. after migration. */
		netdev_notify_peers(netdev);
		break;

	case XenbusStateClosed:
		/* Ignore a repeat Closed notification. */
		if (dev->state == XenbusStateClosed)
			break;
		fallthrough;	/* Missed the backend's CLOSING state */
	case XenbusStateClosing:
		xenbus_frontend_closed(dev);
		break;
	}
}
| 0 |
[] |
linux
|
f63c2c2032c2e3caad9add3b82cc6e91c376fd26
| 89,983,623,722,233,900,000,000,000,000,000,000,000 | 39 |
xen-netfront: restore __skb_queue_tail() positioning in xennet_get_responses()
The commit referenced below moved the invocation past the "next" label,
without any explanation. In fact this allows misbehaving backends undue
control over the domain the frontend runs in, as earlier detected errors
require the skb to not be freed (it may be retained for later processing
via xennet_move_rx_slot(), or it may simply be unsafe to have it freed).
This is CVE-2022-33743 / XSA-405.
Fixes: 6c5aa6fc4def ("xen networking: add basic XDP support for xen-netfront")
Signed-off-by: Jan Beulich <[email protected]>
Reviewed-by: Juergen Gross <[email protected]>
Signed-off-by: Juergen Gross <[email protected]>
|
/* Build a list of absolute file offsets (loadaddr-adjusted), one per
 * method in the given Java binary.  The list owns its ut64 elements
 * (freed with free()).  Returns NULL only if the list itself cannot be
 * allocated; returns an empty list when bin is NULL. */
R_API RList *r_bin_java_get_method_offsets(RBinJavaObj *bin) {
	RBinJavaField *fm_type = NULL;
	RListIter *iter = NULL;
	ut64 *paddr = NULL;
	RList *the_list = r_list_new ();
	if (!the_list) {
		/* Fix: the original dereferenced the_list->free without
		 * checking the allocation result. */
		return NULL;
	}
	the_list->free = free;
	if (!bin) {
		return the_list;
	}
	r_list_foreach (bin->methods_list, iter, fm_type) {
		paddr = R_NEW0 (ut64);
		if (!paddr) {
			/* Out of memory: return what we have so far. */
			break;
		}
		*paddr = fm_type->file_offset + bin->loadaddr;
		r_list_append (the_list, paddr);
	}
	return the_list;
}
| 0 |
[
"CWE-119",
"CWE-788"
] |
radare2
|
6c4428f018d385fc80a33ecddcb37becea685dd5
| 331,418,756,721,573,000,000,000,000,000,000,000,000 | 16 |
Improve boundary checks to fix oobread segfaults ##crash
* Reported by Cen Zhang via huntr.dev
* Reproducer: bins/fuzzed/javaoob-havoc.class
|
/* Deliver an incoming RDS fragment to the bound socket (if any),
 * enforcing per-connection sequence ordering, answering pings, and
 * queueing data on the receiving socket's recv queue. */
void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
		       struct rds_incoming *inc, gfp_t gfp)
{
	struct rds_sock *rs = NULL;
	struct sock *sk;
	unsigned long flags;

	inc->i_conn = conn;
	inc->i_rx_jiffies = jiffies;

	rdsdebug("conn %p next %llu inc %p seq %llu len %u sport %u dport %u "
		 "flags 0x%x rx_jiffies %lu\n", conn,
		 (unsigned long long)conn->c_next_rx_seq,
		 inc,
		 (unsigned long long)be64_to_cpu(inc->i_hdr.h_sequence),
		 be32_to_cpu(inc->i_hdr.h_len),
		 be16_to_cpu(inc->i_hdr.h_sport),
		 be16_to_cpu(inc->i_hdr.h_dport),
		 inc->i_hdr.h_flags,
		 inc->i_rx_jiffies);

	/*
	 * Sequence numbers should only increase.  Messages get their
	 * sequence number as they're queued in a sending conn.  They
	 * can be dropped, though, if the sending socket is closed before
	 * they hit the wire.  So sequence numbers can skip forward
	 * under normal operation.  They can also drop back in the conn
	 * failover case as previously sent messages are resent down the
	 * new instance of a conn.  We drop those, otherwise we have
	 * to assume that the next valid seq does not come after a
	 * hole in the fragment stream.
	 *
	 * The headers don't give us a way to realize if fragments of
	 * a message have been dropped.  We assume that frags that arrive
	 * to a flow are part of the current message on the flow that is
	 * being reassembled.  This means that senders can't drop messages
	 * from the sending conn until all their frags are sent.
	 *
	 * XXX we could spend more on the wire to get more robust failure
	 * detection, arguably worth it to avoid data corruption.
	 */
	if (be64_to_cpu(inc->i_hdr.h_sequence) < conn->c_next_rx_seq &&
	    (inc->i_hdr.h_flags & RDS_FLAG_RETRANSMITTED)) {
		/* Retransmitted duplicate from conn failover: drop it. */
		rds_stats_inc(s_recv_drop_old_seq);
		goto out;
	}
	conn->c_next_rx_seq = be64_to_cpu(inc->i_hdr.h_sequence) + 1;

	/* dport 0 is the ping port: answer with a pong, don't deliver. */
	if (rds_sysctl_ping_enable && inc->i_hdr.h_dport == 0) {
		rds_stats_inc(s_recv_ping);
		rds_send_pong(conn, inc->i_hdr.h_sport);
		goto out;
	}

	/* Look up the socket bound to (daddr, dport); takes a reference
	 * that is dropped at 'out'. */
	rs = rds_find_bound(daddr, inc->i_hdr.h_dport);
	if (!rs) {
		rds_stats_inc(s_recv_drop_no_sock);
		goto out;
	}

	/* Process extension headers */
	rds_recv_incoming_exthdrs(inc, rs);

	/* We can be racing with rds_release() which marks the socket dead. */
	sk = rds_rs_to_sk(rs);

	/* serialize with rds_release -> sock_orphan */
	write_lock_irqsave(&rs->rs_recv_lock, flags);
	if (!sock_flag(sk, SOCK_DEAD)) {
		rdsdebug("adding inc %p to rs %p's recv queue\n", inc, rs);
		rds_stats_inc(s_recv_queued);
		rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
				      be32_to_cpu(inc->i_hdr.h_len),
				      inc->i_hdr.h_dport);
		if (sock_flag(sk, SOCK_RCVTSTAMP))
			do_gettimeofday(&inc->i_rx_tstamp);
		/* The recv queue holds its own reference on the fragment. */
		rds_inc_addref(inc);
		list_add_tail(&inc->i_item, &rs->rs_recv_queue);
		__rds_wake_sk_sleep(sk);
	} else {
		rds_stats_inc(s_recv_drop_dead_sock);
	}
	write_unlock_irqrestore(&rs->rs_recv_lock, flags);

out:
	if (rs)
		rds_sock_put(rs);
}
| 0 |
[
"CWE-200"
] |
net
|
4116def2337991b39919f3b448326e21c40e0dbb
| 115,154,345,964,703,130,000,000,000,000,000,000,000 | 88 |
rds: fix an infoleak in rds_inc_info_copy
The last field "flags" of object "minfo" is not initialized.
Copying this object out may leak kernel stack data.
Assign 0 to it to avoid leak.
Signed-off-by: Kangjie Lu <[email protected]>
Acked-by: Santosh Shilimkar <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
/* Register the procedure that parses the remaining JP2 headers.
 * Returns OPJ_TRUE on success, OPJ_FALSE if the procedure could not be
 * added to the list. */
static OPJ_BOOL opj_jp2_setup_end_header_reading(opj_jp2_t *jp2,
        opj_event_mgr_t * p_manager)
{
    /* preconditions */
    assert(jp2 != 00);
    assert(p_manager != 00);

    /* DEVELOPER CORNER, add your custom procedures */
    return opj_procedure_list_add_procedure(jp2->m_procedure_list,
               (opj_procedure)opj_jp2_read_header_procedure,
               p_manager) ? OPJ_TRUE : OPJ_FALSE;
}
| 0 |
[
"CWE-20"
] |
openjpeg
|
4edb8c83374f52cd6a8f2c7c875e8ffacccb5fa5
| 18,715,607,914,760,636,000,000,000,000,000,000,000 | 15 |
Add support for generation of PLT markers in encoder
* -PLT switch added to opj_compress
* Add a opj_encoder_set_extra_options() function that
accepts a PLT=YES option, and could be expanded later
for other uses.
-------
Testing with a Sentinel2 10m band, T36JTT_20160914T074612_B02.jp2,
coming from S2A_MSIL1C_20160914T074612_N0204_R135_T36JTT_20160914T081456.SAFE
Decompress it to TIFF:
```
opj_uncompress -i T36JTT_20160914T074612_B02.jp2 -o T36JTT_20160914T074612_B02.tif
```
Recompress it with similar parameters as original:
```
opj_compress -n 5 -c [256,256],[256,256],[256,256],[256,256],[256,256] -t 1024,1024 -PLT -i T36JTT_20160914T074612_B02.tif -o T36JTT_20160914T074612_B02_PLT.jp2
```
Dump codestream detail with GDAL dump_jp2.py utility (https://github.com/OSGeo/gdal/blob/master/gdal/swig/python/samples/dump_jp2.py)
```
python dump_jp2.py T36JTT_20160914T074612_B02.jp2 > /tmp/dump_sentinel2_ori.txt
python dump_jp2.py T36JTT_20160914T074612_B02_PLT.jp2 > /tmp/dump_sentinel2_openjpeg_plt.txt
```
The diff between both show very similar structure, and identical number of packets in PLT markers
Now testing with Kakadu (KDU803_Demo_Apps_for_Linux-x86-64_200210)
Full file decompression:
```
kdu_expand -i T36JTT_20160914T074612_B02_PLT.jp2 -o tmp.tif
Consumed 121 tile-part(s) from a total of 121 tile(s).
Consumed 80,318,806 codestream bytes (excluding any file format) = 5.329697
bits/pel.
Processed using the multi-threaded environment, with
8 parallel threads of execution
```
Partial decompresson (presumably using PLT markers):
```
kdu_expand -i T36JTT_20160914T074612_B02.jp2 -o tmp.pgm -region "{0.5,0.5},{0.01,0.01}"
kdu_expand -i T36JTT_20160914T074612_B02_PLT.jp2 -o tmp2.pgm -region "{0.5,0.5},{0.01,0.01}"
diff tmp.pgm tmp2.pgm && echo "same !"
```
-------
Funded by ESA for S2-MPC project
|
struct nfs_commit_data *nfs_commitdata_alloc(void)
{
struct nfs_commit_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOIO);
if (p) {
memset(p, 0, sizeof(*p));
INIT_LIST_HEAD(&p->pages);
}
return p;
}
| 0 |
[] |
linux
|
c7559663e42f4294ffe31fe159da6b6a66b35d61
| 276,143,229,661,947,600,000,000,000,000,000,000,000 | 10 |
NFS: Allow nfs_updatepage to extend a write under additional circumstances
Currently nfs_updatepage allows a write to be extended to cover a full
page only if we don't have a byte range lock lock on the file... but if
we have a write delegation on the file or if we have the whole file
locked for writing then we should be allowed to extend the write as
well.
Signed-off-by: Scott Mayhew <[email protected]>
[Trond: fix up call to nfs_have_delegation()]
Signed-off-by: Trond Myklebust <[email protected]>
|
/* Wrapper around expand_word_internal that turns the sentinel error
 * results into a longjmp back to the top level (DISCARD for ordinary
 * expansion errors, FORCE_EOF for fatal ones). */
static WORD_LIST *
call_expand_word_internal (w, q, i, c, e)
     WORD_DESC *w;
     int q, i, *c, *e;
{
  WORD_LIST *result;

  result = expand_word_internal (w, q, i, c, e);
  if (result == &expand_word_error || result == &expand_word_fatal)
    {
      /* By convention, each time this error is returned, w->word has
	 already been freed (it sometimes may not be in the fatal case,
	 but that doesn't result in a memory leak because we're going
	 to exit in most cases). */
      w->word = (char *)NULL;
      last_command_exit_value = EXECUTION_FAILURE;
      exp_jump_to_top_level ((result == &expand_word_error) ? DISCARD : FORCE_EOF);
      /* NOTREACHED */
      /* Unreachable; keeps the compiler's control-flow analysis happy. */
      return (NULL);
    }
  else
    return (result);
| 0 |
[
"CWE-20"
] |
bash
|
4f747edc625815f449048579f6e65869914dd715
| 329,957,529,111,928,600,000,000,000,000,000,000,000 | 22 |
Bash-4.4 patch 7
|
/* Print an IEEE 802.15.4 frame: frame control, sequence number, then the
 * destination and source PAN IDs/addresses as dictated by the addressing
 * modes in the frame-control field.  Returns the number of header bytes
 * consumed; truncated frames print "[|802.15.4]". */
ieee802_15_4_if_print(netdissect_options *ndo,
                      const struct pcap_pkthdr *h, const u_char *p)
{
	u_int caplen = h->caplen;
	u_int hdrlen;
	uint16_t fc;
	uint8_t seq;
	uint16_t panid = 0;

	/* Need at least frame control (2) + sequence number (1). */
	if (caplen < 3) {
		ND_PRINT((ndo, "[|802.15.4]"));
		return caplen;
	}
	hdrlen = 3;

	fc = EXTRACT_LE_16BITS(p);
	seq = EXTRACT_LE_8BITS(p + 2);

	p += 3;
	caplen -= 3;

	ND_PRINT((ndo,"IEEE 802.15.4 %s packet ", ftypes[FC_FRAME_TYPE(fc)]));
	if (ndo->ndo_vflag)
		ND_PRINT((ndo,"seq %02x ", seq));

	/*
	 * Destination address and PAN ID, if present.
	 */
	switch (FC_DEST_ADDRESSING_MODE(fc)) {
	case FC_ADDRESSING_MODE_NONE:
		if (fc & FC_PAN_ID_COMPRESSION) {
			/*
			 * PAN ID compression; this requires that both
			 * the source and destination addresses be present,
			 * but the destination address is missing.
			 */
			ND_PRINT((ndo, "[|802.15.4]"));
			return hdrlen;
		}
		if (ndo->ndo_vflag)
			ND_PRINT((ndo,"none "));
		break;
	case FC_ADDRESSING_MODE_RESERVED:
		if (ndo->ndo_vflag)
			ND_PRINT((ndo,"reserved destination addressing mode"));
		return hdrlen;
	case FC_ADDRESSING_MODE_SHORT:
		if (caplen < 2) {
			ND_PRINT((ndo, "[|802.15.4]"));
			return hdrlen;
		}
		panid = EXTRACT_LE_16BITS(p);
		p += 2;
		caplen -= 2;
		hdrlen += 2;
		if (caplen < 2) {
			ND_PRINT((ndo, "[|802.15.4]"));
			return hdrlen;
		}
		/*
		 * CVE-2017-13000 fix: p has already been advanced past the
		 * PAN ID above, so the 16-bit short address is at p, not
		 * p + 2.  Reading p + 2 over-read the capture buffer.
		 */
		if (ndo->ndo_vflag)
			ND_PRINT((ndo,"%04x:%04x ", panid, EXTRACT_LE_16BITS(p)));
		p += 2;
		caplen -= 2;
		hdrlen += 2;
		break;
	case FC_ADDRESSING_MODE_LONG:
		if (caplen < 2) {
			ND_PRINT((ndo, "[|802.15.4]"));
			return hdrlen;
		}
		panid = EXTRACT_LE_16BITS(p);
		p += 2;
		caplen -= 2;
		hdrlen += 2;
		if (caplen < 8) {
			ND_PRINT((ndo, "[|802.15.4]"));
			return hdrlen;
		}
		if (ndo->ndo_vflag)
			ND_PRINT((ndo,"%04x:%s ", panid, le64addr_string(ndo, p)));
		p += 8;
		caplen -= 8;
		hdrlen += 8;
		break;
	}
	if (ndo->ndo_vflag)
		ND_PRINT((ndo,"< "));

	/*
	 * Source address and PAN ID, if present.
	 */
	switch (FC_SRC_ADDRESSING_MODE(fc)) {
	case FC_ADDRESSING_MODE_NONE:
		if (ndo->ndo_vflag)
			ND_PRINT((ndo,"none "));
		break;
	case FC_ADDRESSING_MODE_RESERVED:
		if (ndo->ndo_vflag)
			ND_PRINT((ndo,"reserved source addressing mode"));
		/* NOTE(review): other early-exit paths return hdrlen;
		 * this one returns 0 — confirm intended. */
		return 0;
	case FC_ADDRESSING_MODE_SHORT:
		if (!(fc & FC_PAN_ID_COMPRESSION)) {
			/*
			 * The source PAN ID is not compressed out, so
			 * fetch it.  (Otherwise, we'll use the destination
			 * PAN ID, fetched above.)
			 */
			if (caplen < 2) {
				ND_PRINT((ndo, "[|802.15.4]"));
				return hdrlen;
			}
			panid = EXTRACT_LE_16BITS(p);
			p += 2;
			caplen -= 2;
			hdrlen += 2;
		}
		if (caplen < 2) {
			ND_PRINT((ndo, "[|802.15.4]"));
			return hdrlen;
		}
		if (ndo->ndo_vflag)
			ND_PRINT((ndo,"%04x:%04x ", panid, EXTRACT_LE_16BITS(p)));
		p += 2;
		caplen -= 2;
		hdrlen += 2;
		break;
	case FC_ADDRESSING_MODE_LONG:
		if (!(fc & FC_PAN_ID_COMPRESSION)) {
			/*
			 * The source PAN ID is not compressed out, so
			 * fetch it.  (Otherwise, we'll use the destination
			 * PAN ID, fetched above.)
			 */
			if (caplen < 2) {
				ND_PRINT((ndo, "[|802.15.4]"));
				return hdrlen;
			}
			panid = EXTRACT_LE_16BITS(p);
			p += 2;
			caplen -= 2;
			hdrlen += 2;
		}
		if (caplen < 8) {
			ND_PRINT((ndo, "[|802.15.4]"));
			return hdrlen;
		}
		if (ndo->ndo_vflag)
			ND_PRINT((ndo,"%04x:%s ", panid, le64addr_string(ndo, p)));
		p += 8;
		caplen -= 8;
		hdrlen += 8;
		break;
	}

	/* Dump the remaining payload unless suppressed. */
	if (!ndo->ndo_suppress_default_print)
		ND_DEFAULTPRINT(p, caplen);

	return hdrlen;
}
| 1 |
[
"CWE-125",
"CWE-787"
] |
tcpdump
|
8512734883227c11568bb35da1d48b9f8466f43f
| 108,251,335,796,461,920,000,000,000,000,000,000,000 | 159 |
CVE-2017-13000/IEEE 802.15.4: Fix bug introduced two fixes prior.
We've already advanced the pointer past the PAN ID, if present; it now
points to the address, so don't add 2 to it.
This fixes a buffer over-read discovered by Forcepoint's security
researchers Otto Airamo & Antti Levomäki.
Add a test using the capture file supplied by the reporter(s).
|
/* Return the index of the currently selected row in the layer tree,
 * or -1 when nothing is selected.
 * This will only work in single or browse selection mode! */
callbacks_get_selected_row_index  (void)
{
	GtkTreeSelection *selection;
	GtkTreeIter iter;
	GtkListStore *list_store = (GtkListStore *) gtk_tree_view_get_model
			((GtkTreeView *) screen.win.layerTree);
	gint index=-1,i=0;

	/* This will only work in single or browse selection mode! */
	selection = gtk_tree_view_get_selection((GtkTreeView *) screen.win.layerTree);
	if (gtk_tree_selection_get_selected(selection, NULL, &iter)) {
		/* Walk the rows until we find the one the selection points at. */
		while (gtk_tree_model_iter_nth_child ((GtkTreeModel *)list_store,
				&iter, NULL, i)){
			if (gtk_tree_selection_iter_is_selected (selection, &iter)) {
				return i;
			}
			i++;
		}
	}
	return index;
}
| 0 |
[
"CWE-200"
] |
gerbv
|
319a8af890e4d0a5c38e6d08f510da8eefc42537
| 310,998,524,776,849,640,000,000,000,000,000,000,000 | 21 |
Remove local alias to parameter array
Normalizing access to `gerbv_simplified_amacro_t::parameter` as a step to fix CVE-2021-40402
|
/* Regression test: fetch rows from a prepared statement both without any
 * bound result buffers (values discarded) and with bound string buffers. */
static void test_fetch_nobuffs()
{
  MYSQL_STMT *stmt;
  MYSQL_BIND my_bind[4];
  char       str[4][50];
  int        rc;

  myheader("test_fetch_nobuffs");

  stmt= mysql_simple_prepare(mysql, "SELECT DATABASE(), CURRENT_USER(), \
                             CURRENT_DATE(), CURRENT_TIME()");
  check_stmt(stmt);

  rc= mysql_stmt_execute(stmt);
  check_execute(stmt, rc);

  /* First pass: no result bindings — fetch should still iterate rows. */
  rc= 0;
  while (mysql_stmt_fetch(stmt) != MYSQL_NO_DATA)
    rc++;

  if (!opt_silent)
    fprintf(stdout, "\n total rows : %d", rc);
  DIE_UNLESS(rc == 1);

  /* Only the first element needs zeroing: [1..3] are copied from [0]
     below, so they inherit the cleared state. */
  memset(my_bind, 0, sizeof(MYSQL_BIND));
  my_bind[0].buffer_type= MYSQL_TYPE_STRING;
  my_bind[0].buffer= (void *)str[0];
  my_bind[0].buffer_length= sizeof(str[0]);
  my_bind[1]= my_bind[2]= my_bind[3]= my_bind[0];
  my_bind[1].buffer= (void *)str[1];
  my_bind[2].buffer= (void *)str[2];
  my_bind[3].buffer= (void *)str[3];

  rc= mysql_stmt_bind_result(stmt, my_bind);
  check_execute(stmt, rc);

  rc= mysql_stmt_execute(stmt);
  check_execute(stmt, rc);

  /* Second pass: bound buffers receive the column values as strings. */
  rc= 0;
  while (mysql_stmt_fetch(stmt) != MYSQL_NO_DATA)
  {
    rc++;
    if (!opt_silent)
    {
      fprintf(stdout, "\n CURRENT_DATABASE(): %s", str[0]);
      fprintf(stdout, "\n CURRENT_USER()    : %s", str[1]);
      fprintf(stdout, "\n CURRENT_DATE()    : %s", str[2]);
      fprintf(stdout, "\n CURRENT_TIME()    : %s", str[3]);
    }
  }
  if (!opt_silent)
    fprintf(stdout, "\n total rows : %d", rc);
  DIE_UNLESS(rc == 1);

  mysql_stmt_close(stmt);
}
| 0 |
[
"CWE-284",
"CWE-295"
] |
mysql-server
|
3bd5589e1a5a93f9c224badf983cd65c45215390
| 339,072,578,823,362,230,000,000,000,000,000,000,000 | 57 |
WL#6791 : Redefine client --ssl option to imply enforced encryption
# Changed the meaning of the --ssl=1 option of all client binaries
to mean force ssl, not try ssl and fail over to unencrypted
# Added a new MYSQL_OPT_SSL_ENFORCE mysql_options()
option to specify that an ssl connection is required.
# Added a new macro SSL_SET_OPTIONS() to the client
SSL handling headers that sets all the relevant SSL options at
once.
# Revamped all of the current native clients to use the new macro
# Removed some Windows line endings.
# Added proper handling of the new option into the ssl helper
headers.
# If SSL is mandatory assume that the media is secure enough
for the sha256 plugin to do unencrypted password exchange even
before establishing a connection.
# Set the default ssl cipher to DHE-RSA-AES256-SHA if none is
specified.
# updated test cases that require a non-default cipher to spawn
a mysql command line tool binary since mysqltest has no support
for specifying ciphers.
# updated the replication slave connection code to always enforce
SSL if any of the SSL config options is present.
# test cases added and updated.
# added a mysql_get_option() API to return mysql_options()
values. Used the new API inside the sha256 plugin.
# Fixed compilation warnings because of unused variables.
# Fixed test failures (mysql_ssl and bug13115401)
# Fixed whitespace issues.
# Fully implemented the mysql_get_option() function.
# Added a test case for mysql_get_option()
# fixed some trailing whitespace issues
# fixed some uint/int warnings in mysql_client_test.c
# removed shared memory option from non-windows get_options
tests
# moved MYSQL_OPT_LOCAL_INFILE to the uint options
|
/* Import a list of certificates from 'data', allocating *certs to hold
 * them.  Starts with room for 1024 entries; if the list is larger, the
 * first pass reports the required count in 'init' and the array is
 * grown before a second import pass.  On success *size is set and 0 is
 * returned; on failure *certs is freed and set to NULL. */
gnutls_x509_crt_list_import2(gnutls_x509_crt_t ** certs,
			     unsigned int *size,
			     const gnutls_datum_t * data,
			     gnutls_x509_crt_fmt_t format,
			     unsigned int flags)
{
	unsigned int init = 1024;
	int ret;
	gnutls_x509_crt_t *tmp;

	*certs = gnutls_malloc(sizeof(gnutls_x509_crt_t) * init);
	if (*certs == NULL) {
		gnutls_assert();
		return GNUTLS_E_MEMORY_ERROR;
	}

	ret =
	    gnutls_x509_crt_list_import(*certs, &init, data, format,
					GNUTLS_X509_CRT_LIST_IMPORT_FAIL_IF_EXCEED);
	if (ret == GNUTLS_E_SHORT_MEMORY_BUFFER) {
		/* 'init' now holds the required element count; grow the
		 * array and retry with the caller's flags.
		 * Fix: keep the old pointer in a temporary so a failed
		 * reallocation does not leak the original array. */
		tmp =
		    gnutls_realloc_fast(*certs,
					sizeof(gnutls_x509_crt_t) * init);
		if (tmp == NULL) {
			gnutls_assert();
			gnutls_free(*certs);
			*certs = NULL;
			return GNUTLS_E_MEMORY_ERROR;
		}
		*certs = tmp;

		ret =
		    gnutls_x509_crt_list_import(*certs, &init, data,
						format, flags);
	}

	if (ret < 0) {
		/* NOTE(review): any crt objects created by a partially
		 * successful import pass are assumed to be cleaned up by
		 * gnutls_x509_crt_list_import itself — confirm. */
		gnutls_free(*certs);
		*certs = NULL;
		return ret;
	}

	*size = init;
	return 0;
}
| 0 |
[
"CWE-295"
] |
gnutls
|
6e76e9b9fa845b76b0b9a45f05f4b54a052578ff
| 324,757,879,642,229,020,000,000,000,000,000,000,000 | 41 |
on certificate import check whether the two signature algorithms match
|
S_ssc_finalize(pTHX_ RExC_state_t *pRExC_state, regnode_ssc *ssc)
{
    /* The inversion list in the SSC is marked mortal; now we need a more
     * permanent copy, which is stored the same way that is done in a regular
     * ANYOF node, with the first NUM_ANYOF_CODE_POINTS code points in a bit
     * map */

    SV* invlist = invlist_clone(ssc->invlist);

    PERL_ARGS_ASSERT_SSC_FINALIZE;

    assert(is_ANYOF_SYNTHETIC(ssc));

    /* The code in this file assumes that all but these flags aren't relevant
     * to the SSC, except SSC_MATCHES_EMPTY_STRING, which should be cleared
     * by the time we reach here */
    assert(! (ANYOF_FLAGS(ssc)
        & ~( ANYOF_COMMON_FLAGS
            |ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER
            |ANYOF_SHARED_d_UPPER_LATIN1_UTF8_STRING_MATCHES_non_d_RUNTIME_USER_PROP)));

    /* Fill the node's bitmap and argument from the cloned inversion list. */
    populate_ANYOF_from_invlist( (regnode *) ssc, &invlist);

    set_ANYOF_arg(pRExC_state, (regnode *) ssc, invlist,
                                NULL, NULL, NULL, FALSE);

    /* Make sure is clone-safe */
    ssc->invlist = NULL;

    if (ANYOF_POSIXL_SSC_TEST_ANY_SET(ssc)) {
        ANYOF_FLAGS(ssc) |= ANYOF_MATCHES_POSIXL;
    }

    /* Locale-dependent SSCs need the locale-aware op variant. */
    if (RExC_contains_locale) {
        OP(ssc) = ANYOFL;
    }

    assert(! (ANYOF_FLAGS(ssc) & ANYOF_LOCALE_FLAGS) || RExC_contains_locale);
}
| 0 |
[
"CWE-125"
] |
perl5
|
43b2f4ef399e2fd7240b4eeb0658686ad95f8e62
| 176,678,379,817,819,300,000,000,000,000,000,000,000 | 39 |
regcomp.c: Convert some strchr to memchr
This allows things to work properly in the face of embedded NULs.
See the branch merge message for more information.
|
int hugetlb_get_quota(struct address_space *mapping, long delta)
{
	/* Charge 'delta' blocks against the hugetlbfs superblock's free-block
	 * budget.  Returns 0 when the charge is granted (or no limit is
	 * configured), -ENOMEM when the budget would be exhausted. */
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(mapping->host->i_sb);
	int err = 0;

	/* A negative free_blocks means "no limit set": always grant. */
	if (sbinfo->free_blocks <= -1)
		return 0;

	spin_lock(&sbinfo->stat_lock);
	if (sbinfo->free_blocks >= delta)
		sbinfo->free_blocks -= delta;
	else
		err = -ENOMEM;
	spin_unlock(&sbinfo->stat_lock);

	return err;
}
| 1 |
[
"CWE-399"
] |
linux
|
90481622d75715bfcb68501280a917dbfe516029
| 165,672,267,029,961,000,000,000,000,000,000,000,000 | 16 |
hugepages: fix use after free bug in "quota" handling
hugetlbfs_{get,put}_quota() are badly named. They don't interact with the
general quota handling code, and they don't much resemble its behaviour.
Rather than being about maintaining limits on on-disk block usage by
particular users, they are instead about maintaining limits on in-memory
page usage (including anonymous MAP_PRIVATE copied-on-write pages)
associated with a particular hugetlbfs filesystem instance.
Worse, they work by having callbacks to the hugetlbfs filesystem code from
the low-level page handling code, in particular from free_huge_page().
This is a layering violation of itself, but more importantly, if the
kernel does a get_user_pages() on hugepages (which can happen from KVM
amongst others), then the free_huge_page() can be delayed until after the
associated inode has already been freed. If an unmount occurs at the
wrong time, even the hugetlbfs superblock where the "quota" limits are
stored may have been freed.
Andrew Barry proposed a patch to fix this by having hugepages, instead of
storing a pointer to their address_space and reaching the superblock from
there, had the hugepages store pointers directly to the superblock,
bumping the reference count as appropriate to avoid it being freed.
Andrew Morton rejected that version, however, on the grounds that it made
the existing layering violation worse.
This is a reworked version of Andrew's patch, which removes the extra, and
some of the existing, layering violation. It works by introducing the
concept of a hugepage "subpool" at the lower hugepage mm layer - that is a
finite logical pool of hugepages to allocate from. hugetlbfs now creates
a subpool for each filesystem instance with a page limit set, and a
pointer to the subpool gets added to each allocated hugepage, instead of
the address_space pointer used now. The subpool has its own lifetime and
is only freed once all pages in it _and_ all other references to it (i.e.
superblocks) are gone.
subpools are optional - a NULL subpool pointer is taken by the code to
mean that no subpool limits are in effect.
Previous discussion of this bug found in: "Fix refcounting in hugetlbfs
quota handling.". See: https://lkml.org/lkml/2011/8/11/28 or
http://marc.info/?l=linux-mm&m=126928970510627&w=1
v2: Fixed a bug spotted by Hillf Danton, and removed the extra parameter to
alloc_huge_page() - since it already takes the vma, it is not necessary.
Signed-off-by: Andrew Barry <[email protected]>
Signed-off-by: David Gibson <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Minchan Kim <[email protected]>
Cc: Hillf Danton <[email protected]>
Cc: Paul Mackerras <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
static void GIFEncode(gdIOCtxPtr fp, int GWidth, int GHeight, int GInterlace, int Background, int Transparent, int BitsPerPixel, int *Red, int *Green, int *Blue, gdImagePtr im)
{
	/* Serialize 'im' as a GIF87a/GIF89a stream into 'fp': header, logical
	 * screen descriptor, global color table, optional transparency
	 * extension, image descriptor, then the LZW-compressed pixel data. */
	int B;
	int RWidth, RHeight;
	int LeftOfs, TopOfs;
	int Resolution;
	int ColorMapSize;
	int InitCodeSize;
	int i;
	GifCtx ctx;

	/* Start with a clean encoder state (bit buffer, counters, ...). */
	memset(&ctx, 0, sizeof(ctx));

	ctx.Interlace = GInterlace;
	ctx.in_count = 1;

	ColorMapSize = 1 << BitsPerPixel;

	RWidth = ctx.Width = GWidth;
	RHeight = ctx.Height = GHeight;
	LeftOfs = TopOfs = 0;

	Resolution = BitsPerPixel;

	/* Calculate number of bits we are expecting */
	ctx.CountDown = (long)ctx.Width * (long)ctx.Height;

	/* Indicate which pass we are on (if interlace) */
	ctx.Pass = 0;

	/* The initial code size */
	if(BitsPerPixel <= 1) {
		InitCodeSize = 2;
	} else {
		InitCodeSize = BitsPerPixel;
	}

	/* Set up the current x and y position */
	ctx.curx = ctx.cury = 0;

	/* Write the Magic header */
	/* GIF89a is required when a transparent color index is present. */
	gdPutBuf(Transparent < 0 ? "GIF87a" : "GIF89a", 6, fp);

	/* Write out the screen width and height */
	gifPutWord(RWidth, fp);
	gifPutWord(RHeight, fp);

	/* Indicate that there is a global colour map */
	/* Yes, there is a color map */
	B = 0x80;

	/* OR in the resolution */
	B |= (Resolution - 1) << 4;

	/* OR in the Bits per Pixel */
	B |= (BitsPerPixel - 1);

	/* Write it out */
	gdPutC(B, fp);

	/* Write out the Background colour */
	gdPutC(Background, fp);

	/* Byte of 0's (future expansion) */
	gdPutC(0, fp);

	/* Write out the Global Colour Map */
	for(i = 0; i < ColorMapSize; ++i) {
		gdPutC(Red[i], fp);
		gdPutC(Green[i], fp);
		gdPutC(Blue[i], fp);
	}

	/* Write out extension for transparent colour index, if necessary. */
	if(Transparent >= 0) {
		/* Graphic Control Extension: '!' introducer, label 0xf9,
		 * 4 data bytes, terminated by a 0-length sub-block. */
		gdPutC('!', fp);
		gdPutC(0xf9, fp);
		gdPutC(4, fp);
		gdPutC(1, fp);
		gdPutC(0, fp);
		gdPutC(0, fp);
		gdPutC((unsigned char) Transparent, fp);
		gdPutC(0, fp);
	}

	/* Write an Image separator */
	gdPutC(',', fp);

	/* Write the Image header */
	gifPutWord(LeftOfs, fp);
	gifPutWord(TopOfs, fp);
	gifPutWord(ctx.Width, fp);
	gifPutWord(ctx.Height, fp);

	/* Write out whether or not the image is interlaced */
	if(ctx.Interlace) {
		gdPutC(0x40, fp);
	} else {
		gdPutC(0x00, fp);
	}

	/* Write out the initial code size */
	gdPutC(InitCodeSize, fp);

	/* Go and actually compress the data */
	compress(InitCodeSize + 1, fp, im, &ctx);

	/* Write out a Zero-length packet (to end the series) */
	gdPutC(0, fp);

	/* Write the GIF file terminator */
	gdPutC(';', fp);
}
| 0 |
[
"CWE-415"
] |
libgd
|
553702980ae89c83f2d6e254d62cf82e204956d0
| 259,166,464,731,265,200,000,000,000,000,000,000,000 | 113 |
Fix #492: Potential double-free in gdImage*Ptr()
Whenever `gdImage*Ptr()` calls `gdImage*Ctx()` and the latter fails, we
must not call `gdDPExtractData()`; otherwise a double-free would
happen. Since `gdImage*Ctx()` are void functions, and we can't change
that for BC reasons, we're introducing static helpers which are used
internally.
We're adding a regression test for `gdImageJpegPtr()`, but not for
`gdImageGifPtr()` and `gdImageWbmpPtr()` since we don't know how to
trigger failure of the respective `gdImage*Ctx()` calls.
This potential security issue has been reported by Solmaz Salimi (aka.
Rooney).
|
cmsStage* CMSEXPORT cmsStageAllocCLut16bitGranular(cmsContext ContextID,
                                                   const cmsUInt32Number clutPoints[],
                                                   cmsUInt32Number inputChan,
                                                   cmsUInt32Number outputChan,
                                                   const cmsUInt16Number* Table)
{
    /* Allocate a 16-bit CLUT stage holding outputChan * prod(clutPoints)
     * entries, optionally seeded from 'Table'.  Returns NULL on allocation
     * failure, on an empty or overflowing grid, or if the interpolation
     * parameters cannot be computed. */
    cmsUInt32Number i, n;
    cmsUInt32Number cube;
    _cmsStageCLutData* NewElem;
    cmsStage* NewMPE;

    NewMPE = _cmsStageAllocPlaceholder(ContextID, cmsSigCLutElemType, inputChan, outputChan,
                                       EvaluateCLUTfloatIn16, CLUTElemDup, CLutElemTypeFree, NULL );

    if (NewMPE == NULL) return NULL;

    NewElem = (_cmsStageCLutData*) _cmsMallocZero(ContextID, sizeof(_cmsStageCLutData));
    if (NewElem == NULL) {
        cmsStageFree(NewMPE);
        return NULL;
    }

    NewMPE ->Data  = (void*) NewElem;

    /* Guard the entry-count computation against 32-bit wrap-around: a
     * wrapped 'n' would under-allocate Tab.T and overflow the heap in the
     * copy loop below.  CubeSize() returning 0 is treated as overflow or
     * an empty grid. */
    cube = CubeSize(clutPoints, inputChan);
    if (outputChan == 0 || cube == 0 ||
        cube > (cmsUInt32Number) 0xFFFFFFFFU / outputChan) {
        cmsStageFree(NewMPE);
        return NULL;
    }

    NewElem -> nEntries = n = outputChan * cube;
    NewElem -> HasFloatValues = FALSE;

    NewElem ->Tab.T  = (cmsUInt16Number*) _cmsCalloc(ContextID, n, sizeof(cmsUInt16Number));
    if (NewElem ->Tab.T == NULL) {
        cmsStageFree(NewMPE);
        return NULL;
    }

    /* Optional seed table: copy caller-supplied initial LUT contents. */
    if (Table != NULL) {
        for (i=0; i < n; i++) {
            NewElem ->Tab.T[i] = Table[i];
        }
    }

    NewElem ->Params = _cmsComputeInterpParamsEx(ContextID, clutPoints, inputChan, outputChan, NewElem ->Tab.T, CMS_LERP_FLAGS_16BITS);
    if (NewElem ->Params == NULL) {
        cmsStageFree(NewMPE);
        return NULL;
    }

    return NewMPE;
}
| 1 |
[] |
Little-CMS
|
5d98f40ed58f6e8eb9aee6dd2d9467bbc8551ee7
| 44,075,234,313,482,750,000,000,000,000,000,000,000 | 52 |
Fixed some minor issues for 2.4
|
static void ok_jpg_generate_huffman_table_lookups(ok_jpg_huffman_table *huff, bool is_ac_table) {
    // Build fast-path lookup tables so most Huffman codes can be decoded from
    // a single HUFFMAN_LOOKUP_SIZE_BITS-wide bit peek instead of walking
    // mincode/maxcode one bit at a time.

    // Look up table for codes that use N bits or less (most of them)
    for (int q = 0; q < HUFFMAN_LOOKUP_SIZE; q++) {
        huff->lookup_num_bits[q] = 0;
        for (uint8_t i = 0; i < HUFFMAN_LOOKUP_SIZE_BITS; i++) {
            uint8_t num_bits = i + 1;
            // Interpret the top 'num_bits' bits of the peeked window as a code.
            int code = q >> (HUFFMAN_LOOKUP_SIZE_BITS - num_bits);
            if (code <= huff->maxcode[i]) {
                huff->lookup_num_bits[q] = num_bits;

                int j = huff->valptr[i];
                j += code - huff->mincode[i];
                huff->lookup_val[q] = huff->val[j];
                break;
            }
        }
    }

    if (is_ac_table) {
        // Additional lookup table to get both RS and extended value
        for (int q = 0; q < HUFFMAN_LOOKUP_SIZE; q++) {
            huff->lookup_ac_num_bits[q] = 0;
            int num_bits = huff->lookup_num_bits[q];
            if (num_bits > 0) {
                // RS byte: high nibble r, low nibble s (size of the extra
                // value bits that follow the code).
                int rs = huff->lookup_val[q];
                int r = rs >> 4;
                int s = rs & 0x0f;
                int total_bits = num_bits;
                if (s > 0) {
                    total_bits += s;
                } else if (r > 0 && r < 0x0f) {
                    total_bits += r;
                }
                // Only precompute when code + extra bits fit in the window.
                if (total_bits <= HUFFMAN_LOOKUP_SIZE_BITS) {
                    huff->lookup_ac_num_bits[q] = (uint8_t)total_bits;
                    if (s > 0) {
                        // Extract the s value bits and sign-extend them.
                        int v = (q >> (HUFFMAN_LOOKUP_SIZE_BITS - total_bits)) & ((1 << s) - 1);
                        huff->lookup_ac_val[q] = (int16_t)ok_jpg_extend(v, s);
                    } else if (r > 0 && r < 0x0f) {
                        // s == 0: value reconstructed as (1 << r) + v - 1
                        // from the next r bits.
                        int v = (q >> (HUFFMAN_LOOKUP_SIZE_BITS - total_bits)) & ((1 << r) - 1);
                        huff->lookup_ac_val[q] = (int16_t)((1 << r) + v - 1);
                    } else {
                        huff->lookup_ac_val[q] = 0;
                    }
                }
            }
        }
    }
}
| 0 |
[
"CWE-787"
] |
ok-file-formats
|
a9cc1711dd4ed6a215038f1c5c03af0ef52c3211
| 181,501,137,022,103,400,000,000,000,000,000,000,000 | 49 |
ok_jpg: Fix invalid DHT (#11)
|
static void mtrr_lookup_next(struct mtrr_iter *iter)
{
	/* Advance the iterator with the walker matching its current kind:
	 * variable-range MTRRs unless we are in the fixed-range set. */
	if (!iter->fixed)
		mtrr_lookup_var_next(iter);
	else
		mtrr_lookup_fixed_next(iter);
}
| 0 |
[
"CWE-284"
] |
linux
|
9842df62004f366b9fed2423e24df10542ee0dc5
| 304,406,445,909,149,500,000,000,000,000,000,000,000 | 7 |
KVM: MTRR: remove MSR 0x2f8
MSR 0x2f8 accessed the 124th Variable Range MTRR ever since MTRR support
was introduced by 9ba075a664df ("KVM: MTRR support").
0x2f8 became harmful when 910a6aae4e2e ("KVM: MTRR: exactly define the
size of variable MTRRs") shrinked the array of VR MTRRs from 256 to 8,
which made access to index 124 out of bounds. The surrounding code only
WARNs in this situation, thus the guest gained a limited read/write
access to struct kvm_arch_vcpu.
0x2f8 is not a valid VR MTRR MSR, because KVM has/advertises only 16 VR
MTRR MSRs, 0x200-0x20f. Every VR MTRR is set up using two MSRs, 0x2f8
was treated as a PHYSBASE and 0x2f9 would be its PHYSMASK, but 0x2f9 was
not implemented in KVM, therefore 0x2f8 could never do anything useful
and getting rid of it is safe.
This fixes CVE-2016-3713.
Fixes: 910a6aae4e2e ("KVM: MTRR: exactly define the size of variable MTRRs")
Cc: [email protected]
Reported-by: David Matlack <[email protected]>
Signed-off-by: Andy Honig <[email protected]>
Signed-off-by: Radim Krčmář <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
static int check_user_pass(ctrl_t *ctrl)
{
	/* Classify the login state of this session:
	 *   -1  no user name has been supplied yet
	 *    1  anonymous login
	 *    0  named user (password required) */
	if (ctrl->name[0] == 0)
		return -1;

	return strcmp(ctrl->name, "anonymous") == 0 ? 1 : 0;
}
| 0 |
[
"CWE-120",
"CWE-787"
] |
uftpd
|
0fb2c031ce0ace07cc19cd2cb2143c4b5a63c9dd
| 93,552,693,333,275,420,000,000,000,000,000,000,000 | 10 |
FTP: Fix buffer overflow in PORT parser, reported by Aaron Esau
Signed-off-by: Joachim Nilsson <[email protected]>
|
void skb_abort_seq_read(struct skb_seq_state *st)
{
	/* Tear down a sequential skb read early: release the fragment
	 * mapping held in the iterator state, if one is active. */
	if (st->frag_data != NULL)
		kunmap_skb_frag(st->frag_data);
}
| 0 |
[] |
linux
|
e89e9cf539a28df7d0eb1d0a545368e9920b34ac
| 132,821,427,116,312,740,000,000,000,000,000,000,000 | 5 |
[IPv4/IPv6]: UFO Scatter-gather approach
Attached is kernel patch for UDP Fragmentation Offload (UFO) feature.
1. This patch incorporate the review comments by Jeff Garzik.
2. Renamed USO as UFO (UDP Fragmentation Offload)
3. udp sendfile support with UFO
This patches uses scatter-gather feature of skb to generate large UDP
datagram. Below is a "how-to" on changes required in network device
driver to use the UFO interface.
UDP Fragmentation Offload (UFO) Interface:
-------------------------------------------
UFO is a feature wherein the Linux kernel network stack will offload the
IP fragmentation functionality of large UDP datagram to hardware. This
will reduce the overhead of stack in fragmenting the large UDP datagram to
MTU sized packets
1) Drivers indicate their capability of UFO using
dev->features |= NETIF_F_UFO | NETIF_F_HW_CSUM | NETIF_F_SG
NETIF_F_HW_CSUM is required for UFO over ipv6.
2) UFO packet will be submitted for transmission using driver xmit routine.
UFO packet will have a non-zero value for
"skb_shinfo(skb)->ufo_size"
skb_shinfo(skb)->ufo_size will indicate the length of data part in each IP
fragment going out of the adapter after IP fragmentation by hardware.
skb->data will contain MAC/IP/UDP header and skb_shinfo(skb)->frags[]
contains the data payload. The skb->ip_summed will be set to CHECKSUM_HW
indicating that hardware has to do checksum calculation. Hardware should
compute the UDP checksum of complete datagram and also ip header checksum of
each fragmented IP packet.
For IPV6 the UFO provides the fragment identification-id in
skb_shinfo(skb)->ip6_frag_id. The adapter should use this ID for generating
IPv6 fragments.
Signed-off-by: Ananda Raju <[email protected]>
Signed-off-by: Rusty Russell <[email protected]> (forwarded)
Signed-off-by: Arnaldo Carvalho de Melo <[email protected]>
|
void exit_mmap(struct mm_struct *mm)
{
	/* Final teardown of an address space: munlock any mlocked VMAs,
	 * unmap everything, settle the OOM-reaper handshake for OOM-killed
	 * tasks, then free page tables and VMA structures. */
	struct mmu_gather tlb;
	struct vm_area_struct *vma;
	unsigned long nr_accounted = 0;

	/* mm's last user has gone, and its about to be pulled down */
	mmu_notifier_release(mm);

	/* Undo mlock on every locked VMA before the pages are unmapped. */
	if (mm->locked_vm) {
		vma = mm->mmap;
		while (vma) {
			if (vma->vm_flags & VM_LOCKED)
				munlock_vma_pages_all(vma);
			vma = vma->vm_next;
		}
	}

	arch_exit_mmap(mm);

	vma = mm->mmap;
	if (!vma)	/* Can happen if dup_mmap() received an OOM */
		return;

	lru_add_drain();
	flush_cache_mm(mm);
	tlb_gather_mmu(&tlb, mm, 0, -1);
	/* update_hiwater_rss(mm) here? but nobody should be looking */
	/* Use -1 here to ensure all VMAs in the mm are unmapped */
	unmap_vmas(&tlb, vma, 0, -1);

	if (unlikely(mm_is_oom_victim(mm))) {
		/*
		 * Wait for oom_reap_task() to stop working on this
		 * mm. Because MMF_OOM_SKIP is already set before
		 * calling down_read(), oom_reap_task() will not run
		 * on this "mm" post up_write().
		 *
		 * mm_is_oom_victim() cannot be set from under us
		 * either because victim->mm is already set to NULL
		 * under task_lock before calling mmput and oom_mm is
		 * set not NULL by the OOM killer only if victim->mm
		 * is found not NULL while holding the task_lock.
		 */
		set_bit(MMF_OOM_SKIP, &mm->flags);
		down_write(&mm->mmap_sem);
		up_write(&mm->mmap_sem);
	}

	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
	tlb_finish_mmu(&tlb, 0, -1);

	/*
	 * Walk the list again, actually closing and freeing it,
	 * with preemption enabled, without holding any MM locks.
	 */
	while (vma) {
		if (vma->vm_flags & VM_ACCOUNT)
			nr_accounted += vma_pages(vma);
		vma = remove_vma(vma);
	}
	vm_unacct_memory(nr_accounted);
}
| 1 |
[
"CWE-476"
] |
linux
|
27ae357fa82be5ab73b2ef8d39dcb8ca2563483a
| 141,544,773,978,631,740,000,000,000,000,000,000,000 | 62 |
mm, oom: fix concurrent munlock and oom reaper unmap, v3
Since exit_mmap() is done without the protection of mm->mmap_sem, it is
possible for the oom reaper to concurrently operate on an mm until
MMF_OOM_SKIP is set.
This allows munlock_vma_pages_all() to concurrently run while the oom
reaper is operating on a vma. Since munlock_vma_pages_range() depends
on clearing VM_LOCKED from vm_flags before actually doing the munlock to
determine if any other vmas are locking the same memory, the check for
VM_LOCKED in the oom reaper is racy.
This is especially noticeable on architectures such as powerpc where
clearing a huge pmd requires serialize_against_pte_lookup(). If the pmd
is zapped by the oom reaper during follow_page_mask() after the check
for pmd_none() is bypassed, this ends up deferencing a NULL ptl or a
kernel oops.
Fix this by manually freeing all possible memory from the mm before
doing the munlock and then setting MMF_OOM_SKIP. The oom reaper can not
run on the mm anymore so the munlock is safe to do in exit_mmap(). It
also matches the logic that the oom reaper currently uses for
determining when to set MMF_OOM_SKIP itself, so there's no new risk of
excessive oom killing.
This issue fixes CVE-2018-1000200.
Link: http://lkml.kernel.org/r/[email protected]
Fixes: 212925802454 ("mm: oom: let oom_reap_task and exit_mmap run concurrently")
Signed-off-by: David Rientjes <[email protected]>
Suggested-by: Tetsuo Handa <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
Cc: <[email protected]> [4.14+]
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
static void remove_translation_table(FlowSource_t *fs, exporter_ipfix_domain_t *exporter, uint16_t id) {
	/* Withdraw (unlink and free) the template translation table with the
	 * given template id from this exporter, releasing its extension map.
	 * Silently logs and returns if no table with that id exists. */
	input_translation_t *table, *parent;

	syslog(LOG_INFO, "Process_ipfix: [%u] Withdraw template id: %i",
		exporter->info.id, id);

	parent = NULL;
	table = exporter->input_translation_table;
	while ( table && ( table->id != id ) ) {
		parent = table;
		table = table->next;
	}

	if ( table == NULL ) {
		syslog(LOG_ERR, "Process_ipfix: [%u] Withdraw template id: %i. translation table not found",
			exporter->info.id, id);
		return;
	}

	dbg_printf("\n[%u] Withdraw template ID: %u\n", exporter->info.id, table->id);

	// clear table cache, if this is the table to delete
	if (exporter->current_table == table)
		exporter->current_table = NULL;

	if ( parent ) {
		// remove table from the middle/end of the list
		parent->next = table->next;
	} else {
		// table was the list head: keep the remaining tables.
		// (Bug fix: this used to set the head to NULL, dropping every
		// other template whenever the first one was withdrawn.)
		exporter->input_translation_table = table->next;
	}

	RemoveExtensionMap(fs, table->extension_info.map);
	free(table->sequence);
	free(table->extension_info.map);
	free(table);

} // End of remove_translation_table
| 1 |
[] |
nfdump
|
ff0e855bd1f51bed9fc5d8559c64d3cfb475a5d8
| 285,933,219,564,491,280,000,000,000,000,000,000,000 | 39 |
Fix security issues in netflow_v9.c and ipfix.c
|
void execute_init_command(THD *thd, LEX_STRING *init_command,
                          mysql_rwlock_t *var_lock)
{
  /* Run the server's init_command SQL on 'thd' as if it were a client
   * query, but with the network vio disabled so no result is sent to the
   * client.  'var_lock' protects the init_command system variable. */
  Vio* save_vio;
  ulonglong save_client_capabilities;

  mysql_rwlock_rdlock(var_lock);
  if (!init_command->length)
  {
    mysql_rwlock_unlock(var_lock);
    return;
  }

  /*
    copy the value under a lock, and release the lock.
    init_command has to be executed without a lock held,
    as it may try to change itself
  */
  size_t len= init_command->length;
  char *buf= thd->strmake(init_command->str, len);
  mysql_rwlock_unlock(var_lock);

  THD_STAGE_INFO(thd, stage_execution_of_init_command);
  save_client_capabilities= thd->client_capabilities;
  /* Allow the command string to contain several ';'-separated queries. */
  thd->client_capabilities|= CLIENT_MULTI_QUERIES;
  /*
    We don't need return result of execution to client side.
    To forbid this we should set thd->net.vio to 0.
  */
  save_vio= thd->net.vio;
  thd->net.vio= 0;
  thd->clear_error(1);
  dispatch_command(COM_QUERY, thd, buf, (uint)len, FALSE, FALSE);
  /* Restore the saved connection state regardless of the query outcome. */
  thd->client_capabilities= save_client_capabilities;
  thd->net.vio= save_vio;
}
| 0 |
[
"CWE-703"
] |
server
|
39feab3cd31b5414aa9b428eaba915c251ac34a2
| 105,168,513,657,641,300,000,000,000,000,000,000,000 | 37 |
MDEV-26412 Server crash in Item_field::fix_outer_field for INSERT SELECT
IF an INSERT/REPLACE SELECT statement contained an ON expression in the top
level select and this expression used a subquery with a column reference
that could not be resolved then an attempt to resolve this reference as
an outer reference caused a crash of the server. This happened because the
outer context field in the Name_resolution_context structure was not set
to NULL for such references. Rather it pointed to the first element in
the select_stack.
Note that starting from 10.4 we cannot use the SELECT_LEX::outer_select()
method when parsing a SELECT construct.
Approved by Oleksandr Byelkin <[email protected]>
|
static void test_json_append_escaped_data(void)
{
	/* Exercise json_append_escaped_data() with control characters,
	 * embedded NUL bytes, a 2-byte UTF-8 sequence and a 4-byte UTF-8
	 * sequence, and verify the exact escaped output. */
	static const unsigned char input[] =
		"\b\f\r\n\t\"\\\000\001\002-\xC3\xA4\xf0\x90\x90\xb7";
	string_t *dest = t_str_new(32);

	test_begin("json_append_escaped()");
	json_append_escaped_data(dest, input, sizeof(input)-1);
	test_assert(strcmp(str_c(dest), "\\b\\f\\r\\n\\t\\\"\\\\\\u0000\\u0001\\u0002-\\u00e4\\ud801\\udc37") == 0);
	test_end();
}
| 1 |
[] |
core
|
973769d74433de3c56c4ffdf4f343cb35d98e4f7
| 240,045,088,701,042,530,000,000,000,000,000,000,000 | 11 |
lib: json - Escape invalid UTF-8 as unicode bytes
This prevents dovecot from crashing if invalid UTF-8 input
is given.
|
static int link_initialized_and_synced(sd_netlink *rtnl, sd_netlink_message *m,
                                       void *userdata) {
        /* Netlink callback run once a pending link is fully initialized:
         * resolve its .network config (or mark it unmanaged), set up the
         * bound-by/bound-to carrier relationships and start configuration.
         * Returns 1 when handled, a negative errno on failure. */
        _cleanup_link_unref_ Link *link = userdata;
        Network *network;
        int r;

        assert(link);
        assert(link->ifname);
        assert(link->manager);

        /* Only act on links still waiting for their initial sync. */
        if (link->state != LINK_STATE_PENDING)
                return 1;

        log_link_debug(link, "Link state is up-to-date");

        r = link_new_bound_by_list(link);
        if (r < 0)
                return r;

        r = link_handle_bound_by_list(link);
        if (r < 0)
                return r;

        if (!link->network) {
                r = network_get(link->manager, link->udev_device, link->ifname,
                                &link->mac, &network);
                if (r == -ENOENT) {
                        /* No .network file matches: leave it unmanaged. */
                        link_enter_unmanaged(link);
                        return 1;
                } else if (r < 0)
                        return r;

                /* Loopback links ignore addressing/DHCP configuration;
                 * warn so the user knows those directives are dropped. */
                if (link->flags & IFF_LOOPBACK) {
                        if (network->link_local != ADDRESS_FAMILY_NO)
                                log_link_debug(link, "Ignoring link-local autoconfiguration for loopback link");

                        if (network->dhcp != ADDRESS_FAMILY_NO)
                                log_link_debug(link, "Ignoring DHCP clients for loopback link");

                        if (network->dhcp_server)
                                log_link_debug(link, "Ignoring DHCP server for loopback link");
                }

                r = network_apply(link->manager, network, link);
                if (r < 0)
                        return r;
        }

        r = link_new_bound_to_list(link);
        if (r < 0)
                return r;

        r = link_configure(link);
        if (r < 0)
                return r;

        return 1;
}
| 0 |
[
"CWE-120"
] |
systemd
|
f5a8c43f39937d97c9ed75e3fe8621945b42b0db
| 26,915,754,680,520,165,000,000,000,000,000,000,000 | 58 |
networkd: IPv6 router discovery - follow IPv6AcceptRouterAdvertisemnt=
The previous behavior:
When DHCPv6 was enabled, router discover was performed first, and then DHCPv6 was
enabled only if the relevant flags were passed in the Router Advertisement message.
Moreover, router discovery was performed even if AcceptRouterAdvertisements=false,
moreover, even if router advertisements were accepted (by the kernel) the flags
indicating that DHCPv6 should be performed were ignored.
New behavior:
If RouterAdvertisements are accepted, and either no routers are found, or an
advertisement is received indicating DHCPv6 should be performed, the DHCPv6
client is started. Moreover, the DHCP option now truly enables the DHCPv6
client regardless of router discovery (though it will probably not be
very useful to get a lease withotu any routes, this seems the more consistent
approach).
The recommended default setting should be to set DHCP=ipv4 and to leave
IPv6AcceptRouterAdvertisements unset.
|
uint get_errcode_from_name(char *error_name, char *error_end)
{
  /* Translate a symbolic SQL error name (the [error_name, error_end)
   * range, e.g. "ER_DUP_ENTRY") into its numeric code by scanning the
   * global_error_names table.  Dies on an unknown name. */
  size_t name_len= (size_t) (error_end - error_name);
  st_error *err;
  DBUG_ENTER("get_errcode_from_name");
  DBUG_PRINT("enter", ("error_name: %s", error_name));

  for (err= global_error_names; err->name; err++)
  {
    /*
      Require an exact-length match so that e.g. ER_WRONG_VALUE does
      not match ER_WRONG_VALUE_COUNT.
    */
    if (strlen(err->name) == name_len &&
        !strncmp(error_name, err->name, name_len))
      DBUG_RETURN(err->code);
  }

  die("Unknown SQL error name '%s'", error_name);
  DBUG_RETURN(0);
}
| 0 |
[
"CWE-284",
"CWE-295"
] |
mysql-server
|
3bd5589e1a5a93f9c224badf983cd65c45215390
| 80,333,706,480,159,310,000,000,000,000,000,000,000 | 26 |
WL#6791 : Redefine client --ssl option to imply enforced encryption
# Changed the meaning of the --ssl=1 option of all client binaries
to mean force ssl, not try ssl and fail over to eunecrypted
# Added a new MYSQL_OPT_SSL_ENFORCE mysql_options()
option to specify that an ssl connection is required.
# Added a new macro SSL_SET_OPTIONS() to the client
SSL handling headers that sets all the relevant SSL options at
once.
# Revamped all of the current native clients to use the new macro
# Removed some Windows line endings.
# Added proper handling of the new option into the ssl helper
headers.
# If SSL is mandatory assume that the media is secure enough
for the sha256 plugin to do unencrypted password exchange even
before establishing a connection.
# Set the default ssl cipher to DHE-RSA-AES256-SHA if none is
specified.
# updated test cases that require a non-default cipher to spawn
a mysql command line tool binary since mysqltest has no support
for specifying ciphers.
# updated the replication slave connection code to always enforce
SSL if any of the SSL config options is present.
# test cases added and updated.
# added a mysql_get_option() API to return mysql_options()
values. Used the new API inside the sha256 plugin.
# Fixed compilation warnings because of unused variables.
# Fixed test failures (mysql_ssl and bug13115401)
# Fixed whitespace issues.
# Fully implemented the mysql_get_option() function.
# Added a test case for mysql_get_option()
# fixed some trailing whitespace issues
# fixed some uint/int warnings in mysql_client_test.c
# removed shared memory option from non-windows get_options
tests
# moved MYSQL_OPT_LOCAL_INFILE to the uint options
|
/* Resource-list destructor: releases the GD image stored behind an
 * "image" resource when the resource is freed. */
static void php_free_gd_image(zend_rsrc_list_entry *rsrc TSRMLS_DC)
{
	gdImageDestroy((gdImagePtr) rsrc->ptr);
}
| 0 |
[
"CWE-703",
"CWE-189"
] |
php-src
|
2938329ce19cb8c4197dec146c3ec887c6f61d01
| 153,170,157,303,809,260,000,000,000,000,000,000,000 | 4 |
Fixed bug #66356 (Heap Overflow Vulnerability in imagecrop())
And also fixed the bug: arguments are altered after some calls
|
static size_t send_control_msg(VirtIOSerial *vser, void *buf, size_t len)
{
    /* Push one control message of 'len' bytes into the control-in
     * virtqueue and notify the guest.  Returns the number of bytes queued,
     * or 0 when the queue is not ready or has no free buffer available. */
    VirtQueueElement elem;
    VirtQueue *vq;

    vq = vser->c_ivq;
    if (!virtio_queue_ready(vq)) {
        return 0;
    }

    if (!virtqueue_pop(vq, &elem)) {
        return 0;
    }

    /* TODO: detect a buffer that's too short, set NEEDS_RESET */
    /* Scatter the message across the guest-provided in_sg buffers. */
    iov_from_buf(elem.in_sg, elem.in_num, 0, buf, len);

    virtqueue_push(vq, &elem, len);
    virtio_notify(VIRTIO_DEVICE(vser), vq);
    return len;
}
| 0 |
[
"CWE-120",
"CWE-787"
] |
qemu
|
7882080388be5088e72c425b02223c02e6cb4295
| 179,752,260,130,633,400,000,000,000,000,000,000,000 | 20 |
virtio-serial: fix ANY_LAYOUT
Don't assume a specific layout for control messages.
Required by virtio 1.
Signed-off-by: Michael S. Tsirkin <[email protected]>
Reviewed-by: Amit Shah <[email protected]>
Reviewed-by: Jason Wang <[email protected]>
|
jpc_cstate_t *jpc_cstate_create()
{
	/* Allocate a fresh coding-state object with no components registered.
	 * Returns null (0) when allocation fails. */
	jpc_cstate_t *cstate = jas_malloc(sizeof(jpc_cstate_t));

	if (cstate == NULL)
		return 0;

	cstate->numcomps = 0;
	return cstate;
}
| 0 |
[] |
jasper
|
4031ca321d8cb5798c316ab39c7a5dc88a61fdd7
| 128,463,143,019,328,040,000,000,000,000,000,000,000 | 9 |
Incorporated changes from patch
jasper-1.900.3-libjasper-stepsizes-overflow.patch
|
// Shape helper: an Eigen dimension pair describing an n-by-1 (column) extent.
inline Eigen::DSizes<int, 2> NByOne(int n) {
  return Eigen::DSizes<int, 2>(n, 1);
}
| 0 |
[
"CWE-476",
"CWE-787"
] |
tensorflow
|
93f428fd1768df147171ed674fee1fc5ab8309ec
| 309,941,126,370,168,700,000,000,000,000,000,000,000 | 3 |
Fix nullptr deref and heap OOB access in binary cwise ops.
PiperOrigin-RevId: 387936777
Change-Id: I608b8074cec36a982cca622b7144cb2c43e6e19f
|
sds sdsgrowzero(sds s, size_t len) {
    /* Grow 's' to at least 'len' bytes, zero-filling the added region.
     * Returns the (possibly reallocated) string, or NULL on OOM. */
    size_t oldlen = sdslen(s);

    /* Already big enough: nothing to do. */
    if (len <= oldlen) return s;

    s = sdsMakeRoomFor(s, len-oldlen);
    if (s == NULL) return NULL;

    /* Zero the newly exposed region, including the trailing '\0' byte. */
    memset(s+oldlen, 0, len-oldlen+1);
    sdssetlen(s, len);
    return s;
}
| 0 |
[
"CWE-190"
] |
redis
|
d32f2e9999ce003bad0bd2c3bca29f64dcce4433
| 251,292,365,419,002,000,000,000,000,000,000,000,000 | 12 |
Fix integer overflow (CVE-2021-21309). (#8522)
On 32-bit systems, setting the proto-max-bulk-len config parameter to a high value may result with integer overflow and a subsequent heap overflow when parsing an input bulk (CVE-2021-21309).
This fix has two parts:
Set a reasonable limit to the config parameter.
Add additional checks to prevent the problem in other potential but unknown code paths.
|
int EC_POINT_make_affine(const EC_GROUP *group, EC_POINT *point, BN_CTX *ctx)
{
    /* Convert 'point' to affine coordinates via the group method's
     * make_affine hook.  Returns 1 on success, 0 on error (hook missing,
     * or point and group use incompatible methods). */
    if (group->meth->make_affine == 0) {
        ECerr(EC_F_EC_POINT_MAKE_AFFINE, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
        return 0;
    }
    /* Reject points created for a group with a different method. */
    if (!ec_point_is_compat(point, group)) {
        ECerr(EC_F_EC_POINT_MAKE_AFFINE, EC_R_INCOMPATIBLE_OBJECTS);
        return 0;
    }
    return group->meth->make_affine(group, point, ctx);
}
| 0 |
[] |
openssl
|
30c22fa8b1d840036b8e203585738df62a03cec8
| 312,537,216,443,190,940,000,000,000,000,000,000,000 | 12 |
[crypto/ec] for ECC parameters with NULL or zero cofactor, compute it
The cofactor argument to EC_GROUP_set_generator is optional, and SCA
mitigations for ECC currently use it. So the library currently falls
back to very old SCA-vulnerable code if the cofactor is not present.
This PR allows EC_GROUP_set_generator to compute the cofactor for all
curves of cryptographic interest. Steering scalar multiplication to more
SCA-robust code.
This issue affects persisted private keys in explicit parameter form,
where the (optional) cofactor field is zero or absent.
It also affects curves not built-in to the library, but constructed
programatically with explicit parameters, then calling
EC_GROUP_set_generator with a nonsensical value (NULL, zero).
The very old scalar multiplication code is known to be vulnerable to
local uarch attacks, outside of the OpenSSL threat model. New results
suggest the code path is also vulnerable to traditional wall clock
timing attacks.
CVE-2019-1547
Reviewed-by: Matt Caswell <[email protected]>
Reviewed-by: Tomas Mraz <[email protected]>
Reviewed-by: Nicola Tuveri <[email protected]>
(Merged from https://github.com/openssl/openssl/pull/9781)
|
static int sanity_check_int_value(struct snd_card *card,
				  const struct snd_ctl_elem_value *control,
				  const struct snd_ctl_elem_info *info,
				  int i, bool print_error)
{
	/* Validate element 'i' of a user-supplied control value against the
	 * element's declared range and step.  Returns 0 when acceptable,
	 * -EINVAL otherwise (logging details when print_error is set). */
	long long lval, lmin, lmax, lstep;
	u64 rem;

	/* Normalize each control type to a (value, min, max, step) tuple. */
	switch (info->type) {
	default:
	case SNDRV_CTL_ELEM_TYPE_BOOLEAN:
		lval = control->value.integer.value[i];
		lmin = 0;
		lmax = 1;
		lstep = 0;
		break;
	case SNDRV_CTL_ELEM_TYPE_INTEGER:
		lval = control->value.integer.value[i];
		lmin = info->value.integer.min;
		lmax = info->value.integer.max;
		lstep = info->value.integer.step;
		break;
	case SNDRV_CTL_ELEM_TYPE_INTEGER64:
		lval = control->value.integer64.value[i];
		lmin = info->value.integer64.min;
		lmax = info->value.integer64.max;
		lstep = info->value.integer64.step;
		break;
	case SNDRV_CTL_ELEM_TYPE_ENUMERATED:
		lval = control->value.enumerated.item[i];
		lmin = 0;
		lmax = info->value.enumerated.items - 1;
		lstep = 0;
		break;
	}

	/* Range check. */
	if (lval < lmin || lval > lmax) {
		if (print_error)
			dev_err(card->dev,
				"control %i:%i:%i:%s:%i: value out of range %lld (%lld/%lld) at count %i\n",
				control->id.iface, control->id.device,
				control->id.subdevice, control->id.name,
				control->id.index, lval, lmin, lmax, i);
		return -EINVAL;
	}

	/* Step check: value must be an exact multiple of the step size. */
	if (lstep) {
		div64_u64_rem(lval, lstep, &rem);
		if (rem) {
			if (print_error)
				dev_err(card->dev,
					"control %i:%i:%i:%s:%i: unaligned value %lld (step %lld) at count %i\n",
					control->id.iface, control->id.device,
					control->id.subdevice, control->id.name,
					control->id.index, lval, lstep, i);
			return -EINVAL;
		}
	}

	return 0;
}
| 0 |
[
"CWE-416",
"CWE-125"
] |
linux
|
6ab55ec0a938c7f943a4edba3d6514f775983887
| 971,413,356,956,686,700,000,000,000,000,000,000 | 60 |
ALSA: control: Fix an out-of-bounds bug in get_ctl_id_hash()
Since the user can control the arguments provided to the kernel by the
ioctl() system call, an out-of-bounds bug occurs when the 'id->name'
provided by the user does not end with '\0'.
The following log can reveal it:
[ 10.002313] BUG: KASAN: stack-out-of-bounds in snd_ctl_find_id+0x36c/0x3a0
[ 10.002895] Read of size 1 at addr ffff888109f5fe28 by task snd/439
[ 10.004934] Call Trace:
[ 10.007140] snd_ctl_find_id+0x36c/0x3a0
[ 10.007489] snd_ctl_ioctl+0x6cf/0x10e0
Fix this by checking the bound of 'id->name' in the loop.
Fixes: c27e1efb61c5 ("ALSA: control: Use xarray for faster lookups")
Signed-off-by: Zheyu Ma <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Takashi Iwai <[email protected]>
|
int tls1_final_finish_mac(SSL *s,
	     const char *str, int slen, unsigned char *out)
	{
	/* Compute the Finished-message MAC: concatenate the final values of
	 * all active handshake digests into 'buf', then run the TLS PRF over
	 * them.  Returns the MAC length (sizeof buf2 == 12) on success,
	 * 0 on any error. */
	unsigned int i;
	EVP_MD_CTX ctx;
	unsigned char buf[2*EVP_MAX_MD_SIZE];
	unsigned char *q,buf2[12];
	int idx;
	long mask;
	int err=0;
	const EVP_MD *md;

	q=buf;

	/* Make sure any still-buffered handshake data is in the digests. */
	if (s->s3->handshake_buffer)
		if (!ssl3_digest_cached_records(s))
			return 0;

	EVP_MD_CTX_init(&ctx);

	for (idx=0;ssl_get_handshake_digest(idx,&mask,&md);idx++)
		{
		if (mask & ssl_get_algorithm2(s))
			{
			int hashsize = EVP_MD_size(md);
			if (hashsize < 0 || hashsize > (int)(sizeof buf - (size_t)(q-buf)))
				{
				/* internal error: 'buf' is too small for this cipersuite! */
				err = 1;
				}
			else
				{
				/* Check the EVP calls: on failure 'q' would
				 * otherwise receive garbage that ends up in
				 * the Finished MAC. */
				if (!EVP_MD_CTX_copy_ex(&ctx,s->s3->handshake_dgst[idx]) ||
				    !EVP_DigestFinal_ex(&ctx,q,&i) ||
				    i != (unsigned int)hashsize)
					err = 1;
				else
					q+=i;
				}
			}
		}

	if (!tls1_PRF(ssl_get_algorithm2(s),
			str,slen, buf,(int)(q-buf), NULL,0, NULL,0, NULL,0,
			s->session->master_key,s->session->master_key_length,
			out,buf2,sizeof buf2))
		err = 1;
	EVP_MD_CTX_cleanup(&ctx);

	if (err)
		return 0;
	else
		return sizeof buf2;
	}
| 1 |
[] |
openssl
|
0294b2be5f4c11e60620c0018674ff0e17b14238
| 26,070,744,355,786,520,000,000,000,000,000,000,000 | 53 |
Check EVP errors for handshake digests.
Partial mitigation of PR#3200
|
static int pdo_stmt_do_next_rowset(pdo_stmt_t *stmt TSRMLS_DC)
{
/* un-describe */
if (stmt->columns) {
int i;
struct pdo_column_data *cols = stmt->columns;
for (i = 0; i < stmt->column_count; i++) {
efree(cols[i].name);
}
efree(stmt->columns);
stmt->columns = NULL;
stmt->column_count = 0;
}
if (!stmt->methods->next_rowset(stmt TSRMLS_CC)) {
/* Set the executed flag to 0 to reallocate columns on next execute */
stmt->executed = 0;
return 0;
}
pdo_stmt_describe_columns(stmt TSRMLS_CC);
return 1;
| 0 |
[
"CWE-476"
] |
php-src
|
6045de69c7dedcba3eadf7c4bba424b19c81d00d
| 163,760,235,339,055,830,000,000,000,000,000,000,000 | 25 |
Fix bug #73331 - do not try to serialize/unserialize objects wddx can not handle
Proper soltion would be to call serialize/unserialize and deal with the result,
but this requires more work that should be done by wddx maintainer (not me).
|
static void perf_swevent_start_hrtimer(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
s64 period;
if (!is_sampling_event(event))
return;
period = local64_read(&hwc->period_left);
if (period) {
if (period < 0)
period = 10000;
local64_set(&hwc->period_left, 0);
} else {
period = max_t(u64, 10000, hwc->sample_period);
}
__hrtimer_start_range_ns(&hwc->hrtimer,
ns_to_ktime(period), 0,
HRTIMER_MODE_REL_PINNED, 0);
}
| 0 |
[
"CWE-703",
"CWE-189"
] |
linux
|
8176cced706b5e5d15887584150764894e94e02f
| 43,058,227,905,137,440,000,000,000,000,000,000,000 | 21 |
perf: Treat attr.config as u64 in perf_swevent_init()
Trinity discovered that we fail to check all 64 bits of
attr.config passed by user space, resulting to out-of-bounds
access of the perf_swevent_enabled array in
sw_perf_event_destroy().
Introduced in commit b0a873ebb ("perf: Register PMU
implementations").
Signed-off-by: Tommi Rantala <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: [email protected]
Cc: Paul Mackerras <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
|
void ldbLogSourceLine(int lnum) {
char *line = ldbGetSourceLine(lnum);
char *prefix;
int bp = ldbIsBreakpoint(lnum);
int current = ldb.currentline == lnum;
if (current && bp)
prefix = "->#";
else if (current)
prefix = "-> ";
else if (bp)
prefix = " #";
else
prefix = " ";
sds thisline = sdscatprintf(sdsempty(),"%s%-3d %s", prefix, lnum, line);
ldbLog(thisline);
}
| 0 |
[
"CWE-703",
"CWE-125"
] |
redis
|
6ac3c0b7abd35f37201ed2d6298ecef4ea1ae1dd
| 124,902,395,199,830,240,000,000,000,000,000,000,000 | 17 |
Fix protocol parsing on 'ldbReplParseCommand' (CVE-2021-32672)
The protocol parsing on 'ldbReplParseCommand' (LUA debugging)
Assumed protocol correctness. This means that if the following
is given:
*1
$100
test
The parser will try to read additional 94 unallocated bytes after
the client buffer.
This commit fixes this issue by validating that there are actually enough
bytes to read. It also limits the amount of data that can be sent by
the debugger client to 1M so the client will not be able to explode
the memory.
|
bool JBIG2Stream::readSymbolDictSeg(unsigned int segNum, unsigned int length, unsigned int *refSegs, unsigned int nRefSegs)
{
std::unique_ptr<JBIG2SymbolDict> symbolDict;
const JBIG2HuffmanTable *huffDHTable, *huffDWTable;
const JBIG2HuffmanTable *huffBMSizeTable, *huffAggInstTable;
JBIG2Segment *seg;
std::vector<JBIG2Segment *> codeTables;
JBIG2SymbolDict *inputSymbolDict;
unsigned int flags, sdTemplate, sdrTemplate, huff, refAgg;
unsigned int huffDH, huffDW, huffBMSize, huffAggInst;
unsigned int contextUsed, contextRetained;
int sdATX[4], sdATY[4], sdrATX[2], sdrATY[2];
unsigned int numExSyms, numNewSyms, numInputSyms, symCodeLen;
JBIG2Bitmap **bitmaps;
JBIG2Bitmap *collBitmap, *refBitmap;
unsigned int *symWidths;
unsigned int symHeight, symWidth, totalWidth, x, symID;
int dh = 0, dw, refAggNum, refDX = 0, refDY = 0, bmSize;
bool ex;
int run, cnt, c;
unsigned int i, j, k;
unsigned char *p;
symWidths = nullptr;
// symbol dictionary flags
if (!readUWord(&flags)) {
goto eofError;
}
sdTemplate = (flags >> 10) & 3;
sdrTemplate = (flags >> 12) & 1;
huff = flags & 1;
refAgg = (flags >> 1) & 1;
huffDH = (flags >> 2) & 3;
huffDW = (flags >> 4) & 3;
huffBMSize = (flags >> 6) & 1;
huffAggInst = (flags >> 7) & 1;
contextUsed = (flags >> 8) & 1;
contextRetained = (flags >> 9) & 1;
// symbol dictionary AT flags
if (!huff) {
if (sdTemplate == 0) {
if (!readByte(&sdATX[0]) || !readByte(&sdATY[0]) || !readByte(&sdATX[1]) || !readByte(&sdATY[1]) || !readByte(&sdATX[2]) || !readByte(&sdATY[2]) || !readByte(&sdATX[3]) || !readByte(&sdATY[3])) {
goto eofError;
}
} else {
if (!readByte(&sdATX[0]) || !readByte(&sdATY[0])) {
goto eofError;
}
}
}
// symbol dictionary refinement AT flags
if (refAgg && !sdrTemplate) {
if (!readByte(&sdrATX[0]) || !readByte(&sdrATY[0]) || !readByte(&sdrATX[1]) || !readByte(&sdrATY[1])) {
goto eofError;
}
}
// SDNUMEXSYMS and SDNUMNEWSYMS
if (!readULong(&numExSyms) || !readULong(&numNewSyms)) {
goto eofError;
}
// get referenced segments: input symbol dictionaries and code tables
numInputSyms = 0;
for (i = 0; i < nRefSegs; ++i) {
// This is need by bug 12014, returning false makes it not crash
// but we end up with a empty page while acroread is able to render
// part of it
if ((seg = findSegment(refSegs[i]))) {
if (seg->getType() == jbig2SegSymbolDict) {
j = ((JBIG2SymbolDict *)seg)->getSize();
if (numInputSyms > UINT_MAX - j) {
error(errSyntaxError, curStr->getPos(), "Too many input symbols in JBIG2 symbol dictionary");
goto eofError;
}
numInputSyms += j;
} else if (seg->getType() == jbig2SegCodeTable) {
codeTables.push_back(seg);
}
} else {
return false;
}
}
if (numInputSyms > UINT_MAX - numNewSyms) {
error(errSyntaxError, curStr->getPos(), "Too many input symbols in JBIG2 symbol dictionary");
goto eofError;
}
// compute symbol code length, per 6.5.8.2.3
// symCodeLen = ceil( log2( numInputSyms + numNewSyms ) )
i = numInputSyms + numNewSyms;
if (i <= 1) {
symCodeLen = huff ? 1 : 0;
} else {
--i;
symCodeLen = 0;
// i = floor((numSyms-1) / 2^symCodeLen)
while (i > 0) {
++symCodeLen;
i >>= 1;
}
}
// get the input symbol bitmaps
bitmaps = (JBIG2Bitmap **)gmallocn_checkoverflow(numInputSyms + numNewSyms, sizeof(JBIG2Bitmap *));
if (!bitmaps && (numInputSyms + numNewSyms > 0)) {
error(errSyntaxError, curStr->getPos(), "Too many input symbols in JBIG2 symbol dictionary");
goto eofError;
}
for (i = 0; i < numInputSyms + numNewSyms; ++i) {
bitmaps[i] = nullptr;
}
k = 0;
inputSymbolDict = nullptr;
for (i = 0; i < nRefSegs; ++i) {
seg = findSegment(refSegs[i]);
if (seg != nullptr && seg->getType() == jbig2SegSymbolDict) {
inputSymbolDict = (JBIG2SymbolDict *)seg;
for (j = 0; j < inputSymbolDict->getSize(); ++j) {
bitmaps[k++] = inputSymbolDict->getBitmap(j);
}
}
}
// get the Huffman tables
huffDHTable = huffDWTable = nullptr; // make gcc happy
huffBMSizeTable = huffAggInstTable = nullptr; // make gcc happy
i = 0;
if (huff) {
if (huffDH == 0) {
huffDHTable = huffTableD;
} else if (huffDH == 1) {
huffDHTable = huffTableE;
} else {
if (i >= codeTables.size()) {
goto codeTableError;
}
huffDHTable = ((JBIG2CodeTable *)codeTables[i++])->getHuffTable();
}
if (huffDW == 0) {
huffDWTable = huffTableB;
} else if (huffDW == 1) {
huffDWTable = huffTableC;
} else {
if (i >= codeTables.size()) {
goto codeTableError;
}
huffDWTable = ((JBIG2CodeTable *)codeTables[i++])->getHuffTable();
}
if (huffBMSize == 0) {
huffBMSizeTable = huffTableA;
} else {
if (i >= codeTables.size()) {
goto codeTableError;
}
huffBMSizeTable = ((JBIG2CodeTable *)codeTables[i++])->getHuffTable();
}
if (huffAggInst == 0) {
huffAggInstTable = huffTableA;
} else {
if (i >= codeTables.size()) {
goto codeTableError;
}
huffAggInstTable = ((JBIG2CodeTable *)codeTables[i++])->getHuffTable();
}
}
// set up the Huffman decoder
if (huff) {
huffDecoder->reset();
// set up the arithmetic decoder
} else {
if (contextUsed && inputSymbolDict) {
resetGenericStats(sdTemplate, inputSymbolDict->getGenericRegionStats());
} else {
resetGenericStats(sdTemplate, nullptr);
}
if (!resetIntStats(symCodeLen)) {
goto syntaxError;
}
arithDecoder->start();
}
// set up the arithmetic decoder for refinement/aggregation
if (refAgg) {
if (contextUsed && inputSymbolDict) {
resetRefinementStats(sdrTemplate, inputSymbolDict->getRefinementRegionStats());
} else {
resetRefinementStats(sdrTemplate, nullptr);
}
}
// allocate symbol widths storage
if (huff && !refAgg) {
symWidths = (unsigned int *)gmallocn_checkoverflow(numNewSyms, sizeof(unsigned int));
if (numNewSyms > 0 && !symWidths) {
goto syntaxError;
}
}
symHeight = 0;
i = 0;
while (i < numNewSyms) {
// read the height class delta height
if (huff) {
huffDecoder->decodeInt(&dh, huffDHTable);
} else {
arithDecoder->decodeInt(&dh, iadhStats);
}
if (dh < 0 && (unsigned int)-dh >= symHeight) {
error(errSyntaxError, curStr->getPos(), "Bad delta-height value in JBIG2 symbol dictionary");
goto syntaxError;
}
symHeight += dh;
if (unlikely(symHeight > 0x40000000)) {
error(errSyntaxError, curStr->getPos(), "Bad height value in JBIG2 symbol dictionary");
goto syntaxError;
}
symWidth = 0;
totalWidth = 0;
j = i;
// read the symbols in this height class
while (true) {
// read the delta width
if (huff) {
if (!huffDecoder->decodeInt(&dw, huffDWTable)) {
break;
}
} else {
if (!arithDecoder->decodeInt(&dw, iadwStats)) {
break;
}
}
if (dw < 0 && (unsigned int)-dw >= symWidth) {
error(errSyntaxError, curStr->getPos(), "Bad delta-height value in JBIG2 symbol dictionary");
goto syntaxError;
}
symWidth += dw;
if (i >= numNewSyms) {
error(errSyntaxError, curStr->getPos(), "Too many symbols in JBIG2 symbol dictionary");
goto syntaxError;
}
// using a collective bitmap, so don't read a bitmap here
if (huff && !refAgg) {
symWidths[i] = symWidth;
totalWidth += symWidth;
// refinement/aggregate coding
} else if (refAgg) {
if (huff) {
if (!huffDecoder->decodeInt(&refAggNum, huffAggInstTable)) {
break;
}
} else {
if (!arithDecoder->decodeInt(&refAggNum, iaaiStats)) {
break;
}
}
//~ This special case was added about a year before the final draft
//~ of the JBIG2 spec was released. I have encountered some old
//~ JBIG2 images that predate it.
//~ if (0) {
if (refAggNum == 1) {
if (huff) {
symID = huffDecoder->readBits(symCodeLen);
huffDecoder->decodeInt(&refDX, huffTableO);
huffDecoder->decodeInt(&refDY, huffTableO);
huffDecoder->decodeInt(&bmSize, huffTableA);
huffDecoder->reset();
arithDecoder->start();
} else {
if (iaidStats == nullptr) {
goto syntaxError;
}
symID = arithDecoder->decodeIAID(symCodeLen, iaidStats);
arithDecoder->decodeInt(&refDX, iardxStats);
arithDecoder->decodeInt(&refDY, iardyStats);
}
if (symID >= numInputSyms + i) {
error(errSyntaxError, curStr->getPos(), "Invalid symbol ID in JBIG2 symbol dictionary");
goto syntaxError;
}
refBitmap = bitmaps[symID];
if (unlikely(refBitmap == nullptr)) {
error(errSyntaxError, curStr->getPos(), "Invalid ref bitmap for symbol ID {0:ud} in JBIG2 symbol dictionary", symID);
goto syntaxError;
}
bitmaps[numInputSyms + i] = readGenericRefinementRegion(symWidth, symHeight, sdrTemplate, false, refBitmap, refDX, refDY, sdrATX, sdrATY).release();
//~ do we need to use the bmSize value here (in Huffman mode)?
} else {
bitmaps[numInputSyms + i] = readTextRegion(huff, true, symWidth, symHeight, refAggNum, 0, numInputSyms + i, nullptr, symCodeLen, bitmaps, 0, 0, 0, 1, 0, huffTableF, huffTableH, huffTableK, huffTableO, huffTableO,
huffTableO, huffTableO, huffTableA, sdrTemplate, sdrATX, sdrATY)
.release();
if (unlikely(!bitmaps[numInputSyms + i])) {
error(errSyntaxError, curStr->getPos(), "NULL bitmap in readTextRegion");
goto syntaxError;
}
}
// non-ref/agg coding
} else {
bitmaps[numInputSyms + i] = readGenericBitmap(false, symWidth, symHeight, sdTemplate, false, false, nullptr, sdATX, sdATY, 0).release();
if (unlikely(!bitmaps[numInputSyms + i])) {
error(errSyntaxError, curStr->getPos(), "NULL bitmap in readGenericBitmap");
goto syntaxError;
}
}
++i;
}
// read the collective bitmap
if (huff && !refAgg) {
huffDecoder->decodeInt(&bmSize, huffBMSizeTable);
huffDecoder->reset();
if (bmSize == 0) {
collBitmap = new JBIG2Bitmap(0, totalWidth, symHeight);
bmSize = symHeight * ((totalWidth + 7) >> 3);
p = collBitmap->getDataPtr();
if (unlikely(p == nullptr)) {
delete collBitmap;
goto syntaxError;
}
for (k = 0; k < (unsigned int)bmSize; ++k) {
if ((c = curStr->getChar()) == EOF) {
memset(p, 0, bmSize - k);
break;
}
*p++ = (unsigned char)c;
}
} else {
collBitmap = readGenericBitmap(true, totalWidth, symHeight, 0, false, false, nullptr, nullptr, nullptr, bmSize).release();
}
if (likely(collBitmap != nullptr)) {
x = 0;
for (; j < i; ++j) {
bitmaps[numInputSyms + j] = collBitmap->getSlice(x, 0, symWidths[j], symHeight);
x += symWidths[j];
}
delete collBitmap;
} else {
error(errSyntaxError, curStr->getPos(), "collBitmap was null");
goto syntaxError;
}
}
}
// create the symbol dict object
symbolDict = std::make_unique<JBIG2SymbolDict>(segNum, numExSyms);
if (!symbolDict->isOk()) {
goto syntaxError;
}
// exported symbol list
i = j = 0;
ex = false;
run = 0; // initialize it once in case the first decodeInt fails
// we do not want to use uninitialized memory
while (i < numInputSyms + numNewSyms) {
if (huff) {
huffDecoder->decodeInt(&run, huffTableA);
} else {
arithDecoder->decodeInt(&run, iaexStats);
}
if (i + run > numInputSyms + numNewSyms || (ex && j + run > numExSyms)) {
error(errSyntaxError, curStr->getPos(), "Too many exported symbols in JBIG2 symbol dictionary");
for (; j < numExSyms; ++j) {
symbolDict->setBitmap(j, nullptr);
}
goto syntaxError;
}
if (ex) {
for (cnt = 0; cnt < run; ++cnt) {
symbolDict->setBitmap(j++, new JBIG2Bitmap(bitmaps[i++]));
}
} else {
i += run;
}
ex = !ex;
}
if (j != numExSyms) {
error(errSyntaxError, curStr->getPos(), "Too few symbols in JBIG2 symbol dictionary");
for (; j < numExSyms; ++j) {
symbolDict->setBitmap(j, nullptr);
}
goto syntaxError;
}
for (i = 0; i < numNewSyms; ++i) {
delete bitmaps[numInputSyms + i];
}
gfree(bitmaps);
if (symWidths) {
gfree(symWidths);
}
// save the arithmetic decoder stats
if (!huff && contextRetained) {
symbolDict->setGenericRegionStats(genericRegionStats->copy());
if (refAgg) {
symbolDict->setRefinementRegionStats(refinementRegionStats->copy());
}
}
// store the new symbol dict
segments.push_back(std::move(symbolDict));
return true;
codeTableError:
error(errSyntaxError, curStr->getPos(), "Missing code table in JBIG2 symbol dictionary");
syntaxError:
for (i = 0; i < numNewSyms; ++i) {
if (bitmaps[numInputSyms + i]) {
delete bitmaps[numInputSyms + i];
}
}
gfree(bitmaps);
if (symWidths) {
gfree(symWidths);
}
return false;
eofError:
error(errSyntaxError, curStr->getPos(), "Unexpected EOF in JBIG2 stream");
return false;
}
| 0 |
[
"CWE-476",
"CWE-190"
] |
poppler
|
27354e9d9696ee2bc063910a6c9a6b27c5184a52
| 68,362,592,719,100,150,000,000,000,000,000,000,000 | 436 |
JBIG2Stream: Fix crash on broken file
https://github.com/jeffssh/CVE-2021-30860
Thanks to David Warren for the heads up
|
static int shmem_getpage(struct inode *inode, unsigned long idx,
struct page **pagep, enum sgp_type sgp, int *type)
{
struct address_space *mapping = inode->i_mapping;
struct shmem_inode_info *info = SHMEM_I(inode);
struct shmem_sb_info *sbinfo;
struct page *filepage = *pagep;
struct page *swappage;
swp_entry_t *entry;
swp_entry_t swap;
int error;
if (idx >= SHMEM_MAX_INDEX)
return -EFBIG;
if (type)
*type = 0;
/*
* Normally, filepage is NULL on entry, and either found
* uptodate immediately, or allocated and zeroed, or read
* in under swappage, which is then assigned to filepage.
* But shmem_readpage and shmem_write_begin pass in a locked
* filepage, which may be found not uptodate by other callers
* too, and may need to be copied from the swappage read in.
*/
repeat:
if (!filepage)
filepage = find_lock_page(mapping, idx);
if (filepage && PageUptodate(filepage))
goto done;
error = 0;
if (sgp == SGP_QUICK)
goto failed;
spin_lock(&info->lock);
shmem_recalc_inode(inode);
entry = shmem_swp_alloc(info, idx, sgp);
if (IS_ERR(entry)) {
spin_unlock(&info->lock);
error = PTR_ERR(entry);
goto failed;
}
swap = *entry;
if (swap.val) {
/* Look it up and read it in.. */
swappage = lookup_swap_cache(swap);
if (!swappage) {
shmem_swp_unmap(entry);
/* here we actually do the io */
if (type && !(*type & VM_FAULT_MAJOR)) {
__count_vm_event(PGMAJFAULT);
*type |= VM_FAULT_MAJOR;
}
spin_unlock(&info->lock);
swappage = shmem_swapin(info, swap, idx);
if (!swappage) {
spin_lock(&info->lock);
entry = shmem_swp_alloc(info, idx, sgp);
if (IS_ERR(entry))
error = PTR_ERR(entry);
else {
if (entry->val == swap.val)
error = -ENOMEM;
shmem_swp_unmap(entry);
}
spin_unlock(&info->lock);
if (error)
goto failed;
goto repeat;
}
wait_on_page_locked(swappage);
page_cache_release(swappage);
goto repeat;
}
/* We have to do this with page locked to prevent races */
if (TestSetPageLocked(swappage)) {
shmem_swp_unmap(entry);
spin_unlock(&info->lock);
wait_on_page_locked(swappage);
page_cache_release(swappage);
goto repeat;
}
if (PageWriteback(swappage)) {
shmem_swp_unmap(entry);
spin_unlock(&info->lock);
wait_on_page_writeback(swappage);
unlock_page(swappage);
page_cache_release(swappage);
goto repeat;
}
if (!PageUptodate(swappage)) {
shmem_swp_unmap(entry);
spin_unlock(&info->lock);
unlock_page(swappage);
page_cache_release(swappage);
error = -EIO;
goto failed;
}
if (filepage) {
shmem_swp_set(info, entry, 0);
shmem_swp_unmap(entry);
delete_from_swap_cache(swappage);
spin_unlock(&info->lock);
copy_highpage(filepage, swappage);
unlock_page(swappage);
page_cache_release(swappage);
flush_dcache_page(filepage);
SetPageUptodate(filepage);
set_page_dirty(filepage);
swap_free(swap);
} else if (!(error = move_from_swap_cache(
swappage, idx, mapping))) {
info->flags |= SHMEM_PAGEIN;
shmem_swp_set(info, entry, 0);
shmem_swp_unmap(entry);
spin_unlock(&info->lock);
filepage = swappage;
swap_free(swap);
} else {
shmem_swp_unmap(entry);
spin_unlock(&info->lock);
unlock_page(swappage);
page_cache_release(swappage);
if (error == -ENOMEM) {
/* let kswapd refresh zone for GFP_ATOMICs */
congestion_wait(WRITE, HZ/50);
}
goto repeat;
}
} else if (sgp == SGP_READ && !filepage) {
shmem_swp_unmap(entry);
filepage = find_get_page(mapping, idx);
if (filepage &&
(!PageUptodate(filepage) || TestSetPageLocked(filepage))) {
spin_unlock(&info->lock);
wait_on_page_locked(filepage);
page_cache_release(filepage);
filepage = NULL;
goto repeat;
}
spin_unlock(&info->lock);
} else {
shmem_swp_unmap(entry);
sbinfo = SHMEM_SB(inode->i_sb);
if (sbinfo->max_blocks) {
spin_lock(&sbinfo->stat_lock);
if (sbinfo->free_blocks == 0 ||
shmem_acct_block(info->flags)) {
spin_unlock(&sbinfo->stat_lock);
spin_unlock(&info->lock);
error = -ENOSPC;
goto failed;
}
sbinfo->free_blocks--;
inode->i_blocks += BLOCKS_PER_PAGE;
spin_unlock(&sbinfo->stat_lock);
} else if (shmem_acct_block(info->flags)) {
spin_unlock(&info->lock);
error = -ENOSPC;
goto failed;
}
if (!filepage) {
spin_unlock(&info->lock);
filepage = shmem_alloc_page(mapping_gfp_mask(mapping),
info,
idx);
if (!filepage) {
shmem_unacct_blocks(info->flags, 1);
shmem_free_blocks(inode, 1);
error = -ENOMEM;
goto failed;
}
spin_lock(&info->lock);
entry = shmem_swp_alloc(info, idx, sgp);
if (IS_ERR(entry))
error = PTR_ERR(entry);
else {
swap = *entry;
shmem_swp_unmap(entry);
}
if (error || swap.val || 0 != add_to_page_cache_lru(
filepage, mapping, idx, GFP_ATOMIC)) {
spin_unlock(&info->lock);
page_cache_release(filepage);
shmem_unacct_blocks(info->flags, 1);
shmem_free_blocks(inode, 1);
filepage = NULL;
if (error)
goto failed;
goto repeat;
}
info->flags |= SHMEM_PAGEIN;
}
info->alloced++;
spin_unlock(&info->lock);
flush_dcache_page(filepage);
SetPageUptodate(filepage);
}
done:
if (*pagep != filepage) {
*pagep = filepage;
if (sgp != SGP_FAULT)
unlock_page(filepage);
}
return 0;
failed:
if (*pagep != filepage) {
unlock_page(filepage);
page_cache_release(filepage);
}
return error;
}
| 1 |
[
"CWE-200"
] |
linux-2.6
|
e84e2e132c9c66d8498e7710d4ea532d1feaaac5
| 337,224,798,820,921,130,000,000,000,000,000,000,000 | 221 |
tmpfs: restore missing clear_highpage
tmpfs was misconverted to __GFP_ZERO in 2.6.11. There's an unusual case in
which shmem_getpage receives the page from its caller instead of allocating.
We must cover this case by clear_highpage before SetPageUptodate, as before.
Signed-off-by: Hugh Dickins <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
static void test_bug42373()
{
int rc;
MYSQL con;
MYSQL_STMT *stmt;
DBUG_ENTER("test_bug42373");
myheader("test_42373");
rc= mysql_query(mysql, "DROP PROCEDURE IF EXISTS p1");
myquery(rc);
rc= mysql_query(mysql, "CREATE PROCEDURE p1()"
" BEGIN"
" SELECT 1;"
" INSERT INTO t1 VALUES (2);"
"END;");
myquery(rc);
rc= mysql_query(mysql, "DROP TABLE IF EXISTS t1");
myquery(rc);
rc= mysql_query(mysql, "CREATE TABLE t1 (a INT)");
myquery(rc);
/* Try with a stored procedure. */
DIE_UNLESS(mysql_client_init(&con));
mysql_options(&con, MYSQL_INIT_COMMAND, "CALL p1()");
DIE_UNLESS(mysql_real_connect(&con, opt_host, opt_user, opt_password,
current_db, opt_port, opt_unix_socket,
CLIENT_MULTI_STATEMENTS|CLIENT_MULTI_RESULTS));
stmt= mysql_simple_prepare(&con, "SELECT a FROM t1");
check_stmt(stmt);
rc= mysql_stmt_execute(stmt);
check_execute(stmt, rc);
rc= my_process_stmt_result(stmt);
DIE_UNLESS(rc == 1);
mysql_stmt_close(stmt);
/* Now try with a multi-statement. */
DIE_UNLESS(mysql_client_init(&con));
mysql_options(&con, MYSQL_INIT_COMMAND,
"SELECT 3; INSERT INTO t1 VALUES (4)");
DIE_UNLESS(mysql_real_connect(&con, opt_host, opt_user, opt_password,
current_db, opt_port, opt_unix_socket,
CLIENT_MULTI_STATEMENTS|CLIENT_MULTI_RESULTS));
stmt= mysql_simple_prepare(&con, "SELECT a FROM t1");
check_stmt(stmt);
rc= mysql_stmt_execute(stmt);
check_execute(stmt, rc);
rc= my_process_stmt_result(stmt);
DIE_UNLESS(rc == 2);
mysql_stmt_close(stmt);
mysql_close(&con);
rc= mysql_query(mysql, "DROP TABLE t1");
myquery(rc);
rc= mysql_query(mysql, "DROP PROCEDURE p1");
myquery(rc);
DBUG_VOID_RETURN;
}
| 0 |
[
"CWE-416"
] |
server
|
eef21014898d61e77890359d6546d4985d829ef6
| 247,922,637,536,102,000,000,000,000,000,000,000,000 | 75 |
MDEV-11933 Wrong usage of linked list in mysql_prune_stmt_list
mysql_prune_stmt_list() was walking the list following
element->next pointers, but inside the loop it was invoking
list_add(element) that modified element->next. So, mysql_prune_stmt_list()
failed to visit and reset all elements, and some of them were left
with pointers to invalid MYSQL.
|
static int sd_format_disk_name(char *prefix, int index, char *buf, int buflen)
{
const int base = 'z' - 'a' + 1;
char *begin = buf + strlen(prefix);
char *end = buf + buflen;
char *p;
int unit;
p = end - 1;
*p = '\0';
unit = base;
do {
if (p == begin)
return -EINVAL;
*--p = 'a' + (index % unit);
index = (index / unit) - 1;
} while (index >= 0);
memmove(begin, p, end - p);
memcpy(buf, prefix, strlen(prefix));
return 0;
}
| 0 |
[
"CWE-284",
"CWE-264"
] |
linux
|
0bfc96cb77224736dfa35c3c555d37b3646ef35e
| 323,352,674,342,519,600,000,000,000,000,000,000,000 | 23 |
block: fail SCSI passthrough ioctls on partition devices
Linux allows executing the SG_IO ioctl on a partition or LVM volume, and
will pass the command to the underlying block device. This is
well-known, but it is also a large security problem when (via Unix
permissions, ACLs, SELinux or a combination thereof) a program or user
needs to be granted access only to part of the disk.
This patch lets partitions forward a small set of harmless ioctls;
others are logged with printk so that we can see which ioctls are
actually sent. In my tests only CDROM_GET_CAPABILITY actually occurred.
Of course it was being sent to a (partition on a) hard disk, so it would
have failed with ENOTTY and the patch isn't changing anything in
practice. Still, I'm treating it specially to avoid spamming the logs.
In principle, this restriction should include programs running with
CAP_SYS_RAWIO. If for example I let a program access /dev/sda2 and
/dev/sdb, it still should not be able to read/write outside the
boundaries of /dev/sda2 independent of the capabilities. However, for
now programs with CAP_SYS_RAWIO will still be allowed to send the
ioctls. Their actions will still be logged.
This patch does not affect the non-libata IDE driver. That driver
however already tests for bd != bd->bd_contains before issuing some
ioctl; it could be restricted further to forbid these ioctls even for
programs running with CAP_SYS_ADMIN/CAP_SYS_RAWIO.
Cc: [email protected]
Cc: Jens Axboe <[email protected]>
Cc: James Bottomley <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
[ Make it also print the command name when warning - Linus ]
Signed-off-by: Linus Torvalds <[email protected]>
|
void PackLinuxElf32::updateLoader(OutputFile * /*fo*/)
{
unsigned start = linker->getSymbolOffset("_start");
unsigned vbase = get_te32(&elfout.phdr[C_TEXT].p_vaddr);
set_te32(&elfout.ehdr.e_entry, start + sz_pack2 + vbase);
}
| 0 |
[
"CWE-476",
"CWE-415"
] |
upx
|
90279abdfcd235172eab99651043051188938dcc
| 266,635,919,378,015,650,000,000,000,000,000,000,000 | 6 |
PackLinuxElf::canUnpack must checkEhdr() for ELF input
https://github.com/upx/upx/issues/485
modified: p_lx_elf.cpp
|
void rpm_sysfs_remove(struct device *dev)
{
sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
}
| 0 |
[
"CWE-787"
] |
linux
|
aa838896d87af561a33ecefea1caa4c15a68bc47
| 149,728,805,752,409,810,000,000,000,000,000,000,000 | 4 |
drivers core: Use sysfs_emit and sysfs_emit_at for show(device *...) functions
Convert the various sprintf fmaily calls in sysfs device show functions
to sysfs_emit and sysfs_emit_at for PAGE_SIZE buffer safety.
Done with:
$ spatch -sp-file sysfs_emit_dev.cocci --in-place --max-width=80 .
And cocci script:
$ cat sysfs_emit_dev.cocci
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- sprintf(buf,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- snprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- scnprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
expression chr;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- strcpy(buf, chr);
+ sysfs_emit(buf, chr);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- sprintf(buf,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- snprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- scnprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
- len += scnprintf(buf + len, PAGE_SIZE - len,
+ len += sysfs_emit_at(buf, len,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
expression chr;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
...
- strcpy(buf, chr);
- return strlen(buf);
+ return sysfs_emit(buf, chr);
}
Signed-off-by: Joe Perches <[email protected]>
Link: https://lore.kernel.org/r/3d033c33056d88bbe34d4ddb62afd05ee166ab9a.1600285923.git.joe@perches.com
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
xmlParseElementChildrenContentDeclPriv(xmlParserCtxtPtr ctxt, int inputchk,
int depth) {
xmlElementContentPtr ret = NULL, cur = NULL, last = NULL, op = NULL;
const xmlChar *elem;
xmlChar type = 0;
if (((depth > 128) && ((ctxt->options & XML_PARSE_HUGE) == 0)) ||
(depth > 2048)) {
xmlFatalErrMsgInt(ctxt, XML_ERR_ELEMCONTENT_NOT_FINISHED,
"xmlParseElementChildrenContentDecl : depth %d too deep, use XML_PARSE_HUGE\n",
depth);
return(NULL);
}
SKIP_BLANKS;
GROW;
if (RAW == '(') {
int inputid = ctxt->input->id;
/* Recurse on first child */
NEXT;
SKIP_BLANKS;
cur = ret = xmlParseElementChildrenContentDeclPriv(ctxt, inputid,
depth + 1);
SKIP_BLANKS;
GROW;
} else {
elem = xmlParseName(ctxt);
if (elem == NULL) {
xmlFatalErr(ctxt, XML_ERR_ELEMCONTENT_NOT_STARTED, NULL);
return(NULL);
}
cur = ret = xmlNewDocElementContent(ctxt->myDoc, elem, XML_ELEMENT_CONTENT_ELEMENT);
if (cur == NULL) {
xmlErrMemory(ctxt, NULL);
return(NULL);
}
GROW;
if (RAW == '?') {
cur->ocur = XML_ELEMENT_CONTENT_OPT;
NEXT;
} else if (RAW == '*') {
cur->ocur = XML_ELEMENT_CONTENT_MULT;
NEXT;
} else if (RAW == '+') {
cur->ocur = XML_ELEMENT_CONTENT_PLUS;
NEXT;
} else {
cur->ocur = XML_ELEMENT_CONTENT_ONCE;
}
GROW;
}
SKIP_BLANKS;
SHRINK;
while (RAW != ')') {
/*
* Each loop we parse one separator and one element.
*/
if (RAW == ',') {
if (type == 0) type = CUR;
/*
* Detect "Name | Name , Name" error
*/
else if (type != CUR) {
xmlFatalErrMsgInt(ctxt, XML_ERR_SEPARATOR_REQUIRED,
"xmlParseElementChildrenContentDecl : '%c' expected\n",
type);
if ((last != NULL) && (last != ret))
xmlFreeDocElementContent(ctxt->myDoc, last);
if (ret != NULL)
xmlFreeDocElementContent(ctxt->myDoc, ret);
return(NULL);
}
NEXT;
op = xmlNewDocElementContent(ctxt->myDoc, NULL, XML_ELEMENT_CONTENT_SEQ);
if (op == NULL) {
if ((last != NULL) && (last != ret))
xmlFreeDocElementContent(ctxt->myDoc, last);
xmlFreeDocElementContent(ctxt->myDoc, ret);
return(NULL);
}
if (last == NULL) {
op->c1 = ret;
if (ret != NULL)
ret->parent = op;
ret = cur = op;
} else {
cur->c2 = op;
if (op != NULL)
op->parent = cur;
op->c1 = last;
if (last != NULL)
last->parent = op;
cur =op;
last = NULL;
}
} else if (RAW == '|') {
if (type == 0) type = CUR;
/*
* Detect "Name , Name | Name" error
*/
else if (type != CUR) {
xmlFatalErrMsgInt(ctxt, XML_ERR_SEPARATOR_REQUIRED,
"xmlParseElementChildrenContentDecl : '%c' expected\n",
type);
if ((last != NULL) && (last != ret))
xmlFreeDocElementContent(ctxt->myDoc, last);
if (ret != NULL)
xmlFreeDocElementContent(ctxt->myDoc, ret);
return(NULL);
}
NEXT;
op = xmlNewDocElementContent(ctxt->myDoc, NULL, XML_ELEMENT_CONTENT_OR);
if (op == NULL) {
if ((last != NULL) && (last != ret))
xmlFreeDocElementContent(ctxt->myDoc, last);
if (ret != NULL)
xmlFreeDocElementContent(ctxt->myDoc, ret);
return(NULL);
}
if (last == NULL) {
op->c1 = ret;
if (ret != NULL)
ret->parent = op;
ret = cur = op;
} else {
cur->c2 = op;
if (op != NULL)
op->parent = cur;
op->c1 = last;
if (last != NULL)
last->parent = op;
cur =op;
last = NULL;
}
} else {
xmlFatalErr(ctxt, XML_ERR_ELEMCONTENT_NOT_FINISHED, NULL);
if ((last != NULL) && (last != ret))
xmlFreeDocElementContent(ctxt->myDoc, last);
if (ret != NULL)
xmlFreeDocElementContent(ctxt->myDoc, ret);
return(NULL);
}
GROW;
SKIP_BLANKS;
GROW;
if (RAW == '(') {
int inputid = ctxt->input->id;
/* Recurse on second child */
NEXT;
SKIP_BLANKS;
last = xmlParseElementChildrenContentDeclPriv(ctxt, inputid,
depth + 1);
SKIP_BLANKS;
} else {
elem = xmlParseName(ctxt);
if (elem == NULL) {
xmlFatalErr(ctxt, XML_ERR_ELEMCONTENT_NOT_STARTED, NULL);
if (ret != NULL)
xmlFreeDocElementContent(ctxt->myDoc, ret);
return(NULL);
}
last = xmlNewDocElementContent(ctxt->myDoc, elem, XML_ELEMENT_CONTENT_ELEMENT);
if (last == NULL) {
if (ret != NULL)
xmlFreeDocElementContent(ctxt->myDoc, ret);
return(NULL);
}
if (RAW == '?') {
last->ocur = XML_ELEMENT_CONTENT_OPT;
NEXT;
} else if (RAW == '*') {
last->ocur = XML_ELEMENT_CONTENT_MULT;
NEXT;
} else if (RAW == '+') {
last->ocur = XML_ELEMENT_CONTENT_PLUS;
NEXT;
} else {
last->ocur = XML_ELEMENT_CONTENT_ONCE;
}
}
SKIP_BLANKS;
GROW;
}
if ((cur != NULL) && (last != NULL)) {
cur->c2 = last;
if (last != NULL)
last->parent = cur;
}
if ((ctxt->validate) && (ctxt->input->id != inputchk)) {
xmlValidityError(ctxt, XML_ERR_ENTITY_BOUNDARY,
"Element content declaration doesn't start and stop in the same entity\n",
NULL, NULL);
}
NEXT;
if (RAW == '?') {
if (ret != NULL) {
if ((ret->ocur == XML_ELEMENT_CONTENT_PLUS) ||
(ret->ocur == XML_ELEMENT_CONTENT_MULT))
ret->ocur = XML_ELEMENT_CONTENT_MULT;
else
ret->ocur = XML_ELEMENT_CONTENT_OPT;
}
NEXT;
} else if (RAW == '*') {
if (ret != NULL) {
ret->ocur = XML_ELEMENT_CONTENT_MULT;
cur = ret;
/*
* Some normalization:
* (a | b* | c?)* == (a | b | c)*
*/
while ((cur != NULL) && (cur->type == XML_ELEMENT_CONTENT_OR)) {
if ((cur->c1 != NULL) &&
((cur->c1->ocur == XML_ELEMENT_CONTENT_OPT) ||
(cur->c1->ocur == XML_ELEMENT_CONTENT_MULT)))
cur->c1->ocur = XML_ELEMENT_CONTENT_ONCE;
if ((cur->c2 != NULL) &&
((cur->c2->ocur == XML_ELEMENT_CONTENT_OPT) ||
(cur->c2->ocur == XML_ELEMENT_CONTENT_MULT)))
cur->c2->ocur = XML_ELEMENT_CONTENT_ONCE;
cur = cur->c2;
}
}
NEXT;
} else if (RAW == '+') {
if (ret != NULL) {
int found = 0;
if ((ret->ocur == XML_ELEMENT_CONTENT_OPT) ||
(ret->ocur == XML_ELEMENT_CONTENT_MULT))
ret->ocur = XML_ELEMENT_CONTENT_MULT;
else
ret->ocur = XML_ELEMENT_CONTENT_PLUS;
/*
* Some normalization:
* (a | b*)+ == (a | b)*
* (a | b?)+ == (a | b)*
*/
while ((cur != NULL) && (cur->type == XML_ELEMENT_CONTENT_OR)) {
if ((cur->c1 != NULL) &&
((cur->c1->ocur == XML_ELEMENT_CONTENT_OPT) ||
(cur->c1->ocur == XML_ELEMENT_CONTENT_MULT))) {
cur->c1->ocur = XML_ELEMENT_CONTENT_ONCE;
found = 1;
}
if ((cur->c2 != NULL) &&
((cur->c2->ocur == XML_ELEMENT_CONTENT_OPT) ||
(cur->c2->ocur == XML_ELEMENT_CONTENT_MULT))) {
cur->c2->ocur = XML_ELEMENT_CONTENT_ONCE;
found = 1;
}
cur = cur->c2;
}
if (found)
ret->ocur = XML_ELEMENT_CONTENT_MULT;
}
NEXT;
}
return(ret);
}
| 0 |
[
"CWE-125"
] |
libxml2
|
77404b8b69bc122d12231807abf1a837d121b551
| 306,168,545,409,280,630,000,000,000,000,000,000,000 | 264 |
Make sure the parser returns when getting a Stop order
patch backported from chromiun bug fixes, assuming author is Chris
|
/* Select the "best" of the candidate file names name[OLD..INDEX]
   taken from a patch header.  Candidates whose entry is NULL or whose
   ignore[] flag is set are skipped.  Among the rest, preference is:
     1. fewest leading path components (prefix_components),
     2. then shortest basename,
     3. then shortest full name,
     4. then first position in OLD..INDEX order.
   The second pass additionally requires name_is_valid(); if no
   candidate satisfies every criterion, i runs past INDEX and that
   out-of-range value is returned -- presumably treated as "no usable
   name" by the caller (NOTE(review): confirm against callers).  */
best_name (char *const *name, int const *ignore)
{
  enum nametype i;
  int components[3];
  int components_min = INT_MAX;
  size_t basename_len[3];
  size_t basename_len_min = SIZE_MAX;
  size_t len[3];
  size_t len_min = SIZE_MAX;

  /* First pass: record each candidate's statistics while tracking the
     running minima.  Note the early "continue"s: a slot that loses on
     an earlier criterion leaves its later statistics uninitialized.
     That is safe only because the second pass compares the criteria
     left-to-right with short-circuiting &&, so a later statistic is
     read only when all earlier ones tie with the minima.  */
  for (i = OLD; i <= INDEX; i++)
    if (name[i] && !ignore[i])
      {
	/* Take the names with the fewest prefix components. */
	components[i] = prefix_components (name[i], false);
	if (components_min < components[i])
	  continue;
	components_min = components[i];

	/* Of those, take the names with the shortest basename. */
	basename_len[i] = base_len (name[i]);
	if (basename_len_min < basename_len[i])
	  continue;
	basename_len_min = basename_len[i];

	/* Of those, take the shortest names. */
	len[i] = strlen (name[i]);
	if (len_min < len[i])
	  continue;
	len_min = len[i];
      }

  /* Of those, take the first valid name that matches all minima. */
  for (i = OLD; i <= INDEX; i++)
    if (name[i] && !ignore[i]
	&& name_is_valid (name[i])
	&& components[i] == components_min
	&& basename_len[i] == basename_len_min
	&& len[i] == len_min)
      break;
  return i;
}
| 0 |
[
"CWE-59"
] |
patch
|
44a987e02f04b9d81a0db4a611145cad1093a2d3
| 335,161,332,480,336,430,000,000,000,000,000,000,000 | 43 |
Add line number overflow checking
* bootstrap.conf: use intprops module.
* src/common.h: Define LINENUM_MIN and LINENUM_MAX macros.
* src/pch.c (another_hunk): Add line number overflow checking. Based on Robert
C. Seacord's INT32-C document for integer overflow checking and Tobias
Stoeckmann's "integer overflows and oob memory access" patch for FreeBSD.
|
/** Reload a module everywhere it is currently loaded.
 *
 * Remembers the argument string of every instance of sModule (per
 * user, per network, and the global instance), unloads them all, and
 * then loads the module again with the saved arguments in each place.
 *
 * @param sModule  Name of the module to reload.
 * @return true only if every reload succeeded; false if any single
 *         reload failed (the failures are logged via DEBUG and the
 *         remaining reloads are still attempted).
 */
bool CZNC::UpdateModule(const CString& sModule) {
    CModule* pModule;
    // Saved argument strings, keyed by where the module was loaded.
    map<CUser*, CString> musLoaded;
    map<CIRCNetwork*, CString> mnsLoaded;

    // Unload the module for every user and network
    for (const auto& it : m_msUsers) {
        CUser* pUser = it.second;

        pModule = pUser->GetModules().FindModule(sModule);
        if (pModule) {
            musLoaded[pUser] = pModule->GetArgs();
            pUser->GetModules().UnloadModule(sModule);
        }

        // See if the user has this module loaded to a network
        vector<CIRCNetwork*> vNetworks = pUser->GetNetworks();
        for (CIRCNetwork* pNetwork : vNetworks) {
            pModule = pNetwork->GetModules().FindModule(sModule);
            if (pModule) {
                mnsLoaded[pNetwork] = pModule->GetArgs();
                pNetwork->GetModules().UnloadModule(sModule);
            }
        }
    }

    // Unload the global module
    bool bGlobal = false;
    CString sGlobalArgs;

    pModule = GetModules().FindModule(sModule);
    if (pModule) {
        bGlobal = true;
        sGlobalArgs = pModule->GetArgs();
        GetModules().UnloadModule(sModule);
    }

    // Lets reload everything
    bool bError = false;
    CString sErr;

    // Reload the global module
    if (bGlobal) {
        if (!GetModules().LoadModule(sModule, sGlobalArgs,
                                     CModInfo::GlobalModule, nullptr, nullptr,
                                     sErr)) {
            DEBUG("Failed to reload [" << sModule << "] globally [" << sErr
                                       << "]");
            bError = true;
        }
    }

    // Reload the module for all users
    for (const auto& it : musLoaded) {
        CUser* pUser = it.first;
        const CString& sArgs = it.second;

        if (!pUser->GetModules().LoadModule(
                sModule, sArgs, CModInfo::UserModule, pUser, nullptr, sErr)) {
            DEBUG("Failed to reload [" << sModule << "] for ["
                                       << pUser->GetUserName() << "] [" << sErr
                                       << "]");
            bError = true;
        }
    }

    // Reload the module for all networks
    for (const auto& it : mnsLoaded) {
        CIRCNetwork* pNetwork = it.first;
        const CString& sArgs = it.second;

        if (!pNetwork->GetModules().LoadModule(
                sModule, sArgs, CModInfo::NetworkModule, pNetwork->GetUser(),
                pNetwork, sErr)) {
            DEBUG("Failed to reload ["
                  << sModule << "] for [" << pNetwork->GetUser()->GetUserName()
                  << "/" << pNetwork->GetName() << "] [" << sErr << "]");
            bError = true;
        }
    }

    return !bError;
}
| 0 |
[
"CWE-20"
] |
znc
|
64613bc8b6b4adf1e32231f9844d99cd512b8973
| 8,337,625,731,466,455,000,000,000,000,000,000,000 | 84 |
Don't crash if user specified invalid encoding.
This is CVE-2019-9917
|
/* Write a zlib-compressed, empty MAT version-5 variable (class double,
 * no data element) to the file.
 *
 * mat  - open MAT file handle (written via mat->fp)
 * name - variable name; may be NULL (unnamed variable)
 * rank - number of dimensions
 * dims - dimension sizes, rank entries
 * z    - caller-owned, already-initialized deflate stream; compression
 *        state is continued across calls
 *
 * Returns the number of compressed bytes written (0 on bad arguments
 * or when the computed element size would not fit in a 32-bit tag). */
Mat_WriteCompressedEmptyVariable5(mat_t *mat,const char *name,int rank,
                                  size_t *dims,z_streamp z)
{
    mat_uint32_t array_flags;
    int array_flags_type = MAT_T_UINT32, dims_array_type = MAT_T_INT32;
    int array_flags_size = 8;
    int i, err;
    size_t nBytes, empty_matrix_max_buf_size;
    /* Staging buffers: uncomp_buf holds the raw tag/data words,
     * comp_buf receives the deflated output. */
    mat_uint32_t comp_buf[512];
    mat_uint32_t uncomp_buf[512] = {0,};
    int buf_size = 512;
    size_t byteswritten = 0, buf_size_bytes;

    if ( NULL == mat || NULL == z)
        return byteswritten;

    buf_size_bytes = buf_size*sizeof(*comp_buf);

    /* Array Flags */
    array_flags = MAT_C_DOUBLE;

    /* miMATRIX tag: element type plus total byte count of the matrix
     * element; bail out if the count cannot be represented in the
     * 32-bit tag field. */
    uncomp_buf[0] = MAT_T_MATRIX;
    err = GetEmptyMatrixMaxBufSize(name, rank, &empty_matrix_max_buf_size);
    if (err || empty_matrix_max_buf_size > UINT32_MAX)
        return byteswritten;
    uncomp_buf[1] = empty_matrix_max_buf_size;
    z->next_in = ZLIB_BYTE_PTR(uncomp_buf);
    z->avail_in = 8;
    do {
        z->next_out = ZLIB_BYTE_PTR(comp_buf);
        z->avail_out = buf_size_bytes;
        deflate(z,Z_NO_FLUSH);
        byteswritten += fwrite(comp_buf,1,buf_size_bytes-z->avail_out,(FILE*)mat->fp);
    } while ( z->avail_out == 0 );
    uncomp_buf[0] = array_flags_type;
    uncomp_buf[1] = array_flags_size;
    uncomp_buf[2] = array_flags;
    uncomp_buf[3] = 0;
    /* Rank and Dimension */
    nBytes = rank * 4;
    uncomp_buf[4] = dims_array_type;
    uncomp_buf[5] = nBytes;
    for ( i = 0; i < rank; i++ ) {
        mat_int32_t dim;
        dim = dims[i];
        uncomp_buf[6+i] = dim;
    }
    /* Dimensions subelement must be padded to an 8-byte boundary, so an
     * odd rank gets one extra zero word. */
    if ( rank % 2 != 0 ) {
        int pad4 = 0;
        uncomp_buf[6+i] = pad4;
        i++;
    }

    z->next_in = ZLIB_BYTE_PTR(uncomp_buf);
    z->avail_in = (6+i)*sizeof(*uncomp_buf);
    do {
        z->next_out = ZLIB_BYTE_PTR(comp_buf);
        z->avail_out = buf_size_bytes;
        deflate(z,Z_NO_FLUSH);
        byteswritten += fwrite(comp_buf,1,buf_size_bytes-z->avail_out,(FILE*)mat->fp);
    } while ( z->avail_out == 0 );
    /* Name of variable: three encodings depending on length.
     * NULL name -> empty subelement; <= 4 chars -> compact tag with the
     * name packed into the tag word; otherwise a full tag followed by
     * the name padded to 8 bytes. */
    if ( NULL == name ) {
        mat_int16_t array_name_type = MAT_T_INT8;
        uncomp_buf[0] = array_name_type;
        uncomp_buf[1] = 0;
        z->next_in = ZLIB_BYTE_PTR(uncomp_buf);
        z->avail_in = 8;
        do {
            z->next_out = ZLIB_BYTE_PTR(comp_buf);
            z->avail_out = buf_size_bytes;
            deflate(z,Z_NO_FLUSH);
            byteswritten += fwrite(comp_buf,1,buf_size_bytes-z->avail_out,(FILE*)mat->fp);
        } while ( z->avail_out == 0 );
    } else if ( strlen(name) <= 4 ) {
        /* Small-name optimization: length and type share one word. */
        mat_int16_t array_name_len = (mat_int16_t)strlen(name);
        mat_int16_t array_name_type = MAT_T_INT8;
        memset(uncomp_buf,0,8);
        uncomp_buf[0] = (array_name_len << 16) | array_name_type;
        memcpy(uncomp_buf+1,name,array_name_len);
        if ( array_name_len % 4 )
            array_name_len += 4-(array_name_len % 4);

        z->next_in = ZLIB_BYTE_PTR(uncomp_buf);
        z->avail_in = 8;
        do {
            z->next_out = ZLIB_BYTE_PTR(comp_buf);
            z->avail_out = buf_size_bytes;
            deflate(z,Z_NO_FLUSH);
            byteswritten += fwrite(comp_buf,1,buf_size_bytes-z->avail_out,
                (FILE*)mat->fp);
        } while ( z->avail_out == 0 );
    } else {
        mat_int32_t array_name_len = (mat_int32_t)strlen(name);
        mat_int32_t array_name_type = MAT_T_INT8;

        memset(uncomp_buf,0,buf_size*sizeof(*uncomp_buf));
        uncomp_buf[0] = array_name_type;
        uncomp_buf[1] = array_name_len;
        memcpy(uncomp_buf+2,name,array_name_len);
        if ( array_name_len % 8 )
            array_name_len += 8-(array_name_len % 8);
        z->next_in = ZLIB_BYTE_PTR(uncomp_buf);
        z->avail_in = 8+array_name_len;
        do {
            z->next_out = ZLIB_BYTE_PTR(comp_buf);
            z->avail_out = buf_size_bytes;
            deflate(z,Z_NO_FLUSH);
            byteswritten += fwrite(comp_buf,1,buf_size_bytes-z->avail_out,
                (FILE*)mat->fp);
        } while ( z->avail_out == 0 );
    }

    /* Empty data element: zero doubles. */
    byteswritten += WriteCompressedData(mat,z,NULL,0,MAT_T_DOUBLE);
    return byteswritten;
}
| 0 |
[
"CWE-190",
"CWE-401"
] |
matio
|
5fa49ef9fc4368fe3d19b5fdaa36d8fa5e7f4606
| 216,555,867,068,776,740,000,000,000,000,000,000,000 | 118 |
Fix integer addition overflow
As reported by https://github.com/tbeu/matio/issues/121
|
/*
 * Render a feed item into the given file.  The stream is opened for
 * writing; on failure the current errno is wrapped in an exception.
 * The actual formatting is delegated to the stream-based overload.
 */
void controller::write_item(std::shared_ptr<rss_item> item, const std::string& filename) {
	std::fstream f(filename.c_str(), std::fstream::out);
	if (!f.is_open()) {
		throw exception(errno);
	}
	write_item(item, f);
}
| 0 |
[
"CWE-943",
"CWE-787"
] |
newsbeuter
|
96e9506ae9e252c548665152d1b8968297128307
| 220,350,889,662,962,900,000,000,000,000,000,000,000 | 8 |
Sanitize inputs to bookmark-cmd (#591)
Newsbeuter didn't properly shell-escape the arguments passed to
bookmarking command, which allows a remote attacker to perform remote
code execution by crafting an RSS item whose title and/or URL contain
something interpretable by the shell (most notably subshell
invocations.)
This has been reported by Jeriko One <[email protected]>, complete with
PoC and a patch.
This vulnerability was assigned CVE-2017-12904.
|
/*
 * Allocate a fresh, zeroed xmlNodeSet whose node table can hold at
 * least @size entries (never fewer than XML_NODESET_DEFAULT).
 * Returns NULL and reports a memory error on allocation failure.
 */
xmlXPathNodeSetCreateSize(int size) {
    xmlNodeSetPtr set;

    set = (xmlNodeSetPtr) xmlMalloc(sizeof(xmlNodeSet));
    if (set == NULL) {
        xmlXPathErrMemory(NULL, "creating nodeset\n");
        return(NULL);
    }
    memset(set, 0, sizeof(xmlNodeSet));

    if (size < XML_NODESET_DEFAULT)
        size = XML_NODESET_DEFAULT;

    set->nodeTab = (xmlNodePtr *) xmlMalloc(size * sizeof(xmlNodePtr));
    if (set->nodeTab == NULL) {
        xmlXPathErrMemory(NULL, "creating nodeset\n");
        xmlFree(set);
        return(NULL);
    }
    memset(set->nodeTab, 0, size * sizeof(xmlNodePtr));
    set->nodeMax = size;

    return(set);
}
| 0 |
[
"CWE-119"
] |
libxml2
|
91d19754d46acd4a639a8b9e31f50f31c78f8c9c
| 194,774,840,064,872,860,000,000,000,000,000,000,000 | 21 |
Fix the semantic of XPath axis for namespace/attribute context nodes
The processing of namespace and attributes nodes was not compliant
to the XPath-1.0 specification
|
//! Return a const reference to the last image of the list.
const CImg<T>& back() const {
  return _data[_width - 1];
}
| 0 |
[
"CWE-770"
] |
cimg
|
619cb58dd90b4e03ac68286c70ed98acbefd1c90
| 67,795,036,699,991,000,000,000,000,000,000,000,000 | 3 |
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size.
|
/* Create a new TCP session object for an accepted connection, insert
 * it at the head of the server's session list (bounded by
 * iTCPSessMax) and register its socket with epoll.
 *
 * Fix: the error path previously used "pSess->next != NULL" as a
 * proxy for "session is linked".  That proxy is wrong when the new
 * session is the only one in the list (next == NULL but pSrv->pSess
 * already points at it), so a failing addEPollSock() freed the session
 * while the list head still referenced it.  An explicit bLinked flag
 * now tracks list membership.
 */
addSess(ptcplstn_t *pLstn, int sock, prop_t *peerName, prop_t *peerIP)
{
	DEFiRet;
	ptcpsess_t *pSess = NULL;
	ptcpsrv_t *pSrv = pLstn->pSrv;
	int pmsg_size_factor;
	int bLinked = 0; /* has pSess been inserted into pSrv's session list? */

	CHKmalloc(pSess = malloc(sizeof(ptcpsess_t)));
	pSess->next = NULL;
	/* With a start regex we keep a second, equally-sized save buffer
	 * for partial-message handling, hence the doubled size factor. */
	if(pLstn->pSrv->inst->startRegex == NULL) {
		pmsg_size_factor = 1;
		pSess->pMsg_save = NULL;
	} else {
		pmsg_size_factor = 2;
		pSess->pMsg = NULL;
		CHKmalloc(pSess->pMsg_save = malloc(1 + iMaxLine * pmsg_size_factor));
	}
	CHKmalloc(pSess->pMsg = malloc(1 + iMaxLine * pmsg_size_factor));
	pSess->pLstn = pLstn;
	pSess->sock = sock;
	pSess->bSuppOctetFram = pLstn->bSuppOctetFram;
	pSess->bSPFramingFix = pLstn->bSPFramingFix;
	pSess->inputState = eAtStrtFram;
	pSess->iMsg = 0;
	pSess->iCurrLine = 1;
	pSess->bzInitDone = 0;
	pSess->bAtStrtOfFram = 1;
	pSess->peerName = peerName;
	pSess->peerIP = peerIP;
	pSess->compressionMode = pLstn->pSrv->compressionMode;

	/* add to start of server's listener list */
	pSess->prev = NULL;
	pthread_mutex_lock(&pSrv->mutSessLst);
	int iTCPSessMax = pSrv->inst->iTCPSessMax;
	if (iTCPSessMax > 0 && pSrv->iTCPSessCnt >= iTCPSessMax) {
		pthread_mutex_unlock(&pSrv->mutSessLst);
		LogError(0, RS_RET_MAX_SESS_REACHED,
			"too many tcp sessions - dropping incoming request");
		ABORT_FINALIZE(RS_RET_MAX_SESS_REACHED);
	}
	pSrv->iTCPSessCnt++;
	pSess->next = pSrv->pSess;
	if(pSrv->pSess != NULL)
		pSrv->pSess->prev = pSess;
	pSrv->pSess = pSess;
	bLinked = 1;
	pthread_mutex_unlock(&pSrv->mutSessLst);

	CHKiRet(addEPollSock(epolld_sess, pSess, sock, &pSess->epd));

finalize_it:
	if(iRet != RS_RET_OK) {
		if(pSess != NULL) {
			/* unlink only if we actually made it onto the list;
			 * bLinked is reliable even for a sole list member. */
			if (bLinked) {
				unlinkSess(pSess);
			}
			free(pSess->pMsg_save);
			free(pSess->pMsg);
			free(pSess);
		}
	}
	RETiRet;
}
| 0 |
[
"CWE-787"
] |
rsyslog
|
89955b0bcb1ff105e1374aad7e0e993faa6a038f
| 95,706,106,995,535,630,000,000,000,000,000,000,000 | 66 |
net bugfix: potential buffer overrun
|
/* Initialize a filter's argument set.
 *
 * Allocates the filter's private stack (unless called for a script
 * re-parse), instantiates every declared argument with its default
 * (possibly overridden from the per-filter config section
 * "filter@<regname>"), forwards meta-arguments to the filter's
 * update_arg callback, and finally applies the user-supplied argument
 * string.
 *
 * filter     - filter instance being configured (no-op if NULL)
 * args       - user argument string, parsed last so it overrides defaults
 * arg_type   - where the args come from (explicit/inherited/...)
 * for_script - GF_TRUE when re-applying args on a scripted filter:
 *              skips private-stack allocation and uses the instance
 *              argument table instead of the registry's
 */
static void gf_filter_parse_args(GF_Filter *filter, const char *args, GF_FilterArgType arg_type, Bool for_script)
{
	u32 i=0;
	char szSecName[200];
	char szEscape[7];
	char szSrc[5], szDst[5];
	Bool has_meta_args = GF_FALSE;
	const GF_FilterArgs *f_args = NULL;
	if (!filter) return;

	if (!for_script) {
		if (!filter->freg->private_size) {
			if (filter->freg->args && filter->freg->args[0].arg_name) {
				GF_LOG(GF_LOG_ERROR, GF_LOG_FILTER, ("Filter with arguments but no private stack size, no arg passing\n"));
			}
		} else {
			filter->filter_udta = gf_malloc(filter->freg->private_size);
			if (!filter->filter_udta) {
				GF_LOG(GF_LOG_ERROR, GF_LOG_FILTER, ("Failed to allocate private data stack\n"));
				return;
			}
			memset(filter->filter_udta, 0, filter->freg->private_size);
		}
	}

	/* Build the session-specific separator strings used by the
	 * argument tokenizer (escape sequence, "src"/"dst" prefixes). */
	sprintf(szEscape, "%cgpac%c", filter->session->sep_args, filter->session->sep_args);
	sprintf(szSrc, "src%c", filter->session->sep_name);
	sprintf(szDst, "dst%c", filter->session->sep_name);

	snprintf(szSecName, 200, "filter@%s", filter->freg->name);

	//instantiate all args with defauts value
	f_args = filter->freg->args;
	if (for_script)
		f_args = filter->instance_args;
	i=0;
	while (f_args) {
		GF_PropertyValue argv;
		const char *def_val;
		const GF_FilterArgs *a = &f_args[i];
		if (!a || !a->arg_name) break;
		i++;

		/* meta args have no fixed storage; handled separately below */
		if (a->flags & GF_FS_ARG_META) {
			has_meta_args = GF_TRUE;
			continue;
		}

		/* config file may override the registry's built-in default */
		def_val = gf_filter_load_arg_config(filter, szSecName, a->arg_name, a->arg_default_val);

		if (!def_val) continue;

		argv = gf_filter_parse_prop_solve_env_var(filter, a->arg_type, a->arg_name, def_val, a->min_max_enum);

		if (argv.type != GF_PROP_FORBIDEN) {
			/* either write directly into the private stack or hand
			 * the value to the filter's update callback */
			if (!for_script && (a->offset_in_private>=0)) {
				gf_filter_set_arg(filter, a, &argv);
			} else if (filter->freg->update_arg) {
				FSESS_CHECK_THREAD(filter)
				filter->freg->update_arg(filter, a->arg_name, &argv);
				gf_props_reset_single(&argv);
			}
		} else {
			GF_LOG(GF_LOG_ERROR, GF_LOG_FILTER, ("Failed to parse argument %s value %s\n", a->arg_name, a->arg_default_val));
		}
	}
	//handle meta filter options, not exposed in registry
	if (has_meta_args && filter->freg->update_arg && !for_script)
		gf_filter_load_meta_args_config(szSecName, filter);

	/* finally, apply the caller-provided argument string */
	filter_parse_dyn_args(filter, args, arg_type, for_script, szSrc, szDst, szEscape, szSecName, has_meta_args, 0);
}
| 0 |
[
"CWE-787"
] |
gpac
|
da37ec8582266983d0ec4b7550ec907401ec441e
| 69,685,615,348,320,010,000,000,000,000,000,000,000 | 72 |
fixed crashes for very long path - cf #1908
|
/* GdkPixbuf loadable-module entry point: wire the JPEG codec's
 * implementations into the module vtable (one-shot and incremental
 * loading, plus the save paths). */
MODULE_ENTRY (fill_vtable) (GdkPixbufModule *module)
{
	module->load = gdk_pixbuf__jpeg_image_load;
	module->begin_load = gdk_pixbuf__jpeg_image_begin_load;
	module->stop_load = gdk_pixbuf__jpeg_image_stop_load;
	module->load_increment = gdk_pixbuf__jpeg_image_load_increment;
	module->save = gdk_pixbuf__jpeg_image_save;
	module->save_to_callback = gdk_pixbuf__jpeg_image_save_to_callback;
	module->is_save_option_supported = gdk_pixbuf__jpeg_is_save_option_supported;
}
| 0 |
[
"CWE-787"
] |
gdk-pixbuf
|
c2a40a92fe3df4111ed9da51fe3368c079b86926
| 334,258,237,251,558,640,000,000,000,000,000,000,000 | 10 |
jpeg: Throw error when number of color components is unsupported
Explicitly check "3" or "4" output color components.
gdk-pixbuf assumed that the value of output_components to be either
3 or 4, but not an invalid value (9) or an unsupported value (1).
The way the buffer size was deduced was using a naive "== 4" check,
with a 1, 3 or 9 color component picture getting the same buffer size,
a size just sufficient for 3 color components, causing invalid writes
later when libjpeg-turbo was decoding the image.
CVE-2017-2862
Sent by from Marcin 'Icewall' Noga of Cisco Talos
https://bugzilla.gnome.org/show_bug.cgi?id=784866
|
/* Copy a slice of @page into the socket's per-task page fragment.
 *
 * Refills the socket's page fragment if needed, then copies up to
 * *len bytes starting at *offset from @page into the fragment.  On
 * return *len is clamped to what actually fit in the fragment and
 * *offset is rewritten to the data's offset inside the returned page.
 * Returns the fragment page, or NULL if the fragment could not be
 * refilled.  (Used so the caller hands out a private copy instead of
 * the original linear skb page -- NOTE(review): confirm caller
 * context, presumably the skb splice path.) */
static struct page *linear_to_page(struct page *page, unsigned int *len,
				   unsigned int *offset,
				   struct sock *sk)
{
	struct page_frag *pfrag = sk_page_frag(sk);

	if (!sk_page_frag_refill(sk, pfrag))
		return NULL;

	/* never copy more than the space remaining in the fragment */
	*len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);

	memcpy(page_address(pfrag->page) + pfrag->offset,
	       page_address(page) + *offset, *len);
	*offset = pfrag->offset;
	pfrag->offset += *len;

	return pfrag->page;
}
| 0 |
[
"CWE-416"
] |
net
|
36d5fe6a000790f56039afe26834265db0a3ad4c
| 221,242,716,443,029,030,000,000,000,000,000,000,000 | 18 |
core, nfqueue, openvswitch: Orphan frags in skb_zerocopy and handle errors
skb_zerocopy can copy elements of the frags array between skbs, but it doesn't
orphan them. Also, it doesn't handle errors, so this patch takes care of that
as well, and modify the callers accordingly. skb_tx_error() is also added to
the callers so they will signal the failed delivery towards the creator of the
skb.
Signed-off-by: Zoltan Kiss <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static void row_dim_delete(zval *object, zval *offset TSRMLS_DC)
{
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Cannot delete properties from a PDORow");
| 0 |
[
"CWE-476"
] |
php-src
|
6045de69c7dedcba3eadf7c4bba424b19c81d00d
| 50,022,217,375,606,460,000,000,000,000,000,000,000 | 4 |
Fix bug #73331 - do not try to serialize/unserialize objects wddx can not handle
Proper soltion would be to call serialize/unserialize and deal with the result,
but this requires more work that should be done by wddx maintainer (not me).
|
/* Destructor for the Chunk Offset ('stco') box: release the offset
 * table, then the box itself.  Safe to call with NULL. */
void stco_del(GF_Box *s)
{
	GF_ChunkOffsetBox *co = (GF_ChunkOffsetBox *)s;
	if (co == NULL)
		return;
	if (co->offsets != NULL)
		gf_free(co->offsets);
	gf_free(co);
}
| 0 |
[
"CWE-400",
"CWE-401"
] |
gpac
|
d2371b4b204f0a3c0af51ad4e9b491144dd1225c
| 238,976,245,116,608,020,000,000,000,000,000,000,000 | 7 |
prevent dref memleak on invalid input (#1183)
|
/*
 * Default capability hook for setting a file's owner (F_SETOWN):
 * always returns 0, i.e. the operation is permitted unconditionally
 * by the capability security module.
 */
static int cap_file_set_fowner(struct file *file)
{
	return 0;
}
| 0 |
[] |
linux-2.6
|
ee18d64c1f632043a02e6f5ba5e045bb26a5465f
| 289,563,780,507,314,000,000,000,000,000,000,000,000 | 4 |
KEYS: Add a keyctl to install a process's session keyring on its parent [try #6]
Add a keyctl to install a process's session keyring onto its parent. This
replaces the parent's session keyring. Because the COW credential code does
not permit one process to change another process's credentials directly, the
change is deferred until userspace next starts executing again. Normally this
will be after a wait*() syscall.
To support this, three new security hooks have been provided:
cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in
the blank security creds and key_session_to_parent() - which asks the LSM if
the process may replace its parent's session keyring.
The replacement may only happen if the process has the same ownership details
as its parent, and the process has LINK permission on the session keyring, and
the session keyring is owned by the process, and the LSM permits it.
Note that this requires alteration to each architecture's notify_resume path.
This has been done for all arches barring blackfin, m68k* and xtensa, all of
which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the
replacement to be performed at the point the parent process resumes userspace
execution.
This allows the userspace AFS pioctl emulation to fully emulate newpag() and
the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to
alter the parent process's PAG membership. However, since kAFS doesn't use
PAGs per se, but rather dumps the keys into the session keyring, the session
keyring of the parent must be replaced if, for example, VIOCSETTOK is passed
the newpag flag.
This can be tested with the following program:
#include <stdio.h>
#include <stdlib.h>
#include <keyutils.h>
#define KEYCTL_SESSION_TO_PARENT 18
#define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0)
int main(int argc, char **argv)
{
key_serial_t keyring, key;
long ret;
keyring = keyctl_join_session_keyring(argv[1]);
OSERROR(keyring, "keyctl_join_session_keyring");
key = add_key("user", "a", "b", 1, keyring);
OSERROR(key, "add_key");
ret = keyctl(KEYCTL_SESSION_TO_PARENT);
OSERROR(ret, "KEYCTL_SESSION_TO_PARENT");
return 0;
}
Compiled and linked with -lkeyutils, you should see something like:
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
355907932 --alswrv 4043 -1 \_ keyring: _uid.4043
[dhowells@andromeda ~]$ /tmp/newpag
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: _ses
1055658746 --alswrv 4043 4043 \_ user: a
[dhowells@andromeda ~]$ /tmp/newpag hello
[dhowells@andromeda ~]$ keyctl show
Session Keyring
-3 --alswrv 4043 4043 keyring: hello
340417692 --alswrv 4043 4043 \_ user: a
Where the test program creates a new session keyring, sticks a user key named
'a' into it and then installs it on its parent.
Signed-off-by: David Howells <[email protected]>
Signed-off-by: James Morris <[email protected]>
|
int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
{
struct net_device *dev;
struct bnep_session *s, *ss;
u8 dst[ETH_ALEN], src[ETH_ALEN];
int err;
BT_DBG("");
baswap((void *) dst, &l2cap_pi(sock->sk)->chan->dst);
baswap((void *) src, &l2cap_pi(sock->sk)->chan->src);
/* session struct allocated as private part of net_device */
dev = alloc_netdev(sizeof(struct bnep_session),
(*req->device) ? req->device : "bnep%d",
NET_NAME_UNKNOWN,
bnep_net_setup);
if (!dev)
return -ENOMEM;
down_write(&bnep_session_sem);
ss = __bnep_get_session(dst);
if (ss && ss->state == BT_CONNECTED) {
err = -EEXIST;
goto failed;
}
s = netdev_priv(dev);
/* This is rx header therefore addresses are swapped.
* ie. eh.h_dest is our local address. */
memcpy(s->eh.h_dest, &src, ETH_ALEN);
memcpy(s->eh.h_source, &dst, ETH_ALEN);
memcpy(dev->dev_addr, s->eh.h_dest, ETH_ALEN);
s->dev = dev;
s->sock = sock;
s->role = req->role;
s->state = BT_CONNECTED;
s->msg.msg_flags = MSG_NOSIGNAL;
#ifdef CONFIG_BT_BNEP_MC_FILTER
/* Set default mc filter */
set_bit(bnep_mc_hash(dev->broadcast), (ulong *) &s->mc_filter);
#endif
#ifdef CONFIG_BT_BNEP_PROTO_FILTER
/* Set default protocol filter */
bnep_set_default_proto_filter(s);
#endif
SET_NETDEV_DEV(dev, bnep_get_device(s));
SET_NETDEV_DEVTYPE(dev, &bnep_type);
err = register_netdev(dev);
if (err)
goto failed;
__bnep_link_session(s);
__module_get(THIS_MODULE);
s->task = kthread_run(bnep_session, s, "kbnepd %s", dev->name);
if (IS_ERR(s->task)) {
/* Session thread start failed, gotta cleanup. */
module_put(THIS_MODULE);
unregister_netdev(dev);
__bnep_unlink_session(s);
err = PTR_ERR(s->task);
goto failed;
}
up_write(&bnep_session_sem);
strcpy(req->device, dev->name);
return 0;
failed:
up_write(&bnep_session_sem);
free_netdev(dev);
return err;
}
| 1 |
[
"CWE-20",
"CWE-284"
] |
linux
|
71bb99a02b32b4cc4265118e85f6035ca72923f0
| 252,311,578,201,598,440,000,000,000,000,000,000,000 | 82 |
Bluetooth: bnep: bnep_add_connection() should verify that it's dealing with l2cap socket
same story as cmtp
Signed-off-by: Al Viro <[email protected]>
Signed-off-by: Marcel Holtmann <[email protected]>
|
/* Mark the named icon group of the given type (0 or 1) in the group
 * set's bitmap.  The special name "*" selects every group of that
 * type; an unknown name is only reported via debug output. */
void cli_icongroupset_add(const char *groupname, icon_groupset *set, unsigned int type, cli_ctx *ctx) {
    struct icon_matcher *matcher;
    unsigned int idx, ngroups;

    if(type > 1 || !ctx || !ctx->engine || !(matcher = ctx->engine->iconcheck) || !matcher->group_counts[type])
        return;

    ngroups = matcher->group_counts[type];

    /* wildcard: set all 256 bits for this type */
    if(groupname[0] == '*' && !groupname[1]) {
        set->v[type][0] = set->v[type][1] = set->v[type][2] = set->v[type][3] = ~0;
        return;
    }

    for(idx = 0; idx < ngroups; idx++) {
        if(!strcmp(groupname, matcher->group_names[type][idx]))
            break;
    }

    if(idx == ngroups) {
        cli_dbgmsg("cli_icongroupset_add: failed to locate icon group%u %s\n", type, groupname);
        return;
    }

    /* set bit idx inside the 4x64-bit map */
    set->v[type][idx / 64] |= (uint64_t)1 << (idx % 64);
}
| 0 |
[
"CWE-189"
] |
clamav-devel
|
3cbd8b5668bd0f262a8c00b1fd57eb03c117b00a
| 38,854,884,175,643,300,000,000,000,000,000,000,000 | 24 |
libclamav/pe_icons.c: introduce LOGPARSEICONDETAILS define to reduce parseicon logging in default build
|
/* C API: parse a (possibly fat) Mach-O file and return a
 * heap-allocated, NULL-terminated array of Macho_Binary_t*.
 * Caller owns the array and its entries.  Returns nullptr if the
 * array itself cannot be allocated.
 *
 * Fixes:
 *  - slots for binaries where take(i) yielded nullptr were previously
 *    left uninitialized, so a caller walking to the NULL terminator
 *    could read garbage pointers; valid entries are now compacted and
 *    the array is always properly terminated;
 *  - element size is sizeof(Macho_Binary_t*) (pointer), not
 *    sizeof(Macho_Binary_t**) -- same size in practice, wrong idiom;
 *  - the array allocation is now checked.
 */
Macho_Binary_t** macho_parse(const char *file) {
  FatBinary* fat = Parser::parse(file).release();

  size_t nb_bin = fat->size();
  auto** c_macho_binaries = static_cast<Macho_Binary_t**>(
      malloc((nb_bin + 1) * sizeof(Macho_Binary_t*)));
  if (c_macho_binaries == nullptr) {
    delete fat;
    return nullptr;
  }

  size_t nb_out = 0;
  for (size_t i = 0; i < nb_bin; ++i) {
    Binary* binary = fat->take(i).release();
    if (binary != nullptr) {
      auto* entry = static_cast<Macho_Binary_t*>(malloc(sizeof(Macho_Binary_t)));
      init_c_binary(entry, binary);
      c_macho_binaries[nb_out++] = entry;
    }
  }
  c_macho_binaries[nb_out] = nullptr;

  delete fat;
  return c_macho_binaries;
}
| 0 |
[
"CWE-787"
] |
LIEF
|
0033b6312fd311b2e45e379c04a83d77c1e58578
| 286,066,633,750,241,380,000,000,000,000,000,000,000 | 20 |
Resolve #767
|
/* One-time initialization of KVM's TSC frequency tracking.
 *
 * Records the maximum TSC frequency (for CPUs without a constant TSC
 * the cpufreq policy's max frequency is used instead of the current
 * tsc_khz), registers hotplug and cpufreq-transition notifiers so the
 * per-CPU TSC rate is kept up to date, and pushes the current rate to
 * every online CPU. */
static void kvm_timer_init(void)
{
	int cpu;

	max_tsc_khz = tsc_khz;
	register_hotcpu_notifier(&kvmclock_cpu_notifier_block);
	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
#ifdef CONFIG_CPU_FREQ
		/* TSC scales with the core clock here; use the policy's
		 * maximum frequency as the reference rate. */
		struct cpufreq_policy policy;
		memset(&policy, 0, sizeof(policy));
		cpu = get_cpu();
		cpufreq_get_policy(&policy, cpu);
		if (policy.cpuinfo.max_freq)
			max_tsc_khz = policy.cpuinfo.max_freq;
		put_cpu();
#endif
		cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
					  CPUFREQ_TRANSITION_NOTIFIER);
	}
	pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz);
	for_each_online_cpu(cpu)
		smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
}
| 0 |
[] |
kvm
|
0769c5de24621141c953fbe1f943582d37cb4244
| 141,892,820,798,614,870,000,000,000,000,000,000,000 | 23 |
KVM: x86: extend "struct x86_emulate_ops" with "get_cpuid"
In order to be able to proceed checks on CPU-specific properties
within the emulator, function "get_cpuid" is introduced.
With "get_cpuid" it is possible to virtually call the guests
"cpuid"-opcode without changing the VM's context.
[mtosatti: cleanup/beautify code]
Signed-off-by: Stephan Baerwolf <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]>
|
/* Regression test: encoding a palette image with no allocated colors
 * as PNG must fail cleanly (return NULL) instead of crashing. */
int main()
{
	int size = 0;
	void *data = NULL;
	gdImagePtr im = gdImageCreate(100, 100);

	gdTestAssert(im != NULL);

	data = gdImagePngPtr(im, &size);
	gdTestAssert(data == NULL);

	gdImageDestroy(im);
	return gdNumFailures();
}
| 0 |
[
"CWE-415"
] |
libgd
|
56ce6ef068b954ad28379e83cca04feefc51320c
| 204,602,565,788,657,500,000,000,000,000,000,000,000 | 16 |
Fix #381: libgd double-free vulnerability
The issue is that `gdImagePngCtxEx` (which is called by `gdImagePngPtr`
and the other PNG output functions to do the real work) does not return
whether it succeeded or failed, so this is not checked in
`gdImagePngPtr` and the function wrongly assumes everything is okay,
which is not, in this case, because the palette image contains no
palette entries.
We can't change the signature of `gdImagePngCtxEx` for API
compatibility reasons, so we introduce the static helper
`_gdImagePngCtxEx` which returns success respective failure, so
`gdImagePngPtr` and `gdImagePngPtrEx` can check the return value. We
leave it solely to libpng for now to report warnings regarding the
failing write.
CVE-2017-6362
(cherry picked from commit 2207e3c88a06a5c42230907554ab1e9f2ec021ea)
|
/*
 * Decode RAR LZSS-compressed data until the uncompressed stream position
 * reaches `end` (clamped to the start of the next filter block), a
 * new-file marker is seen, or an error occurs.
 *
 * Returns the current LZSS position (>= 0) on success, or
 * ARCHIVE_FATAL / ARCHIVE_FAILED on error.
 */
expand(struct archive_read *a, int64_t end)
{
  /* Match-length bases and extra-bit counts (28 entries, indices 0..27). */
  static const unsigned char lengthbases[] =
    {   0,   1,   2,   3,   4,   5,   6,
        7,   8,  10,  12,  14,  16,  20,
       24,  28,  32,  40,  48,  56,  64,
       80,  96, 112, 128, 160, 192, 224 };
  static const unsigned char lengthbits[] =
    { 0, 0, 0, 0, 0, 0, 0,
      0, 1, 1, 1, 1, 2, 2,
      2, 2, 3, 3, 3, 3, 4,
      4, 4, 4, 5, 5, 5, 5 };
  /* Match-offset bases and extra-bit counts (60 entries, indices 0..59). */
  static const unsigned int offsetbases[] =
    {       0,       1,       2,       3,       4,       6,
            8,      12,      16,      24,      32,      48,
           64,      96,     128,     192,     256,     384,
          512,     768,    1024,    1536,    2048,    3072,
         4096,    6144,    8192,   12288,   16384,   24576,
        32768,   49152,   65536,   98304,  131072,  196608,
       262144,  327680,  393216,  458752,  524288,  589824,
       655360,  720896,  786432,  851968,  917504,  983040,
      1048576, 1310720, 1572864, 1835008, 2097152, 2359296,
      2621440, 2883584, 3145728, 3407872, 3670016, 3932160 };
  static const unsigned char offsetbits[] =
    {  0,  0,  0,  0,  1,  1,  2,  2,  3,  3,  4,  4,
       5,  5,  6,  6,  7,  7,  8,  8,  9,  9, 10, 10,
      11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16,
      16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
      18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18 };
  /* Short-match (symbols 263..270) offset bases and extra-bit counts. */
  static const unsigned char shortbases[] =
    { 0, 4, 8, 16, 32, 64, 128, 192 };
  static const unsigned char shortbits[] =
    { 2, 2, 3, 4, 5, 6, 6, 6 };

  int symbol, offs, len, offsindex, lensymbol, i, offssymbol, lowoffsetsymbol;
  unsigned char newfile;
  struct rar *rar = (struct rar *)(a->format->data);
  struct rar_br *br = &(rar->br);

  /* Never decode past the start of the next filter block. */
  if (rar->filterstart < end)
    end = rar->filterstart;

  while (1)
  {
    /* Emit a match deferred from the previous iteration/call, but only
     * if it still fits before the end boundary. */
    if (rar->output_last_match &&
      lzss_position(&rar->lzss) + rar->lastlength <= end)
    {
      lzss_emit_match(rar, rar->lastoffset, rar->lastlength);
      rar->output_last_match = 0;
    }

    if(rar->is_ppmd_block || rar->output_last_match ||
      lzss_position(&rar->lzss) >= end)
      return lzss_position(&rar->lzss);

    if ((symbol = read_next_symbol(a, &rar->maincode)) < 0)
      return (ARCHIVE_FATAL);
    rar->output_last_match = 0;

    if (symbol < 256)
    {
      /* Literal byte. */
      lzss_emit_literal(rar, symbol);
      continue;
    }
    else if (symbol == 256)
    {
      /* End-of-block marker: either a new file or a new Huffman table. */
      if (!rar_br_read_ahead(a, br, 1))
        goto truncated_data;
      newfile = !rar_br_bits(br, 1);
      rar_br_consume(br, 1);

      if(newfile)
      {
        rar->start_new_block = 1;
        if (!rar_br_read_ahead(a, br, 1))
          goto truncated_data;
        rar->start_new_table = rar_br_bits(br, 1);
        rar_br_consume(br, 1);
        return lzss_position(&rar->lzss);
      }
      else
      {
        if (parse_codes(a) != ARCHIVE_OK)
          return (ARCHIVE_FATAL);
        continue;
      }
    }
    else if(symbol==257)
    {
      archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
                        "Parsing filters is unsupported.");
      return (ARCHIVE_FAILED);
    }
    else if(symbol==258)
    {
      /* Repeat the previous match verbatim. */
      if(rar->lastlength == 0)
        continue;

      offs = rar->lastoffset;
      len = rar->lastlength;
    }
    else if (symbol <= 262)
    {
      /* Reuse one of the four most recent offsets with a fresh length. */
      offsindex = symbol - 259;
      offs = rar->oldoffset[offsindex];

      if ((lensymbol = read_next_symbol(a, &rar->lengthcode)) < 0)
        goto bad_data;
      /* Valid indices are 0..27; `>=` rejects the off-by-one that the
       * previous `>` comparison allowed (out-of-bounds table read when
       * the symbol equals the table size). */
      if (lensymbol >= (int)(sizeof(lengthbases)/sizeof(lengthbases[0])))
        goto bad_data;
      if (lensymbol >= (int)(sizeof(lengthbits)/sizeof(lengthbits[0])))
        goto bad_data;
      len = lengthbases[lensymbol] + 2;
      if (lengthbits[lensymbol] > 0) {
        if (!rar_br_read_ahead(a, br, lengthbits[lensymbol]))
          goto truncated_data;
        len += rar_br_bits(br, lengthbits[lensymbol]);
        rar_br_consume(br, lengthbits[lensymbol]);
      }

      /* Move the reused offset to the front of the recent-offset list. */
      for (i = offsindex; i > 0; i--)
        rar->oldoffset[i] = rar->oldoffset[i-1];
      rar->oldoffset[0] = offs;
    }
    else if(symbol<=270)
    {
      /* Short match: fixed length 2 with a small offset. */
      offs = shortbases[symbol-263] + 1;
      if(shortbits[symbol-263] > 0) {
        if (!rar_br_read_ahead(a, br, shortbits[symbol-263]))
          goto truncated_data;
        offs += rar_br_bits(br, shortbits[symbol-263]);
        rar_br_consume(br, shortbits[symbol-263]);
      }
      len = 2;

      for(i = 3; i > 0; i--)
        rar->oldoffset[i] = rar->oldoffset[i-1];
      rar->oldoffset[0] = offs;
    }
    else
    {
      /* General match: length symbol (271+) followed by an offset symbol. */
      /* Valid indices are 0..27; `>=` bound as above. */
      if (symbol-271 >= (int)(sizeof(lengthbases)/sizeof(lengthbases[0])))
        goto bad_data;
      if (symbol-271 >= (int)(sizeof(lengthbits)/sizeof(lengthbits[0])))
        goto bad_data;
      len = lengthbases[symbol-271]+3;
      if(lengthbits[symbol-271] > 0) {
        if (!rar_br_read_ahead(a, br, lengthbits[symbol-271]))
          goto truncated_data;
        len += rar_br_bits(br, lengthbits[symbol-271]);
        rar_br_consume(br, lengthbits[symbol-271]);
      }

      if ((offssymbol = read_next_symbol(a, &rar->offsetcode)) < 0)
        goto bad_data;
      /* Valid indices are 0..59; `>=` rejects the off-by-one read. */
      if (offssymbol >= (int)(sizeof(offsetbases)/sizeof(offsetbases[0])))
        goto bad_data;
      if (offssymbol >= (int)(sizeof(offsetbits)/sizeof(offsetbits[0])))
        goto bad_data;
      offs = offsetbases[offssymbol]+1;
      if(offsetbits[offssymbol] > 0)
      {
        if(offssymbol > 9)
        {
          /* Large offsets: high bits come from the bit stream, the low
           * 4 bits from the low-offset Huffman code (with run-length
           * repeats tracked in numlowoffsetrepeats). */
          if(offsetbits[offssymbol] > 4) {
            if (!rar_br_read_ahead(a, br, offsetbits[offssymbol] - 4))
              goto truncated_data;
            offs += rar_br_bits(br, offsetbits[offssymbol] - 4) << 4;
            rar_br_consume(br, offsetbits[offssymbol] - 4);
          }

          if(rar->numlowoffsetrepeats > 0)
          {
            rar->numlowoffsetrepeats--;
            offs += rar->lastlowoffset;
          }
          else
          {
            if ((lowoffsetsymbol =
              read_next_symbol(a, &rar->lowoffsetcode)) < 0)
              return (ARCHIVE_FATAL);
            if(lowoffsetsymbol == 16)
            {
              rar->numlowoffsetrepeats = 15;
              offs += rar->lastlowoffset;
            }
            else
            {
              offs += lowoffsetsymbol;
              rar->lastlowoffset = lowoffsetsymbol;
            }
          }
        }
        else {
          if (!rar_br_read_ahead(a, br, offsetbits[offssymbol]))
            goto truncated_data;
          offs += rar_br_bits(br, offsetbits[offssymbol]);
          rar_br_consume(br, offsetbits[offssymbol]);
        }
      }

      /* Far offsets imply longer minimum match lengths. */
      if (offs >= 0x40000)
        len++;
      if (offs >= 0x2000)
        len++;

      for(i = 3; i > 0; i--)
        rar->oldoffset[i] = rar->oldoffset[i-1];
      rar->oldoffset[0] = offs;
    }

    /* Defer emission to the top of the loop so the end/filter boundary
     * check can clamp it. */
    rar->lastoffset = offs;
    rar->lastlength = len;
    rar->output_last_match = 1;
  }
truncated_data:
  archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
                    "Truncated RAR file data");
  rar->valid = 0;
  return (ARCHIVE_FATAL);
bad_data:
  archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
                    "Bad RAR file data");
  return (ARCHIVE_FATAL);
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
libarchive
|
05caadc7eedbef471ac9610809ba683f0c698700
| 267,412,227,975,387,830,000,000,000,000,000,000,000 | 226 |
Issue 719: Fix for TALOS-CAN-154
A RAR file with an invalid zero dictionary size was not being
rejected, leading to a zero-sized allocation for the dictionary
storage which was then overwritten during the dictionary initialization.
Thanks to the Open Source and Threat Intelligence project at Cisco for
reporting this.
|
bool ProtocolV1::is_connected() {
return can_write.load() == WriteStatus::CANWRITE;
}
| 0 |
[
"CWE-294"
] |
ceph
|
6c14c2fb5650426285428dfe6ca1597e5ea1d07d
| 23,046,163,266,788,796,000,000,000,000,000,000,000 | 3 |
mon/MonClient: bring back CEPHX_V2 authorizer challenges
Commit c58c5754dfd2 ("msg/async/ProtocolV1: use AuthServer and
AuthClient") introduced a backwards compatibility issue into msgr1.
To fix it, commit 321548010578 ("mon/MonClient: skip CEPHX_V2
challenge if client doesn't support it") set out to skip authorizer
challenges for peers that don't support CEPHX_V2. However, it
made it so that authorizer challenges are skipped for all peers in
both msgr1 and msgr2 cases, effectively disabling the protection
against replay attacks that was put in place in commit f80b848d3f83
("auth/cephx: add authorizer challenge", CVE-2018-1128).
This is because con->get_features() always returns 0 at that
point. In msgr1 case, the peer shares its features along with the
authorizer, but while they are available in connect_msg.features they
aren't assigned to con until ProtocolV1::open(). In msgr2 case, the
peer doesn't share its features until much later (in CLIENT_IDENT
frame, i.e. after the authentication phase). The result is that
!CEPHX_V2 branch is taken in all cases and replay attack protection
is lost.
Only clusters with cephx_service_require_version set to 2 on the
service daemons would not be silently downgraded. But, since the
default is 1 and there are no reports of looping on BADAUTHORIZER
faults, I'm pretty sure that no one has ever done that. Note that
cephx_require_version set to 2 would have no effect even though it
is supposed to be stronger than cephx_service_require_version
because MonClient::handle_auth_request() didn't check it.
To fix:
- for msgr1, check connect_msg.features (as was done before commit
c58c5754dfd2) and challenge if CEPHX_V2 is supported. Together
with two preceding patches that resurrect proper cephx_* option
handling in msgr1, this covers both "I want old clients to work"
and "I wish to require better authentication" use cases.
- for msgr2, don't check anything and always challenge. CEPHX_V2
predates msgr2, anyone speaking msgr2 must support it.
Signed-off-by: Ilya Dryomov <[email protected]>
(cherry picked from commit 4a82c72e3bdddcb625933e83af8b50a444b961f1)
|
/*
 * Handle a read fault on a file-backed VMA.
 *
 * Returns a VM_FAULT_* result.  On the success path the faulting page
 * has been mapped via finish_fault() and unlocked; on error/retry paths
 * the page reference taken by ->fault() is dropped.
 */
static int do_read_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	int ret = 0;

	/*
	 * Let's call ->map_pages() first and use ->fault() as fallback
	 * if page by the offset is not ready to be mapped (cold cache or
	 * something).
	 */
	if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
		ret = do_fault_around(vmf);
		if (ret)
			return ret;
	}

	/* Fault-around didn't resolve it: read the page in via ->fault(). */
	ret = __do_fault(vmf);
	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
		return ret;

	/* Install the PTE; if that failed, release the page we hold. */
	ret |= finish_fault(vmf);
	unlock_page(vmf->page);
	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
		put_page(vmf->page);
	return ret;
}
| 0 |
[
"CWE-119"
] |
linux
|
1be7107fbe18eed3e319a6c3e83c78254b693acb
| 61,183,270,204,423,310,000,000,000,000,000,000,000 | 26 |
mm: larger stack guard gap, between vmas
Stack guard page is a useful feature to reduce a risk of stack smashing
into a different mapping. We have been using a single page gap which
is sufficient to prevent having stack adjacent to a different mapping.
But this seems to be insufficient in the light of the stack usage in
userspace. E.g. glibc uses as large as 64kB alloca() in many commonly
used functions. Others use constructs liks gid_t buffer[NGROUPS_MAX]
which is 256kB or stack strings with MAX_ARG_STRLEN.
This will become especially dangerous for suid binaries and the default
no limit for the stack size limit because those applications can be
tricked to consume a large portion of the stack and a single glibc call
could jump over the guard page. These attacks are not theoretical,
unfortunatelly.
Make those attacks less probable by increasing the stack guard gap
to 1MB (on systems with 4k pages; but make it depend on the page size
because systems with larger base pages might cap stack allocations in
the PAGE_SIZE units) which should cover larger alloca() and VLA stack
allocations. It is obviously not a full fix because the problem is
somehow inherent, but it should reduce attack space a lot.
One could argue that the gap size should be configurable from userspace,
but that can be done later when somebody finds that the new 1MB is wrong
for some special case applications. For now, add a kernel command line
option (stack_guard_gap) to specify the stack gap size (in page units).
Implementation wise, first delete all the old code for stack guard page:
because although we could get away with accounting one extra page in a
stack vma, accounting a larger gap can break userspace - case in point,
a program run with "ulimit -S -v 20000" failed when the 1MB gap was
counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK
and strict non-overcommit mode.
Instead of keeping gap inside the stack vma, maintain the stack guard
gap as a gap between vmas: using vm_start_gap() in place of vm_start
(or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few
places which need to respect the gap - mainly arch_get_unmapped_area(),
and and the vma tree's subtree_gap support for that.
Original-patch-by: Oleg Nesterov <[email protected]>
Original-patch-by: Michal Hocko <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Tested-by: Helge Deller <[email protected]> # parisc
Signed-off-by: Linus Torvalds <[email protected]>
|
int ldb_msg_add_value(struct ldb_message *msg,
const char *attr_name,
const struct ldb_val *val,
struct ldb_message_element **return_el)
{
struct ldb_message_element *el;
int ret;
el = ldb_msg_find_element(msg, attr_name);
if (!el) {
ret = ldb_msg_add_empty(msg, attr_name, 0, &el);
if (ret != LDB_SUCCESS) {
return ret;
}
}
ret = ldb_msg_element_add_value(msg->elements, el, val);
if (ret != LDB_SUCCESS) {
return ret;
}
if (return_el) {
*return_el = el;
}
return LDB_SUCCESS;
}
| 0 |
[
"CWE-200"
] |
samba
|
a2bb5beee82fd9c4c29decc07024057febeaf1b5
| 185,354,622,206,094,240,000,000,000,000,000,000,000 | 27 |
CVE-2022-32746 ldb: Ensure shallow copy modifications do not affect original message
Using the newly added ldb flag, we can now detect when a message has
been shallow-copied so that its elements share their values with the
original message elements. Then when adding values to the copied
message, we now make a copy of the shared values array first.
This should prevent a use-after-free that occurred in LDB modules when
new values were added to a shallow copy of a message by calling
talloc_realloc() on the original values array, invalidating the 'values'
pointer in the original message element. The original values pointer can
later be used in the database audit logging module which logs database
requests, and potentially cause a crash.
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009
Signed-off-by: Joseph Sutton <[email protected]>
|
/*
 * Map a (not necessarily NUL-terminated) action name of length namesz to
 * its MNT_TABDIFF_* id.  Case-insensitive.  On an unknown name a warning
 * is printed and -1 is returned.
 */
static int poll_action_name_to_id(const char *name, size_t namesz)
{
	static const struct {
		const char *name;
		size_t len;
		int id;
	} actions[] = {
		{ "move",    4, MNT_TABDIFF_MOVE },
		{ "mount",   5, MNT_TABDIFF_MOUNT },
		{ "umount",  6, MNT_TABDIFF_UMOUNT },
		{ "remount", 7, MNT_TABDIFF_REMOUNT },
	};
	size_t i;

	for (i = 0; i < sizeof(actions) / sizeof(actions[0]); i++) {
		/* Require an exact length match as well as a prefix match. */
		if (namesz == actions[i].len
		    && strncasecmp(name, actions[i].name, namesz) == 0)
			return actions[i].id;
	}

	warnx(_("unknown action: %s"), name);
	return -1;
}
| 0 |
[
"CWE-552",
"CWE-703"
] |
util-linux
|
166e87368ae88bf31112a30e078cceae637f4cdb
| 132,084,049,063,885,300,000,000,000,000,000,000,000 | 17 |
libmount: remove support for deleted mount table entries
The "(deleted)" suffix has been originally used by kernel for deleted
mountpoints. Since kernel commit 9d4d65748a5ca26ea8650e50ba521295549bf4e3
(Dec 2014) kernel does not use this suffix for mount stuff in /proc at
all. Let's remove this support from libmount too.
Signed-off-by: Karel Zak <[email protected]>
|
/*
 * Emit JIT code that steps STR_PTR back by one character, handling the
 * variable-width encodings per code-unit width.  The original contract
 * comment below describes register usage; the invalid-UTF paths call the
 * shared utfmoveback_invalid stub and (when backtracks is non-NULL)
 * branch to the backtrack list on an invalid sequence.
 */
static void move_back(compiler_common *common, jump_list **backtracks, BOOL must_be_valid)
{
/* Goes one character back. Affects STR_PTR and TMP1. If must_be_valid is TRUE,
TMP2 is not used. Otherwise TMP2 must contain the start of the subject buffer,
and it is destroyed. Does not modify STR_PTR for invalid character sequences. */
DEFINE_COMPILER;
#if defined SUPPORT_UNICODE && PCRE2_CODE_UNIT_WIDTH != 32
struct sljit_jump *jump;
#endif

#ifdef SUPPORT_UNICODE
#if PCRE2_CODE_UNIT_WIDTH == 8
struct sljit_label *label;

if (common->utf)
  {
  if (!must_be_valid && common->invalid_utf)
    {
    /* Fast path for ASCII bytes (< 0x80); everything else goes through
       the shared invalid-UTF move-back helper. */
    OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), -IN_UCHARS(1));
    OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
    jump = CMP(SLJIT_LESS, TMP1, 0, SLJIT_IMM, 0x80);
    add_jump(compiler, &common->utfmoveback_invalid, JUMP(SLJIT_FAST_CALL));
    if (backtracks != NULL)
      add_jump(compiler, backtracks, CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, 0));
    JUMPHERE(jump);
    return;
    }

  /* Valid UTF-8: skip backwards over continuation bytes (10xxxxxx). */
  label = LABEL();
  OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), -IN_UCHARS(1));
  OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
  OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, 0xc0);
  CMPTO(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, 0x80, label);
  return;
  }
#elif PCRE2_CODE_UNIT_WIDTH == 16
if (common->utf)
  {
  OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), -IN_UCHARS(1));
  OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));

  if (!must_be_valid && common->invalid_utf)
    {
    /* Any surrogate code unit (0xd800..0xdfff) is handled by the shared
       invalid-UTF move-back helper. */
    OP2(SLJIT_SUB, TMP1, 0, TMP1, 0, SLJIT_IMM, 0xd800);
    jump = CMP(SLJIT_GREATER_EQUAL, TMP1, 0, SLJIT_IMM, 0xe000 - 0xd800);
    add_jump(compiler, &common->utfmoveback_invalid, JUMP(SLJIT_FAST_CALL));
    if (backtracks != NULL)
      add_jump(compiler, backtracks, CMP(SLJIT_EQUAL, TMP1, 0, SLJIT_IMM, 0));
    JUMPHERE(jump);
    return;
    }

  /* Skip low surrogate if necessary. */
  OP2(SLJIT_AND, TMP1, 0, TMP1, 0, SLJIT_IMM, 0xfc00);
  OP2U(SLJIT_SUB | SLJIT_SET_Z, TMP1, 0, SLJIT_IMM, 0xdc00);
  OP_FLAGS(SLJIT_MOV, TMP1, 0, SLJIT_EQUAL);
  OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, UCHAR_SHIFT);
  OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, TMP1, 0);
  return;
  }
#elif PCRE2_CODE_UNIT_WIDTH == 32
if (common->invalid_utf && !must_be_valid)
  {
  /* Code points >= 0x110000 are invalid; either backtrack on them or
     leave STR_PTR unmoved (the flag/shift sequence subtracts one unit
     only when the value is valid). */
  OP1(MOV_UCHAR, TMP1, 0, SLJIT_MEM1(STR_PTR), -IN_UCHARS(1));
  if (backtracks != NULL)
    {
    add_jump(compiler, backtracks, CMP(SLJIT_GREATER_EQUAL, TMP1, 0, SLJIT_IMM, 0x110000));
    OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
    return;
    }
  OP2U(SLJIT_SUB | SLJIT_SET_LESS, TMP1, 0, SLJIT_IMM, 0x110000);
  OP_FLAGS(SLJIT_MOV, TMP1, 0, SLJIT_LESS);
  OP2(SLJIT_SHL, TMP1, 0, TMP1, 0, SLJIT_IMM, UCHAR_SHIFT);
  OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, TMP1, 0);
  return;
  }
#endif /* PCRE2_CODE_UNIT_WIDTH == [8|16|32] */
#endif /* SUPPORT_UNICODE */

/* Fixed-width case: simply step back one code unit. */
SLJIT_UNUSED_ARG(backtracks);
SLJIT_UNUSED_ARG(must_be_valid);
OP2(SLJIT_SUB, STR_PTR, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(1));
}
| 0 |
[
"CWE-125"
] |
pcre2
|
50a51cb7e67268e6ad417eb07c9de9bfea5cc55a
| 130,451,376,165,842,210,000,000,000,000,000,000,000 | 86 |
Fixed a unicode properrty matching issue in JIT
|
/* Store the significant-bit (sBIT) information on the info struct and
 * mark the sBIT chunk as valid.  Silently does nothing when any of the
 * three pointers is NULL. */
png_set_sBIT(png_const_structrp png_ptr, png_inforp info_ptr,
    png_const_color_8p sig_bit)
{
   png_debug1(1, "in %s storage function", "sBIT");

   if (png_ptr == NULL)
      return;
   if (info_ptr == NULL)
      return;
   if (sig_bit == NULL)
      return;

   info_ptr->sig_bit = *sig_bit;
   info_ptr->valid |= PNG_INFO_sBIT;
}
| 0 |
[
"CWE-120"
] |
libpng
|
a901eb3ce6087e0afeef988247f1a1aa208cb54d
| 142,339,061,585,622,760,000,000,000,000,000,000,000 | 11 |
[libpng16] Prevent reading over-length PLTE chunk (Cosmin Truta).
|
QPDF_Stream::getDict() const
{
    // Accessor: hand back the stream's dictionary object.
    return stream_dict;
}
| 0 |
[
"CWE-787"
] |
qpdf
|
d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e
| 306,799,944,395,374,800,000,000,000,000,000,000,000 | 4 |
Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition.
|
//! Rotate the image in place by \p angle degrees (see get_rotate() for
//! the interpolation / boundary-condition parameters).  An angle that is
//! a multiple of 360 is a no-op.
CImg<T>& rotate(const float angle, const unsigned int interpolation=1,
                const unsigned int boundary_conditions=0) {
  const float a = cimg::mod(angle,360.f);
  if (a!=0.f)
    return get_rotate(a,interpolation,boundary_conditions).move_to(*this);
  return *this;
}
| 0 |
[
"CWE-770"
] |
cimg
|
619cb58dd90b4e03ac68286c70ed98acbefd1c90
| 318,252,358,564,504,500,000,000,000,000,000,000,000 | 6 |
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size.
|
/*
 * Parser hook fired when the URL has been fully consumed.  Invokes the
 * user's on_url_complete callback (if registered) via CALLBACK_MAYBE,
 * which stores the callback's return value into `err`, and propagates
 * that value to the parser core.
 */
int llhttp__on_url_complete(llhttp_t* s, const char* p, const char* endp) {
  int err;
  /* NOTE: CALLBACK_MAYBE assigns `err`; it is not left uninitialized
   * on any path the macro generates. */
  CALLBACK_MAYBE(s, on_url_complete);
  return err;
}
| 0 |
[
"CWE-444"
] |
node
|
af488f8dc82d69847992ea1cd2f53dc8082b3b91
| 274,267,362,995,425,600,000,000,000,000,000,000,000 | 5 |
deps: update llhttp to 6.0.4
Refs: https://hackerone.com/reports/1238099
Refs: https://hackerone.com/reports/1238709
Refs: https://github.com/nodejs-private/llhttp-private/pull/6
Refs: https://github.com/nodejs-private/llhttp-private/pull/5
CVE-ID: CVE-2021-22959
CVE-ID: CVE-2021-22960
PR-URL: https://github.com/nodejs-private/node-private/pull/284
Reviewed-By: Akshay K <[email protected]>
Reviewed-By: James M Snell <[email protected]>
Reviewed-By: Robert Nagy <[email protected]>
|
/*
 * Search every RCAP on the context for the given client id.  Returns the
 * RCAP that contains it (and, when pposition is non-NULL, the index of
 * the client within that RCAP's client list), or NULL if not found.
 */
RecordFindClientOnContext(RecordContextPtr pContext,
                          XID clientspec, int *pposition)
{
    RecordClientsAndProtocolPtr pRCAP;

    for (pRCAP = pContext->pListOfRCAP; pRCAP != NULL;
         pRCAP = pRCAP->pNextRCAP) {
        int idx;

        for (idx = 0; idx < pRCAP->numClients; idx++) {
            if (pRCAP->pClientIDs[idx] != clientspec)
                continue;
            if (pposition != NULL)
                *pposition = idx;
            return pRCAP;
        }
    }
    return NULL;
}                               /* RecordFindClientOnContext */
| 0 |
[
"CWE-191"
] |
xserver
|
2902b78535ecc6821cc027351818b28a5c7fdbdc
| 270,396,251,550,962,350,000,000,000,000,000,000,000 | 18 |
Fix XRecordRegisterClients() Integer underflow
CVE-2020-14362 ZDI-CAN-11574
This vulnerability was discovered by:
Jan-Niklas Sohn working with Trend Micro Zero Day Initiative
Signed-off-by: Matthieu Herrb <[email protected]>
|
/*
 * Tear down the per-CPU perf context of every registered PMU for `cpu`
 * (CPU hot-unplug path).  For each PMU, __perf_event_exit_context is run
 * on the target CPU via smp_call_function_single() while the context
 * mutex is held; pmus_srcu protects the RCU walk of the pmus list.
 */
static void perf_event_exit_cpu_context(int cpu)
{
	struct perf_event_context *ctx;
	struct pmu *pmu;
	int idx;

	idx = srcu_read_lock(&pmus_srcu);
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;

		mutex_lock(&ctx->mutex);
		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
		mutex_unlock(&ctx->mutex);
	}
	srcu_read_unlock(&pmus_srcu, idx);
}
| 0 |
[
"CWE-703",
"CWE-189"
] |
linux
|
8176cced706b5e5d15887584150764894e94e02f
| 272,223,862,006,372,170,000,000,000,000,000,000,000 | 16 |
perf: Treat attr.config as u64 in perf_swevent_init()
Trinity discovered that we fail to check all 64 bits of
attr.config passed by user space, resulting to out-of-bounds
access of the perf_swevent_enabled array in
sw_perf_event_destroy().
Introduced in commit b0a873ebb ("perf: Register PMU
implementations").
Signed-off-by: Tommi Rantala <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: [email protected]
Cc: Paul Mackerras <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
|
/*
 * Read the children of this master element from the stream.
 *
 * Clears any existing (unlocked) child elements, then iterates over the
 * stream up to this element's size (or 0x7FFFFFFF for infinite-sized
 * masters), recursively reading each child.  Dummy elements are skipped
 * unless AllowDummyElt is set; children that could not be fully read
 * under SCOPE_ALL_DATA are discarded.  UpperEltFound/FoundElt propagate
 * an element belonging to an upper level back to the caller.  Finally,
 * an embedded EbmlCrc32 child (if any) is extracted into Checksum and
 * removed from the child list.
 */
void EbmlMaster::Read(EbmlStream & inDataStream, const EbmlSemanticContext & sContext, int & UpperEltFound, EbmlElement * & FoundElt, bool AllowDummyElt, ScopeMode ReadFully)
{
  if (ReadFully == SCOPE_NO_DATA)
    return;

  EbmlElement * ElementLevelA;
  // remove all existing elements, including the mandatory ones...
  size_t Index;
  for (Index=0; Index<ElementList.size(); Index++) {
    if (!(*ElementList[Index]).IsLocked()) {
      delete ElementList[Index];
    }
  }
  ElementList.clear();
  uint64 MaxSizeToRead;

  if (IsFiniteSize())
    MaxSizeToRead = GetSize();
  else
    MaxSizeToRead = 0x7FFFFFFF;

  // read blocks and discard the ones we don't care about
  if (MaxSizeToRead > 0)
  {
    inDataStream.I_O().setFilePointer(GetSizePosition() + GetSizeLength(), seek_beginning);
    ElementLevelA = inDataStream.FindNextElement(sContext, UpperEltFound, MaxSizeToRead, AllowDummyElt);
    while (ElementLevelA != NULL && UpperEltFound <= 0 && MaxSizeToRead > 0) {
      if (IsFiniteSize() && ElementLevelA->IsFiniteSize())
        MaxSizeToRead = GetEndPosition() - ElementLevelA->GetEndPosition(); // even if it's the default value

      if (!AllowDummyElt && ElementLevelA->IsDummy()) {
        if (ElementLevelA->IsFiniteSize()) {
          ElementLevelA->SkipData(inDataStream, sContext);
          delete ElementLevelA; // forget this unknown element
        } else {
          // infinite-sized dummy: cannot skip past it, stop reading here
          delete ElementLevelA; // forget this unknown element
          break;
        }
      } else {
        ElementLevelA->Read(inDataStream, EBML_CONTEXT(ElementLevelA), UpperEltFound, FoundElt, AllowDummyElt, ReadFully);

        // Discard elements that couldn't be read properly if
        // SCOPE_ALL_DATA has been requested. This can happen
        // e.g. if block data is defective.
        bool DeleteElement = true;

        if (ElementLevelA->ValueIsSet() || (ReadFully != SCOPE_ALL_DATA)) {
          ElementList.push_back(ElementLevelA);
          DeleteElement = false;
        }

        // just in case
        if (ElementLevelA->IsFiniteSize()) {
          ElementLevelA->SkipData(inDataStream, EBML_CONTEXT(ElementLevelA));
          if (DeleteElement)
            delete ElementLevelA;
        } else {
          if (DeleteElement)
            delete ElementLevelA;

          if (UpperEltFound) {
            // the recursive Read() found an upper-level element
            --UpperEltFound;
            if (UpperEltFound > 0 || MaxSizeToRead <= 0)
              goto processCrc;
            ElementLevelA = FoundElt;
          }
          break;
        }
      }

      if (UpperEltFound > 0) {
        UpperEltFound--;
        if (UpperEltFound > 0 || MaxSizeToRead <= 0)
          goto processCrc;
        ElementLevelA = FoundElt;
        continue;
      }

      if (UpperEltFound < 0) {
        UpperEltFound++;
        if (UpperEltFound < 0)
          goto processCrc;
      }

      if (MaxSizeToRead <= 0)
        goto processCrc;// this level is finished

      ElementLevelA = inDataStream.FindNextElement(sContext, UpperEltFound, MaxSizeToRead, AllowDummyElt);
    }
    if (UpperEltFound > 0) {
      FoundElt = ElementLevelA;
    }
  }
processCrc:
  // Extract an embedded CRC-32 child, if present, into Checksum.
  EBML_MASTER_ITERATOR Itr, CrcItr;
  for (Itr = ElementList.begin(); Itr != ElementList.end();) {
    if ((EbmlId)(*(*Itr)) == EBML_ID(EbmlCrc32)) {
      bChecksumUsed = true;
      // remove the element
      Checksum = *(static_cast<EbmlCrc32*>(*Itr));
      CrcItr = Itr;
      break;
    }
    ++Itr;
  }

  if (bChecksumUsed)
  {
    delete *CrcItr;
    Remove(CrcItr);
  }

  SetValueIsSet();
}
| 0 |
[
"CWE-703"
] |
libebml
|
88409e2a94dd3b40ff81d08bf6d92f486d036b24
| 19,796,853,357,842,760,000,000,000,000,000,000,000 | 115 |
EbmlMaster: propagate upper level element after infinite sized one correctly
When the parser encountered a deeply nested element with an infinite
size then a following element of an upper level was not propagated
correctly. Instead the element with the infinite size was added into the
EBML element tree a second time resulting in memory access after freeing
it and multiple attempts to free the same memory address during
destruction.
Fixes the issue reported as Cisco TALOS-CAN-0037.
|
/* Result type of this item: delegated to its type handler so the answer
   always tracks the item's current data type. */
Item_result result_type () const
{
  return type_handler()->result_type();
}
| 0 |
[
"CWE-416",
"CWE-703"
] |
server
|
08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917
| 91,867,073,371,373,710,000,000,000,000,000,000,000 | 4 |
MDEV-24176 Server crashes after insert in the table with virtual
column generated using date_format() and if()
vcol_info->expr is allocated on expr_arena at parsing stage. Since
expr item is allocated on expr_arena all its containee items must be
allocated on expr_arena too. Otherwise fix_session_expr() will
encounter prematurely freed item.
When table is reopened from cache vcol_info contains stale
expression. We refresh expression via TABLE::vcol_fix_exprs() but
first we must prepare a proper context (Vcol_expr_context) which meets
some requirements:
1. As noted above expr update must be done on expr_arena as there may
be new items created. It was a bug in fix_session_expr_for_read() and
was just not reproduced because of no second refix. Now refix is done
for more cases so it does reproduce. Tests affected: vcol.binlog
2. Also name resolution context must be narrowed to the single table.
Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes
3. sql_mode must be clean and not fail expr update.
sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc
must not affect vcol expression update. If the table was created
successfully any further evaluation must not fail. Tests affected:
main.func_like
Reviewed by: Sergei Golubchik <[email protected]>
|
/* Allocate a fresh, empty method table.  Entries are added lazily; the
 * backing array stays NULL until the first insertion. */
mt_new(mrb_state *mrb)
{
  mt_tbl *tbl;

  tbl = (mt_tbl*)mrb_malloc(mrb, sizeof(*tbl));
  tbl->size = 0;
  tbl->alloc = 0;
  tbl->table = NULL;

  return tbl;
}
| 0 |
[
"CWE-476",
"CWE-190"
] |
mruby
|
f5e10c5a79a17939af763b1dcf5232ce47e24a34
| 15,262,612,009,271,964,000,000,000,000,000,000,000 | 11 |
proc.c: add `mrb_state` argument to `mrb_proc_copy()`.
The function may invoke the garbage collection and it requires
`mrb_state` to run.
|
/*
 * Emit all buffered log messages at or below `log_level` through the
 * writer, each wrapped in `section_id` inside an enclosing `section_ids`
 * section.  The shared log buffer is drained (clear_log) under
 * log_mutex.  Always returns 0.
 */
static int show_log(WriterContext *w, int section_ids, int section_id, int log_level)
{
    int i;
    pthread_mutex_lock(&log_mutex);
    if (!log_buffer_size) {
        /* Nothing buffered: emit no section at all. */
        pthread_mutex_unlock(&log_mutex);
        return 0;
    }
    writer_print_section_header(w, section_ids);

    for (i=0; i<log_buffer_size; i++) {
        if (log_buffer[i].log_level <= log_level) {
            writer_print_section_header(w, section_id);
            print_str("context", log_buffer[i].context_name);
            print_int("level", log_buffer[i].log_level);
            print_int("category", log_buffer[i].category);
            if (log_buffer[i].parent_name) {
                print_str("parent_context", log_buffer[i].parent_name);
                print_int("parent_category", log_buffer[i].parent_category);
            } else {
                /* No parent context recorded: print explicit N/A fields. */
                print_str_opt("parent_context", "N/A");
                print_str_opt("parent_category", "N/A");
            }
            print_str("message", log_buffer[i].log_message);
            writer_print_section_footer(w);
        }
    }
    clear_log(0);
    pthread_mutex_unlock(&log_mutex);

    writer_print_section_footer(w);
    return 0;
}
| 0 |
[
"CWE-476"
] |
FFmpeg
|
837cb4325b712ff1aab531bf41668933f61d75d2
| 22,023,459,638,883,967,000,000,000,000,000,000,000 | 34 |
ffprobe: Fix null pointer dereference with color primaries
Found-by: AD-lab of venustech
Signed-off-by: Michael Niedermayer <[email protected]>
|
/*
 * Decode Phase One compressed raw data (compressed formats; format 6 is
 * rejected up front in the LibRaw build).
 *
 * Reads per-row strip offsets and the per-column/per-row black-level
 * tables, then Huffman/delta-decodes each row into RAW(row,col).  In
 * the LibRaw build the black tables are also copied into
 * imgdata.rawdata.ph1_cblack / ph1_rblack and black subtraction is
 * deferred (raw values are stored shifted left by 2).
 */
void CLASS phase_one_load_raw_c()
{
  /* Code lengths for the variable-length delta codes. */
  static const int length[] = { 8,7,6,9,11,10,5,12,14,13 };
  int *offset, len[2], pred[2], row, col, i, j;
  ushort *pixel;
  short (*c_black)[2], (*r_black)[2];
#ifdef LIBRAW_LIBRARY_BUILD
  if(ph1.format == 6)
    throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif

  /* One buffer holds the row pixels, the strip offsets, and both
     black-level tables (column-wise then row-wise). */
  pixel = (ushort *) calloc (raw_width*3 + raw_height*4, 2);
  merror (pixel, "phase_one_load_raw_c()");
  offset = (int *) (pixel + raw_width);
  fseek (ifp, strip_offset, SEEK_SET);
  for (row=0; row < raw_height; row++)
    offset[row] = get4();
  c_black = (short (*)[2]) (offset + raw_height);
  fseek (ifp, ph1.black_col, SEEK_SET);
  if (ph1.black_col)
    read_shorts ((ushort *) c_black[0], raw_height*2);
  r_black = c_black + raw_height;
  fseek (ifp, ph1.black_row, SEEK_SET);
  if (ph1.black_row)
    read_shorts ((ushort *) r_black[0], raw_width*2);

#ifdef LIBRAW_LIBRARY_BUILD
  // Copy data to internal copy (ever if not read)
  if (ph1.black_col || ph1.black_row )
    {
      imgdata.rawdata.ph1_cblack = (short(*)[2])calloc(raw_height*2,sizeof(ushort));
      merror(imgdata.rawdata.ph1_cblack,"phase_one_load_raw_c()");
      memmove(imgdata.rawdata.ph1_cblack,(ushort*)c_black[0],raw_height*2*sizeof(ushort));
      imgdata.rawdata.ph1_rblack = (short(*)[2])calloc(raw_width*2,sizeof(ushort));
      merror(imgdata.rawdata.ph1_rblack,"phase_one_load_raw_c()");
      memmove(imgdata.rawdata.ph1_rblack,(ushort*)r_black[0],raw_width*2*sizeof(ushort));
    }
#endif

  /* Tone curve applied to small values when ph1.format == 5. */
  for (i=0; i < 256; i++)
    curve[i] = i*i / 3.969 + 0.5;
#ifdef LIBRAW_LIBRARY_BUILD
  try {
#endif
  for (row=0; row < raw_height; row++) {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    fseek (ifp, data_offset + offset[row], SEEK_SET);
    ph1_bits(-1);
    pred[0] = pred[1] = 0;
    for (col=0; col < raw_width; col++) {
      /* The last (raw_width & 7) columns are stored as raw 14-bit
         values; otherwise code lengths are refreshed every 8 columns. */
      if (col >= (raw_width & -8))
        len[0] = len[1] = 14;
      else if ((col & 7) == 0)
        for (i=0; i < 2; i++) {
          for (j=0; j < 5 && !ph1_bits(1); j++);
          if (j--) len[i] = length[j*2 + ph1_bits(1)];
        }
      if ((i = len[col & 1]) == 14)
        pixel[col] = pred[col & 1] = ph1_bits(16);
      else
        pixel[col] = pred[col & 1] += ph1_bits(i) + 1 - (1 << (i - 1));
      if (pred[col & 1] >> 16) derror();
      if (ph1.format == 5 && pixel[col] < 256)
        pixel[col] = curve[pixel[col]];
    }
    for (col=0; col < raw_width; col++) {
#ifndef LIBRAW_LIBRARY_BUILD
      /* Non-LibRaw build: apply black subtraction immediately. */
      i = (pixel[col] << 2) - ph1.t_black
	+ c_black[row][col >= ph1.split_col]
	+ r_black[col][row >= ph1.split_row];
      if (i > 0) RAW(row,col) = i;
#else
      RAW(row,col) = pixel[col] << 2;
#endif
    }
  }
#ifdef LIBRAW_LIBRARY_BUILD
  } catch(...) {
    /* Guarantee the pixel buffer is released on cancel/decode errors. */
    free (pixel);
    throw;
  }
#endif
  free (pixel);
  maximum = 0xfffc - ph1.t_black;
}
| 0 |
[
"CWE-129"
] |
LibRaw
|
89d065424f09b788f443734d44857289489ca9e2
| 251,209,687,564,409,150,000,000,000,000,000,000,000 | 87 |
fixed two more problems found by fuzzer
|
/*
 * Per-database hook invoked before dumping views.  There is no view
 * initialization to perform here, so this is a no-op that always
 * reports success (0); the parameter exists to satisfy the common
 * init_dumping callback signature.
 */
int init_dumping_views(char *qdatabase MY_ATTRIBUTE((unused)))
{
  return 0;
} /* init_dumping_views */
| 0 |
[
"CWE-319"
] |
mysql-server
|
0002e1380d5f8c113b6bce91f2cf3f75136fd7c7
| 22,689,030,194,706,620,000,000,000,000,000,000,000 | 4 |
BUG#25575605: SETTING --SSL-MODE=REQUIRED SENDS CREDENTIALS BEFORE VERIFYING SSL CONNECTION
MYSQL_OPT_SSL_MODE option introduced.
It is set in case of --ssl-mode=REQUIRED and permits only SSL connection.
(cherry picked from commit f91b941842d240b8a62645e507f5554e8be76aec)
|
/*
 * Find the ping socket that should receive `skb` addressed to the given
 * identifier.
 *
 * A socket matches when: its bound identifier equals `ident`, its
 * address family agrees with the skb protocol (IPv4/IPv6), its bound
 * local address (if any) equals the packet's destination address, and
 * its bound device (if any) equals the receiving interface.  Returns
 * the matching socket with a reference held (sock_hold), or NULL.
 */
static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
{
	struct hlist_nulls_head *hslot = ping_hashslot(&ping_table, net, ident);
	struct sock *sk = NULL;
	struct inet_sock *isk;
	struct hlist_nulls_node *hnode;
	int dif = skb->dev->ifindex;

	if (skb->protocol == htons(ETH_P_IP)) {
		pr_debug("try to find: num = %d, daddr = %pI4, dif = %d\n",
			 (int)ident, &ip_hdr(skb)->daddr, dif);
#if IS_ENABLED(CONFIG_IPV6)
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		pr_debug("try to find: num = %d, daddr = %pI6c, dif = %d\n",
			 (int)ident, &ipv6_hdr(skb)->daddr, dif);
#endif
	}

	read_lock_bh(&ping_table.lock);

	ping_portaddr_for_each_entry(sk, hnode, hslot) {
		isk = inet_sk(sk);

		pr_debug("iterate\n");
		if (isk->inet_num != ident)
			continue;

		if (skb->protocol == htons(ETH_P_IP) &&
		    sk->sk_family == AF_INET) {
			pr_debug("found: %p: num=%d, daddr=%pI4, dif=%d\n", sk,
				 (int) isk->inet_num, &isk->inet_rcv_saddr,
				 sk->sk_bound_dev_if);

			/* Bound to a local address that doesn't match? */
			if (isk->inet_rcv_saddr &&
			    isk->inet_rcv_saddr != ip_hdr(skb)->daddr)
				continue;
#if IS_ENABLED(CONFIG_IPV6)
		} else if (skb->protocol == htons(ETH_P_IPV6) &&
			   sk->sk_family == AF_INET6) {

			pr_debug("found: %p: num=%d, daddr=%pI6c, dif=%d\n", sk,
				 (int) isk->inet_num,
				 &sk->sk_v6_rcv_saddr,
				 sk->sk_bound_dev_if);

			if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
			    !ipv6_addr_equal(&sk->sk_v6_rcv_saddr,
					     &ipv6_hdr(skb)->daddr))
				continue;
#endif
		} else {
			/* Family/protocol mismatch. */
			continue;
		}

		/* Bound to a device other than the receiving one? */
		if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
			continue;

		sock_hold(sk);
		goto exit;
	}

	sk = NULL;
exit:
	read_unlock_bh(&ping_table.lock);

	return sk;
}
| 0 |
[
"CWE-703"
] |
linux
|
a134f083e79fb4c3d0a925691e732c56911b4326
| 314,532,630,052,594,780,000,000,000,000,000,000,000 | 67 |
ipv4: Missing sk_nulls_node_init() in ping_unhash().
If we don't do that, then the poison value is left in the ->pprev
backlink.
This can cause crashes if we do a disconnect, followed by a connect().
Tested-by: Linus Torvalds <[email protected]>
Reported-by: Wen Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
/* Setter for the SNMP "oid_output_format" property.  Accepts any zval,
 * coercing it to long if needed, and stores it on the object when it is
 * one of the NETSNMP_OID_OUTPUT_* constants.  Returns SUCCESS, or
 * FAILURE (with an E_WARNING) for an unrecognised format value. */
static int php_snmp_write_oid_output_format(php_snmp_object *snmp_object, zval *newval TSRMLS_DC)
{
	zval converted;
	int used_copy = 0;
	int status = SUCCESS;
	long format;

	/* Work on a long-typed copy when the supplied zval is not already
	 * a long, so the caller's value is left untouched. */
	if (Z_TYPE_P(newval) != IS_LONG) {
		converted = *newval;
		zval_copy_ctor(&converted);
		convert_to_long(&converted);
		newval = &converted;
		used_copy = 1;
	}

	format = Z_LVAL_P(newval);
	switch (format) {
		case NETSNMP_OID_OUTPUT_SUFFIX:
		case NETSNMP_OID_OUTPUT_MODULE:
		case NETSNMP_OID_OUTPUT_FULL:
		case NETSNMP_OID_OUTPUT_NUMERIC:
		case NETSNMP_OID_OUTPUT_UCD:
		case NETSNMP_OID_OUTPUT_NONE:
			snmp_object->oid_output_format = format;
			break;
		default:
			php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unknown SNMP output print format '%ld'", format);
			status = FAILURE;
			break;
	}

	/* Release the temporary only if we actually created one. */
	if (used_copy) {
		zval_dtor(&converted);
	}
	return status;
}
| 1 |
[
"CWE-416"
] |
php-src
|
cab1c3b3708eead315e033359d07049b23b147a3
| 323,425,880,239,979,100,000,000,000,000,000,000,000 | 31 |
Fixed bug #72479 - same as #72434
|
/*
 * futex_wait_requeue_pi() - wait on @uaddr, expecting to be requeued by
 * another task onto the PI-aware futex @uaddr2 and to acquire its rt_mutex.
 * @uaddr:    non-PI futex to wait on first
 * @flags:    FLAGS_SHARED / FLAGS_CLOCKRT etc.
 * @val:      expected value at @uaddr (wait only if it still matches)
 * @abs_time: optional absolute timeout
 * @bitset:   wakeup bitset; must be non-zero
 * @uaddr2:   PI futex we will be requeued to; must differ from @uaddr
 *
 * Returns 0 on success (rt_mutex at @uaddr2 acquired), or a negative
 * errno (-EINVAL, -EWOULDBLOCK, -ETIMEDOUT, fault codes, ...).
 */
static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
				 u32 val, ktime_t *abs_time, u32 bitset,
				 u32 __user *uaddr2)
{
	struct hrtimer_sleeper timeout, *to = NULL;
	struct futex_pi_state *pi_state = NULL;
	struct rt_mutex_waiter rt_waiter;
	struct futex_hash_bucket *hb;
	union futex_key key2 = FUTEX_KEY_INIT;
	struct futex_q q = futex_q_init;
	int res, ret;

	/* Requeueing a futex onto itself makes no sense. */
	if (uaddr == uaddr2)
		return -EINVAL;

	if (!bitset)
		return -EINVAL;

	/* Arm an absolute-time sleeper on the stack if a timeout was given. */
	if (abs_time) {
		to = &timeout;
		hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
				      CLOCK_REALTIME : CLOCK_MONOTONIC,
				      HRTIMER_MODE_ABS);
		hrtimer_init_sleeper(to, current);
		hrtimer_set_expires_range_ns(&to->timer, *abs_time,
					     current->timer_slack_ns);
	}

	/*
	 * The waiter is allocated on our stack, manipulated by the requeue
	 * code while we sleep on uaddr.
	 */
	rt_mutex_init_waiter(&rt_waiter);

	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
	if (unlikely(ret != 0))
		goto out;

	q.bitset = bitset;
	q.rt_waiter = &rt_waiter;
	q.requeue_pi_key = &key2;

	/*
	 * Prepare to wait on uaddr. On success, increments q.key (key1) ref
	 * count.
	 */
	ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
	if (ret)
		goto out_key2;

	/*
	 * The check above which compares uaddrs is not sufficient for
	 * shared futexes. We need to compare the keys:
	 */
	if (match_futex(&q.key, &key2)) {
		queue_unlock(hb);
		ret = -EINVAL;
		goto out_put_keys;
	}

	/* Queue the futex_q, drop the hb lock, wait for wakeup. */
	futex_wait_queue_me(hb, &q, to);

	spin_lock(&hb->lock);
	ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
	spin_unlock(&hb->lock);
	if (ret)
		goto out_put_keys;

	/*
	 * In order for us to be here, we know our q.key == key2, and since
	 * we took the hb->lock above, we also know that futex_requeue() has
	 * completed and we no longer have to concern ourselves with a wakeup
	 * race with the atomic proxy lock acquisition by the requeue code. The
	 * futex_requeue dropped our key1 reference and incremented our key2
	 * reference count.
	 */

	/* Check if the requeue code acquired the second futex for us. */
	if (!q.rt_waiter) {
		/*
		 * Got the lock. We might not be the anticipated owner if we
		 * did a lock-steal - fix up the PI-state in that case.
		 */
		if (q.pi_state && (q.pi_state->owner != current)) {
			spin_lock(q.lock_ptr);
			ret = fixup_pi_state_owner(uaddr2, &q, current);
			if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
				pi_state = q.pi_state;
				get_pi_state(pi_state);
			}
			/*
			 * Drop the reference to the pi state which
			 * the requeue_pi() code acquired for us.
			 */
			put_pi_state(q.pi_state);
			spin_unlock(q.lock_ptr);
		}
	} else {
		struct rt_mutex *pi_mutex;

		/*
		 * We have been woken up by futex_unlock_pi(), a timeout, or a
		 * signal. futex_unlock_pi() will not destroy the lock_ptr nor
		 * the pi_state.
		 */
		WARN_ON(!q.pi_state);
		pi_mutex = &q.pi_state->pi_mutex;
		ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);

		spin_lock(q.lock_ptr);
		if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
			ret = 0;

		debug_rt_mutex_free_waiter(&rt_waiter);
		/*
		 * Fixup the pi_state owner and possibly acquire the lock if we
		 * haven't already.
		 */
		res = fixup_owner(uaddr2, &q, !ret);
		/*
		 * If fixup_owner() returned an error, propagate that. If it
		 * acquired the lock, clear -ETIMEDOUT or -EINTR.
		 */
		if (res)
			ret = (res < 0) ? res : 0;

		/*
		 * If fixup_pi_state_owner() faulted and was unable to handle
		 * the fault, unlock the rt_mutex and return the fault to
		 * userspace.
		 */
		if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
			pi_state = q.pi_state;
			get_pi_state(pi_state);
		}

		/* Unqueue and drop the lock. */
		unqueue_me_pi(&q);
	}

	if (pi_state) {
		rt_mutex_futex_unlock(&pi_state->pi_mutex);
		put_pi_state(pi_state);
	}

	if (ret == -EINTR) {
		/*
		 * We've already been requeued, but cannot restart by calling
		 * futex_lock_pi() directly. We could restart this syscall, but
		 * it would detect that the user space "val" changed and return
		 * -EWOULDBLOCK. Save the overhead of the restart and return
		 * -EWOULDBLOCK directly.
		 */
		ret = -EWOULDBLOCK;
	}

out_put_keys:
	put_futex_key(&q.key);
out_key2:
	put_futex_key(&key2);

out:
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
	return ret;
}
| 0 |
[
"CWE-416"
] |
linux
|
48fb6f4db940e92cfb16cd878cddd59ea6120d06
| 326,571,070,555,940,800,000,000,000,000,000,000,000 | 169 |
futex: Remove unnecessary warning from get_futex_key
Commit 65d8fc777f6d ("futex: Remove requirement for lock_page() in
get_futex_key()") removed an unnecessary lock_page() with the
side-effect that page->mapping needed to be treated very carefully.
Two defensive warnings were added in case any assumption was missed and
the first warning assumed a correct application would not alter a
mapping backing a futex key. Since merging, it has not triggered for
any unexpected case but Mark Rutland reported the following bug
triggering due to the first warning.
kernel BUG at kernel/futex.c:679!
Internal error: Oops - BUG: 0 [#1] PREEMPT SMP
Modules linked in:
CPU: 0 PID: 3695 Comm: syz-executor1 Not tainted 4.13.0-rc3-00020-g307fec773ba3 #3
Hardware name: linux,dummy-virt (DT)
task: ffff80001e271780 task.stack: ffff000010908000
PC is at get_futex_key+0x6a4/0xcf0 kernel/futex.c:679
LR is at get_futex_key+0x6a4/0xcf0 kernel/futex.c:679
pc : [<ffff00000821ac14>] lr : [<ffff00000821ac14>] pstate: 80000145
The fact that it's a bug instead of a warning was due to an unrelated
arm64 problem, but the warning itself triggered because the underlying
mapping changed.
This is an application issue but from a kernel perspective it's a
recoverable situation and the warning is unnecessary so this patch
removes the warning. The warning may potentially be triggered with the
following test program from Mark although it may be necessary to adjust
NR_FUTEX_THREADS to be a value smaller than the number of CPUs in the
system.
#include <linux/futex.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <unistd.h>
#define NR_FUTEX_THREADS 16
pthread_t threads[NR_FUTEX_THREADS];
void *mem;
#define MEM_PROT (PROT_READ | PROT_WRITE)
#define MEM_SIZE 65536
static int futex_wrapper(int *uaddr, int op, int val,
const struct timespec *timeout,
int *uaddr2, int val3)
{
syscall(SYS_futex, uaddr, op, val, timeout, uaddr2, val3);
}
void *poll_futex(void *unused)
{
for (;;) {
futex_wrapper(mem, FUTEX_CMP_REQUEUE_PI, 1, NULL, mem + 4, 1);
}
}
int main(int argc, char *argv[])
{
int i;
mem = mmap(NULL, MEM_SIZE, MEM_PROT,
MAP_SHARED | MAP_ANONYMOUS, -1, 0);
printf("Mapping @ %p\n", mem);
printf("Creating futex threads...\n");
for (i = 0; i < NR_FUTEX_THREADS; i++)
pthread_create(&threads[i], NULL, poll_futex, NULL);
printf("Flipping mapping...\n");
for (;;) {
mmap(mem, MEM_SIZE, MEM_PROT,
MAP_FIXED | MAP_SHARED | MAP_ANONYMOUS, -1, 0);
}
return 0;
}
Reported-and-tested-by: Mark Rutland <[email protected]>
Signed-off-by: Mel Gorman <[email protected]>
Acked-by: Peter Zijlstra (Intel) <[email protected]>
Cc: [email protected] # 4.7+
Signed-off-by: Linus Torvalds <[email protected]>
|
/*
 * Accessor: report whether 'zone' is configured to request incremental
 * zone transfers (IXFR).  'zone' must be a valid dns_zone_t.
 * (Return type is declared on the preceding line, outside this extract.)
 */
dns_zone_getrequestixfr(dns_zone_t *zone) {
	REQUIRE(DNS_ZONE_VALID(zone));
	return (zone->requestixfr);
}
| 0 |
[
"CWE-327"
] |
bind9
|
f09352d20a9d360e50683cd1d2fc52ccedcd77a0
| 94,500,619,665,997,810,000,000,000,000,000,000,000 | 4 |
Update keyfetch_done compute_tag check
If in keyfetch_done the compute_tag fails (because for example the
algorithm is not supported), don't crash, but instead ignore the
key.
|
/* Reset @state to its freshly-initialised defaults while preserving its
 * link to the parent state.  Finalizes the current contents first, then
 * re-initialises in place and restores the saved parent pointer. */
rsvg_state_reinit (RsvgState * state)
{
    RsvgState *saved_parent;

    saved_parent = state->parent;
    rsvg_state_finalize (state);
    rsvg_state_init (state);
    state->parent = saved_parent;
}
| 0 |
[
"CWE-20"
] |
librsvg
|
d1c9191949747f6dcfd207831d15dd4ba00e31f2
| 292,524,597,006,855,250,000,000,000,000,000,000,000 | 7 |
state: Store mask as reference
Instead of immediately looking up the mask, store the reference and look
it up on use.
|
/* Skip leading space characters (' ' only — not tabs or newlines).
 *
 * Returns a pointer into @line at the first non-space character, or
 * NULL when @line is NULL.  The string itself is never modified.
 *
 * Fixes: the original loop condition tested both (*line == ' ') and
 * (*line != 0); the second test is redundant because ' ' is never 0.
 * Also supplies the missing return type (implicit int otherwise). */
const char *
_g_str_eat_spaces (const char *line)
{
	if (line == NULL)
		return NULL;

	while (*line == ' ')
		line++;

	return line;
}
| 0 |
[
"CWE-22"
] |
file-roller
|
b147281293a8307808475e102a14857055f81631
| 191,146,932,722,439,730,000,000,000,000,000,000,000 | 8 |
libarchive: sanitize filenames before extracting
|
/*
 * Convert the String 's' to an interned Symbol (implements String#intern
 * / #to_sym in this Ruby 1.8-era C core).  Raises ArgumentError for an
 * empty string or one containing an embedded NUL, and SecurityError when
 * interning a tainted, not-yet-interned string at $SAFE >= 1.
 * (K&R-style definition; return type is declared outside this extract.)
 */
rb_str_intern(s)
    VALUE s;
{
    /* volatile local copy — presumably to keep the VALUE visible to the
     * conservative GC across the raise paths; TODO confirm. */
    volatile VALUE str = s;
    ID id;

    if (!RSTRING(str)->ptr || RSTRING(str)->len == 0) {
	rb_raise(rb_eArgError, "interning empty string");
    }
    /* strlen() stopping before len means the string holds an embedded NUL,
     * which a symbol name may not contain. */
    if (strlen(RSTRING(str)->ptr) != RSTRING(str)->len)
	rb_raise(rb_eArgError, "symbol string may not contain `\\0'");
    if (OBJ_TAINTED(str) && rb_safe_level() >= 1 && !rb_sym_interned_p(str)) {
	rb_raise(rb_eSecurityError, "Insecure: can't intern tainted string");
    }
    id = rb_intern(RSTRING(str)->ptr);
    return ID2SYM(id);
}
| 0 |
[
"CWE-20"
] |
ruby
|
e926ef5233cc9f1035d3d51068abe9df8b5429da
| 292,513,763,152,971,230,000,000,000,000,000,000,000 | 17 |
* random.c (rb_genrand_int32, rb_genrand_real), intern.h: Export.
* string.c (rb_str_tmp_new), intern.h: New function.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/branches/ruby_1_8@16014 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
|
/*
 * __ip_vs_del_service() - tear down an IPVS virtual service.
 *
 * NOTE(review): the double-underscore prefix suggests the caller holds the
 * required IPVS locks — confirm against the call sites (not visible here).
 * Unbinds the scheduler and app inc, unlinks/deletes every destination,
 * updates the global counters, frees the service once unreferenced, and
 * drops the module use count.
 */
static void __ip_vs_del_service(struct ip_vs_service *svc)
{
	struct ip_vs_dest *dest, *nxt;
	struct ip_vs_scheduler *old_sched;

	/* Count only IPv4 services for old get/setsockopt interface */
	if (svc->af == AF_INET)
		ip_vs_num_services--;

	ip_vs_kill_estimator(&svc->stats);

	/* Unbind scheduler */
	old_sched = svc->scheduler;
	ip_vs_unbind_scheduler(svc);
	if (old_sched)
		ip_vs_scheduler_put(old_sched);

	/* Unbind app inc */
	if (svc->inc) {
		ip_vs_app_inc_put(svc->inc);
		svc->inc = NULL;
	}

	/*
	 *    Unlink the whole destination list
	 */
	list_for_each_entry_safe(dest, nxt, &svc->destinations, n_list) {
		__ip_vs_unlink_dest(svc, dest, 0);
		__ip_vs_del_dest(dest);
	}

	/*
	 *    Update the virtual service counters
	 */
	if (svc->port == FTPPORT)
		atomic_dec(&ip_vs_ftpsvc_counter);
	else if (svc->port == 0)
		atomic_dec(&ip_vs_nullsvc_counter);

	/*
	 *    Free the service if nobody refers to it
	 */
	if (atomic_read(&svc->refcnt) == 0)
		kfree(svc);

	/* decrease the module use count */
	ip_vs_use_count_dec();
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
linux
|
04bcef2a83f40c6db24222b27a52892cba39dffb
| 225,617,719,500,187,470,000,000,000,000,000,000,000 | 48 |
ipvs: Add boundary check on ioctl arguments
The ipvs code has a nifty system for doing the size of ioctl command
copies; it defines an array with values into which it indexes the cmd
to find the right length.
Unfortunately, the ipvs code forgot to check if the cmd was in the
range that the array provides, allowing for an index outside of the
array, which then gives a "garbage" result into the length, which
then gets used for copying into a stack buffer.
Fix this by adding sanity checks on these as well as the copy size.
[ [email protected]: adjusted limit to IP_VS_SO_GET_MAX ]
Signed-off-by: Arjan van de Ven <[email protected]>
Acked-by: Julian Anastasov <[email protected]>
Signed-off-by: Simon Horman <[email protected]>
Signed-off-by: Patrick McHardy <[email protected]>
|
/* Report whether the UART transmitter is completely idle.  Reads the
 * line status register under the port lock and returns TIOCSER_TEMT
 * when the transmitter-empty bit is set, 0 otherwise. */
static unsigned int multi_tx_empty(struct sb_uart_port *port)
{
	struct mp_port *mtpt = (struct mp_port *)port;
	unsigned long flags;
	unsigned int result = 0;

	spin_lock_irqsave(&mtpt->port.lock, flags);
	if (serial_in(mtpt, UART_LSR) & UART_LSR_TEMT)
		result = TIOCSER_TEMT;
	spin_unlock_irqrestore(&mtpt->port.lock, flags);

	return result;
}
| 0 |
[
"CWE-200"
] |
linux
|
a8b33654b1e3b0c74d4a1fed041c9aae50b3c427
| 337,194,720,329,819,500,000,000,000,000,000,000,000 | 12 |
Staging: sb105x: info leak in mp_get_count()
The icount.reserved[] array isn't initialized so it leaks stack
information to userspace.
Reported-by: Nico Golde <[email protected]>
Reported-by: Fabian Yamaguchi <[email protected]>
Signed-off-by: Dan Carpenter <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]>
|
/*
 * cdc_ncm_bind_common() - shared bind routine for CDC NCM/MBIM usbnet
 * drivers.
 * @dev:             usbnet device being probed
 * @intf:            the control interface (the only one probe succeeds on)
 * @data_altsetting: altsetting number to select on the data interface
 * @drvflags:        device-specific CDC_NCM_FLAG_* bits
 *
 * Allocates and wires up the driver context, parses the CDC functional
 * descriptors, claims and resets the data interface, collects endpoints
 * and the MAC address, and installs ethtool/netdev ops.  Returns 0 on
 * success or -ENOMEM/-ENODEV on failure (all partially acquired
 * resources are released via the error/error2 goto cleanup chain).
 */
int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags)
{
	struct cdc_ncm_ctx *ctx;
	struct usb_driver *driver;
	u8 *buf;
	int len;
	int temp;
	u8 iface_no;
	struct usb_cdc_parsed_header hdr;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	hrtimer_init(&ctx->tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	ctx->tx_timer.function = &cdc_ncm_tx_timer_cb;
	ctx->bh.data = (unsigned long)dev;
	ctx->bh.func = cdc_ncm_txpath_bh;
	atomic_set(&ctx->stop, 0);
	spin_lock_init(&ctx->mtx);

	/* store ctx pointer in device data field */
	dev->data[0] = (unsigned long)ctx;

	/* only the control interface can be successfully probed */
	ctx->control = intf;

	/* get some pointers */
	driver = driver_of(intf);
	buf = intf->cur_altsetting->extra;
	len = intf->cur_altsetting->extralen;

	/* parse through descriptors associated with control interface */
	cdc_parse_cdc_header(&hdr, intf, buf, len);

	/* The CDC Union descriptor names the slave (data) interface. */
	if (hdr.usb_cdc_union_desc)
		ctx->data = usb_ifnum_to_if(dev->udev,
					    hdr.usb_cdc_union_desc->bSlaveInterface0);
	ctx->ether_desc = hdr.usb_cdc_ether_desc;
	ctx->func_desc = hdr.usb_cdc_ncm_desc;
	ctx->mbim_desc = hdr.usb_cdc_mbim_desc;
	ctx->mbim_extended_desc = hdr.usb_cdc_mbim_extended_desc;

	/* some buggy devices have an IAD but no CDC Union */
	if (!hdr.usb_cdc_union_desc && intf->intf_assoc && intf->intf_assoc->bInterfaceCount == 2) {
		ctx->data = usb_ifnum_to_if(dev->udev, intf->cur_altsetting->desc.bInterfaceNumber + 1);
		dev_dbg(&intf->dev, "CDC Union missing - got slave from IAD\n");
	}

	/* check if we got everything */
	if (!ctx->data) {
		dev_dbg(&intf->dev, "CDC Union missing and no IAD found\n");
		goto error;
	}

	/* MBIM devices need the MBIM descriptor; plain NCM needs both the
	 * Ethernet and the NCM functional descriptors. */
	if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) {
		if (!ctx->mbim_desc) {
			dev_dbg(&intf->dev, "MBIM functional descriptor missing\n");
			goto error;
		}
	} else {
		if (!ctx->ether_desc || !ctx->func_desc) {
			dev_dbg(&intf->dev, "NCM or ECM functional descriptors missing\n");
			goto error;
		}
	}

	/* claim data interface, if different from control */
	if (ctx->data != ctx->control) {
		temp = usb_driver_claim_interface(driver, ctx->data, dev);
		if (temp) {
			dev_dbg(&intf->dev, "failed to claim data intf\n");
			goto error;
		}
	}

	iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber;

	/* Reset data interface. Some devices will not reset properly
	 * unless they are configured first.  Toggle the altsetting to
	 * force a reset. */
	usb_set_interface(dev->udev, iface_no, data_altsetting);
	temp = usb_set_interface(dev->udev, iface_no, 0);
	if (temp) {
		dev_dbg(&intf->dev, "set interface failed\n");
		goto error2;
	}

	/* initialize basic device settings */
	if (cdc_ncm_init(dev))
		goto error2;

	/* configure data interface */
	temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
	if (temp) {
		dev_dbg(&intf->dev, "set interface failed\n");
		goto error2;
	}

	cdc_ncm_find_endpoints(dev, ctx->data);
	cdc_ncm_find_endpoints(dev, ctx->control);
	if (!dev->in || !dev->out || !dev->status) {
		dev_dbg(&intf->dev, "failed to collect endpoints\n");
		goto error2;
	}

	usb_set_intfdata(ctx->data, dev);
	usb_set_intfdata(ctx->control, dev);

	if (ctx->ether_desc) {
		temp = usbnet_get_ethernet_addr(dev, ctx->ether_desc->iMACAddress);
		if (temp) {
			dev_dbg(&intf->dev, "failed to get mac address\n");
			goto error2;
		}
		dev_info(&intf->dev, "MAC-Address: %pM\n", dev->net->dev_addr);
	}

	/* finish setting up the device specific data */
	cdc_ncm_setup(dev);

	/* Device-specific flags */
	ctx->drvflags = drvflags;

	/* Allocate the delayed NDP if needed. */
	if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
		ctx->delayed_ndp16 = kzalloc(ctx->max_ndp_size, GFP_KERNEL);
		if (!ctx->delayed_ndp16)
			goto error2;
		dev_info(&intf->dev, "NDP will be placed at end of frame for this device.");
	}

	/* override ethtool_ops */
	dev->net->ethtool_ops = &cdc_ncm_ethtool_ops;

	/* add our sysfs attrs */
	dev->net->sysfs_groups[0] = &cdc_ncm_sysfs_attr_group;

	/* must handle MTU changes */
	dev->net->netdev_ops = &cdc_ncm_netdev_ops;

	return 0;

error2:
	usb_set_intfdata(ctx->control, NULL);
	usb_set_intfdata(ctx->data, NULL);
	if (ctx->data != ctx->control)
		usb_driver_release_interface(driver, ctx->data);
error:
	cdc_ncm_free((struct cdc_ncm_ctx *)dev->data[0]);
	dev->data[0] = 0;
	dev_info(&intf->dev, "bind() failure\n");
	return -ENODEV;
}
| 0 |
[
"CWE-703"
] |
linux
|
4d06dd537f95683aba3651098ae288b7cbff8274
| 165,777,781,683,685,250,000,000,000,000,000,000,000 | 154 |
cdc_ncm: do not call usbnet_link_change from cdc_ncm_bind
usbnet_link_change will call schedule_work and should be
avoided if bind is failing. Otherwise we will end up with
scheduled work referring to a netdev which has gone away.
Instead of making the call conditional, we can just defer
it to usbnet_probe, using the driver_info flag made for
this purpose.
Fixes: 8a34b0ae8778 ("usbnet: cdc_ncm: apply usbnet_link_change")
Reported-by: Andrey Konovalov <[email protected]>
Suggested-by: Linus Torvalds <[email protected]>
Signed-off-by: Bjørn Mork <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
/*
 * llc_ui_recvmsg() - receive data on an LLC socket.
 * @iocb:  kernel iocb (unused here)
 * @sock:  socket to receive from
 * @msg:   destination message header; msg_name (if set) receives the
 *         peer sockaddr_llc for non-stream reads
 * @len:   maximum number of bytes to copy
 * @flags: MSG_DONTWAIT / MSG_PEEK / MSG_TRUNC / MSG_WAITALL
 *
 * Loops pulling skbs from sk_receive_queue, copying up to @len bytes to
 * userspace.  Stream sockets may consume multiple skbs per call and
 * support partial reads; other socket types return one packet per call.
 * Returns the number of bytes copied or a negative errno.
 */
static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *msg, size_t len, int flags)
{
	struct sockaddr_llc *uaddr = (struct sockaddr_llc *)msg->msg_name;
	const int nonblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb = NULL;
	struct sock *sk = sock->sk;
	struct llc_sock *llc = llc_sk(sk);
	size_t copied = 0;
	u32 peek_seq = 0;
	u32 *seq;
	unsigned long used;
	int target;	/* Read at least this many bytes */
	long timeo;

	lock_sock(sk);
	copied = -ENOTCONN;
	if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN))
		goto out;

	timeo = sock_rcvtimeo(sk, nonblock);

	/* MSG_PEEK must not advance llc->copied_seq; track a local copy. */
	seq = &llc->copied_seq;
	if (flags & MSG_PEEK) {
		peek_seq = llc->copied_seq;
		seq = &peek_seq;
	}

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	copied = 0;

	do {
		u32 offset;

		/*
		 * We need to check signals first, to get correct SIGURG
		 * handling. FIXME: Need to check this doesn't impact 1003.1g
		 * and move it down to the bottom of the loop
		 */
		if (signal_pending(current)) {
			if (copied)
				break;
			copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
			break;
		}

		/* Next get a buffer. */

		skb = skb_peek(&sk->sk_receive_queue);
		if (skb) {
			offset = *seq;
			goto found_ok_skb;
		}

		/* Well, if we have backlog, try to process it now yet. */

		if (copied >= target && !sk->sk_backlog.tail)
			break;

		if (copied) {
			/* Already have data: stop on any terminal condition
			 * rather than blocking for more. */
			if (sk->sk_err ||
			    sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    !timeo ||
			    (flags & MSG_PEEK))
				break;
		} else {
			if (sock_flag(sk, SOCK_DONE))
				break;

			if (sk->sk_err) {
				copied = sock_error(sk);
				break;
			}
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			if (sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_CLOSE) {
				if (!sock_flag(sk, SOCK_DONE)) {
					/*
					 * This occurs when user tries to read
					 * from never connected socket.
					 */
					copied = -ENOTCONN;
					break;
				}
				break;
			}
			if (!timeo) {
				copied = -EAGAIN;
				break;
			}
		}

		if (copied >= target) { /* Do not sleep, just process backlog. */
			release_sock(sk);
			lock_sock(sk);
		} else
			sk_wait_data(sk, &timeo);

		if ((flags & MSG_PEEK) && peek_seq != llc->copied_seq) {
			if (net_ratelimit())
				printk(KERN_DEBUG "LLC(%s:%d): Application "
						  "bug, race in MSG_PEEK.\n",
				       current->comm, task_pid_nr(current));
			peek_seq = llc->copied_seq;
		}
		continue;
	found_ok_skb:
		/* Ok so how much can we use? */
		used = skb->len - offset;
		if (len < used)
			used = len;

		if (!(flags & MSG_TRUNC)) {
			int rc = skb_copy_datagram_iovec(skb, offset,
							 msg->msg_iov, used);
			if (rc) {
				/* Exception. Bailout! */
				if (!copied)
					copied = -EFAULT;
				break;
			}
		}

		*seq += used;
		copied += used;
		len -= used;

		if (!(flags & MSG_PEEK)) {
			sk_eat_skb(sk, skb, 0);
			*seq = 0;
		}

		/* For non stream protocols we get one packet per recvmsg call */
		if (sk->sk_type != SOCK_STREAM)
			goto copy_uaddr;

		/* Partial read */
		if (used + offset < skb->len)
			continue;
	} while (len > 0);

out:
	release_sock(sk);
	return copied;
copy_uaddr:
	if (uaddr != NULL && skb != NULL) {
		memcpy(uaddr, llc_ui_skb_cb(skb), sizeof(*uaddr));
		msg->msg_namelen = sizeof(*uaddr);
	}
	goto out;
}
| 0 |
[
"CWE-200"
] |
linux-2.6
|
28e9fc592cb8c7a43e4d3147b38be6032a0e81bc
| 130,562,314,383,448,920,000,000,000,000,000,000,000 | 152 |
NET: llc, zero sockaddr_llc struct
sllc_arphrd member of sockaddr_llc might not be changed. Zero sllc
before copying to the above layer's structure.
Signed-off-by: Jiri Slaby <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
/* {{{ proto mixed SplObjectStorage::key()
   Return the current iteration index of the storage. */
SPL_METHOD(SplObjectStorage, key)
{
	spl_SplObjectStorage *intern = Z_SPLOBJSTORAGE_P(getThis());

	/* key() takes no arguments; bail out (an error was raised) otherwise. */
	if (zend_parse_parameters_none() == FAILURE) {
		return;
	}

	RETURN_LONG(intern->index);
} /* }}} */
| 0 |
[
"CWE-119",
"CWE-787"
] |
php-src
|
61cdd1255d5b9c8453be71aacbbf682796ac77d4
| 263,226,830,115,737,960,000,000,000,000,000,000,000 | 10 |
Fix bug #73257 and bug #73258 - SplObjectStorage unserialize allows use of non-object as key
|
/**
 * Handle an ingress parse/protocol error reported by the codec.
 *
 * streamID == 0 means a session-level error (whole connection torn down
 * via onSessionParseError).  For stream-level errors the error is routed
 * to the existing transaction when one exists; otherwise, if the error
 * carries an HTTP status code (syntactically valid but semantically
 * illegal request), a transaction is created just to send the error
 * response directly.  On serial (non-multiplexed) codecs any error also
 * stops further ingress parsing.
 */
void HTTPSession::onError(HTTPCodec::StreamID streamID,
                          const HTTPException& error, bool newTxn) {
  // Guard against this session being destroyed while we dispatch the error.
  DestructorGuard dg(this);
  // The codec detected an error in the ingress stream, possibly bad
  // syntax, a truncated message, or bad semantics in the frame.  If reads
  // are paused, queue up the event; otherwise, process it now.
  VLOG(4) << "Error on " << *this << ", streamID=" << streamID
          << ", " << error;

  if (ingressError_) {
    return;
  }
  if (!codec_->supportsParallelRequests()) {
    // this error should only prevent us from reading/handling more errors
    // on serial streams
    ingressError_ = true;
    setCloseReason(ConnectionCloseReason::SESSION_PARSE_ERROR);
  }
  if ((streamID == 0) && infoCallback_) {
    infoCallback_->onIngressError(*this, kErrorMessage);
  }

  if (!streamID) {
    ingressError_ = true;
    onSessionParseError(error);
    return;
  }

  HTTPTransaction* txn = findTransaction(streamID);
  if (!txn) {
    if (error.hasHttpStatusCode() && streamID != 0) {
      // If the error has an HTTP code, then parsing was fine, it just was
      // illegal in a higher level way
      txn = createTransaction(streamID, HTTPCodec::NoStream,
                              HTTPCodec::NoExAttributes);
      if (infoCallback_) {
        infoCallback_->onRequestBegin(*this);
      }
      if (txn) {
        handleErrorDirectly(txn, error);
      }
    } else if (newTxn) {
      onNewTransactionParseError(streamID, error);
    } else {
      VLOG(4) << *this << " parse error with invalid transaction";
      invalidStream(streamID);
    }
    return;
  }

  if (!txn->getHandler() &&
      txn->getEgressState() == HTTPTransactionEgressSM::State::Start) {
    handleErrorDirectly(txn, error);
    return;
  }

  txn->onError(error);
  // On a non-reusable serial codec with no live transactions, the
  // connection can be shut down now.
  if (!codec_->isReusable() && transactions_.empty()) {
    VLOG(4) << *this << "shutdown from onError";
    setCloseReason(ConnectionCloseReason::SESSION_PARSE_ERROR);
    shutdownTransport(true, true);
  }
}
| 0 |
[
"CWE-20"
] |
proxygen
|
0600ebe59c3e82cd012def77ca9ca1918da74a71
| 206,369,138,256,575,440,000,000,000,000,000,000,000 | 63 |
Check that a secondary auth manager is set before dereferencing.
Summary: CVE-2018-6343
Reviewed By: mingtaoy
Differential Revision: D12994423
fbshipit-source-id: 9229ec11da8085f1fa153595e8e5353e19d06fb7
|
/**
 * platform_device_register - initialise and add a platform device
 * @pdev: platform device to register
 *
 * Prepares the embedded struct device and the arch-specific platform
 * data, then hands the device to platform_device_add().  Returns
 * platform_device_add()'s result.
 */
int platform_device_register(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	device_initialize(dev);
	arch_setup_pdev_archdata(pdev);
	return platform_device_add(pdev);
}
| 0 |
[
"CWE-362",
"CWE-284"
] |
linux
|
6265539776a0810b7ce6398c27866ddb9c6bd154
| 338,644,918,162,166,270,000,000,000,000,000,000,000 | 6 |
driver core: platform: fix race condition with driver_override
The driver_override implementation is susceptible to race condition when
different threads are reading vs storing a different driver override.
Add locking to avoid race condition.
Fixes: 3d713e0e382e ("driver core: platform: add device binding path 'driver_override'")
Cc: [email protected]
Signed-off-by: Adrian Salido <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
/*
 * Prepare for outputting a message: position the cursor/message row,
 * clear leftover ":echo" text, and start a new line when needed.
 * Also writes a newline to the redirection target if one wasn't already
 * emitted.  (Return type is declared outside this extract.)
 */
msg_start(void)
{
    int		did_return = FALSE;

    /* A new message invalidates any message being kept for redisplay. */
    if (!msg_silent)
	VIM_CLEAR(keep_msg);

#ifdef FEAT_EVAL
    if (need_clr_eos)
    {
	// Halfway an ":echo" command and getting an (error) message: clear
	// any text from the command.
	need_clr_eos = FALSE;
	msg_clr_eos();
    }
#endif

    if (!msg_scroll && full_screen)	// overwrite last message
    {
	msg_row = cmdline_row;
	msg_col =
#ifdef FEAT_RIGHTLEFT
	    cmdmsg_rl ? Columns - 1 :
#endif
	    0;
    }
    else if (msg_didout)		// start message on next line
    {
	msg_putchar('\n');
	did_return = TRUE;
	if (exmode_active != EXMODE_NORMAL)
	    cmdline_row = msg_row;
    }
    if (!msg_didany || lines_left < 0)
	msg_starthere();
    if (msg_silent == 0)
    {
	msg_didout = FALSE;		// no output on current line yet
	cursor_off();
    }

    // when redirecting, may need to start a new line.
    if (!did_return)
	redir_write((char_u *)"\n", -1);
}
| 0 |
[
"CWE-416"
] |
vim
|
9f1a39a5d1cd7989ada2d1cb32f97d84360e050f
| 207,478,675,501,330,750,000,000,000,000,000,000,000 | 45 |
patch 8.2.4040: keeping track of allocated lines is too complicated
Problem: Keeping track of allocated lines in user functions is too
complicated.
Solution: Instead of freeing individual lines keep them all until the end.
|
/*
 * samldb_rodc_add() - prepare the add of an RODC krbtgt account.
 *
 * Picks an unused msDS-SecondaryKrbTgtNumber (random start, wrapping scan
 * of 1..0xFFFF), derives the sAMAccountName "krbtgt_<n>" from it, and
 * attaches a random placeholder clearTextPassword (the password_hash
 * module replaces it with its own secret).  Returns LDB_SUCCESS and
 * advances to the next module step, or an LDB error.
 *
 * NOTE(review): this uses the add-then-assume element pattern
 * (ldb_msg_add_empty followed by samdb_msg_add_uint) that upstream later
 * replaced with safer append helpers (CVE-2022-32746 class); a proper fix
 * needs those helpers, which are not visible in this extract.
 */
static int samldb_rodc_add(struct samldb_ctx *ac)
{
	struct ldb_context *ldb = ldb_module_get_ctx(ac->module);
	uint32_t krbtgt_number, i_start, i;
	int ret;
	struct ldb_val newpass_utf16;

	/* find a unused msDS-SecondaryKrbTgtNumber */
	i_start = generate_random() & 0xFFFF;
	if (i_start == 0) {
		i_start = 1;
	}

	/* Scan upward from the random start, then wrap to the low range,
	 * so every value in 1..0xFFFF is eventually considered. */
	for (i=i_start; i<=0xFFFF; i++) {
		if (samldb_krbtgtnumber_available(ac, i)) {
			krbtgt_number = i;
			goto found;
		}
	}
	for (i=1; i<i_start; i++) {
		if (samldb_krbtgtnumber_available(ac, i)) {
			krbtgt_number = i;
			goto found;
		}
	}

	ldb_asprintf_errstring(ldb,
			       "%08X: Unable to find available msDS-SecondaryKrbTgtNumber",
			       W_ERROR_V(WERR_NO_SYSTEM_RESOURCES));
	return LDB_ERR_OTHER;

found:
	ret = ldb_msg_add_empty(ac->msg, "msDS-SecondaryKrbTgtNumber",
				LDB_FLAG_INTERNAL_DISABLE_VALIDATION, NULL);
	if (ret != LDB_SUCCESS) {
		return ldb_operr(ldb);
	}

	ret = samdb_msg_add_uint(ldb, ac->msg, ac->msg,
				 "msDS-SecondaryKrbTgtNumber", krbtgt_number);
	if (ret != LDB_SUCCESS) {
		return ldb_operr(ldb);
	}

	ret = ldb_msg_add_fmt(ac->msg, "sAMAccountName", "krbtgt_%u",
			      krbtgt_number);
	if (ret != LDB_SUCCESS) {
		return ldb_operr(ldb);
	}

	newpass_utf16 = data_blob_talloc_zero(ac->module, 256);
	if (newpass_utf16.data == NULL) {
		return ldb_oom(ldb);
	}

	/*
	 * Note that the password_hash module will ignore
	 * this value and use it's own generate_secret_buffer()
	 * that's why we can just use generate_random_buffer()
	 * here.
	 */
	generate_random_buffer(newpass_utf16.data, newpass_utf16.length);
	ret = ldb_msg_add_steal_value(ac->msg, "clearTextPassword", &newpass_utf16);
	if (ret != LDB_SUCCESS) {
		return ldb_operr(ldb);
	}

	return samldb_next_step(ac);
}
| 1 |
[
"CWE-200"
] |
samba
|
0a3aa5f908e351201dc9c4d4807b09ed9eedff77
| 202,150,202,285,615,070,000,000,000,000,000,000,000 | 68 |
CVE-2022-32746 ldb: Make use of functions for appending to an ldb_message
This aims to minimise usage of the error-prone pattern of searching for
a just-added message element in order to make modifications to it (and
potentially finding the wrong element).
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009
Signed-off-by: Joseph Sutton <[email protected]>
|
Subsets and Splits
CWE 416 & 19
The query filters records related to specific CWEs (Common Weakness Enumerations), providing a basic overview of entries with these vulnerabilities but without deeper analysis.
CWE Frequency in Train Set
Counts the occurrences of each CWE (Common Weakness Enumeration) in the dataset, providing a basic distribution but limited insight.