func
stringlengths 0
484k
| target
int64 0
1
| cwe
listlengths 0
4
| project
stringclasses 799
values | commit_id
stringlengths 40
40
| hash
float64 1,215,700,430,453,689,100,000,000B
340,281,914,521,452,260,000,000,000,000B
| size
int64 1
24k
| message
stringlengths 0
13.3k
|
---|---|---|---|---|---|---|---|
vte_sequence_handler_mk (VteTerminal *terminal, GValueArray *params)
{
terminal->pvt->screen->defaults.attr.invisible = 1;
} | 0 | []
| vte | 58bc3a942f198a1a8788553ca72c19d7c1702b74 | 170,823,869,529,485,070,000,000,000,000,000,000,000 | 4 | fix bug #548272
svn path=/trunk/; revision=2365 |
static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev,
struct ib_pd *pd,
unsigned long addr, size_t size,
struct ib_umem **umem,
int *npages, int *page_shift, int *ncont,
u32 *offset)
{
int err;
*umem = ib_umem_get(pd->uobject->context, addr, size, 0, 0);
if (IS_ERR(*umem)) {
mlx5_ib_dbg(dev, "umem_get failed\n");
return PTR_ERR(*umem);
}
mlx5_ib_cont_pages(*umem, addr, 0, npages, page_shift, ncont, NULL);
err = mlx5_ib_get_buf_offset(addr, *page_shift, offset);
if (err) {
mlx5_ib_warn(dev, "bad offset\n");
goto err_umem;
}
mlx5_ib_dbg(dev, "addr 0x%lx, size %zu, npages %d, page_shift %d, ncont %d, offset %d\n",
addr, size, *npages, *page_shift, *ncont, *offset);
return 0;
err_umem:
ib_umem_release(*umem);
*umem = NULL;
return err;
} | 0 | [
"CWE-119",
"CWE-787"
]
| linux | 0625b4ba1a5d4703c7fb01c497bd6c156908af00 | 313,386,041,918,835,230,000,000,000,000,000,000,000 | 34 | IB/mlx5: Fix leaking stack memory to userspace
mlx5_ib_create_qp_resp was never initialized and only the first 4 bytes
were written.
Fixes: 41d902cb7c32 ("RDMA/mlx5: Fix definition of mlx5_ib_create_qp_resp")
Cc: <[email protected]>
Acked-by: Leon Romanovsky <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]> |
gboolean vnc_color_map_lookup(VncColorMap *map,
guint16 idx,
guint16 *red,
guint16 *green,
guint16 *blue)
{
if (idx < map->offset || idx >= (map->size + map->offset))
return FALSE;
*red = map->colors[idx - map->offset].red;
*green = map->colors[idx - map->offset].green;
*blue = map->colors[idx - map->offset].blue;
return TRUE;
} | 0 | []
| gtk-vnc | c8583fd3783c5b811590fcb7bae4ce6e7344963e | 138,646,569,126,229,490,000,000,000,000,000,000,000 | 15 | Correctly validate color map range indexes
The color map index could wrap around to zero causing negative
array index accesses.
https://bugzilla.gnome.org/show_bug.cgi?id=778050
CVE-2017-5885
Signed-off-by: Daniel P. Berrange <[email protected]> |
static void mod_ua_on_tsx_state( pjsip_transaction *tsx, pjsip_event *e)
{
pjsip_dialog *dlg;
/* If the module id is -1, it could mean that the module has been
* destroyed.
*/
if (mod_ua.mod.id == -1)
return;
/* Get the dialog where this transaction belongs. */
dlg = (pjsip_dialog*) tsx->mod_data[mod_ua.mod.id];
/* If dialog instance has gone, it could mean that the dialog
* may has been destroyed.
*/
if (dlg == NULL)
return;
/* Hand over the event to the dialog. */
pjsip_dlg_on_tsx_state(dlg, tsx, e);
} | 0 | [
"CWE-416"
]
| pjproject | db3235953baa56d2fb0e276ca510fefca751643f | 176,931,045,361,928,100,000,000,000,000,000,000,000 | 22 | Merge pull request from GHSA-ffff-m5fm-qm62
* Update pjsip_ua_unregister_dlg():
- update the hash key if the dialog being unregistered is used as hash key.
- add an assertion check to make sure that the dlg_set to be removed is valid (can be found in the hash table).
* Change hash key string comparison method. |
static int ZEND_FASTCALL ZEND_MUL_SPEC_CONST_CV_HANDLER(ZEND_OPCODE_HANDLER_ARGS)
{
zend_op *opline = EX(opline);
mul_function(&EX_T(opline->result.u.var).tmp_var,
&opline->op1.u.constant,
_get_zval_ptr_cv(&opline->op2, EX(Ts), BP_VAR_R TSRMLS_CC) TSRMLS_CC);
ZEND_VM_NEXT_OPCODE();
} | 0 | []
| php-src | ce96fd6b0761d98353761bf78d5bfb55291179fd | 260,278,765,532,923,660,000,000,000,000,000,000,000 | 12 | - fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus |
void setBearerToken(Http::RequestHeaderMap& headers, const std::string& token) {
headers.setInline(authorization_handle.handle(), absl::StrCat("Bearer ", token));
} | 0 | [
"CWE-416"
]
| envoy | 7ffda4e809dec74449ebc330cebb9d2f4ab61360 | 283,241,537,846,338,600,000,000,000,000,000,000,000 | 3 | oauth2: do not blindly accept requests with a token in the Authorization headera (781)
The logic was broken because it assumed an additional call would be
performed to the auth server, which isn't the case. Per the filter
documentation, a request is only considered subsequently authenticated
if there's valid cookie that was set after the access token was received
from the auth server:
https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_filters/oauth2_filter
More info about how to validate an access token (which we don't do, per
above):
https://www.oauth.com/oauth2-servers/token-introspection-endpoint/
https://datatracker.ietf.org/doc/html/rfc7662
Also fix the fact that ee shouldn't be calling continueDecoding() after
decoder_callbacks_->encodeHeaders().
Signed-off-by: Raul Gutierrez Segales <[email protected]>
Signed-off-by: Matt Klein <[email protected]>
Signed-off-by: Pradeep Rao <[email protected]> |
static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
{
struct vm_struct *area;
if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
return __atomic_get_pages(cpu_addr);
if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
return cpu_addr;
area = find_vm_area(cpu_addr);
if (area && (area->flags & VM_ARM_DMA_CONSISTENT))
return area->pages;
return NULL;
} | 0 | [
"CWE-284",
"CWE-264"
]
| linux | 0ea1ec713f04bdfac343c9702b21cd3a7c711826 | 49,350,434,962,507,990,000,000,000,000,000,000,000 | 15 | ARM: dma-mapping: don't allow DMA mappings to be marked executable
DMA mapping permissions were being derived from pgprot_kernel directly
without using PAGE_KERNEL. This causes them to be marked with executable
permission, which is not what we want. Fix this.
Signed-off-by: Russell King <[email protected]> |
int sqlite3KeyInfoIsWriteable(KeyInfo *p){ return p->nRef==1; } | 0 | [
"CWE-20"
]
| sqlite | e59c562b3f6894f84c715772c4b116d7b5c01348 | 51,614,606,130,873,700,000,000,000,000,000,000,000 | 1 | Fix a crash that could occur if a sub-select that uses both DISTINCT and window functions also used an ORDER BY that is the same as its select list.
FossilOrigin-Name: bcdd66c1691955c697f3d756c2b035acfe98f6aad72e90b0021bab6e9023b3ba |
static av_cold void uninit(AVFilterContext *ctx)
{
KerndeintContext *kerndeint = ctx->priv;
av_free(kerndeint->tmp_data[0]);
} | 0 | [
"CWE-119",
"CWE-787"
]
| FFmpeg | e43a0a232dbf6d3c161823c2e07c52e76227a1bc | 150,707,826,326,881,400,000,000,000,000,000,000,000 | 6 | avfilter: fix plane validity checks
Fixes out of array accesses
Signed-off-by: Michael Niedermayer <[email protected]> |
static gboolean cosine_read(wtap *wth, int *err, gchar **err_info,
gint64 *data_offset)
{
gint64 offset;
char line[COSINE_LINE_LENGTH];
/* Find the next packet */
offset = cosine_seek_next_packet(wth, err, err_info, line);
if (offset < 0)
return FALSE;
*data_offset = offset;
/* Parse the header and convert the ASCII hex dump to binary data */
return parse_cosine_packet(wth->fh, &wth->phdr, wth->frame_buffer,
line, err, err_info);
} | 0 | [
"CWE-119",
"CWE-787"
]
| wireshark | a66628e425db725df1ac52a3c573a03357060ddd | 77,673,008,875,563,600,000,000,000,000,000,000,000 | 16 | Don't treat the packet length as unsigned.
The scanf family of functions are as annoyingly bad at handling unsigned
numbers as strtoul() is - both of them are perfectly willing to accept a
value beginning with a negative sign as an unsigned value. When using
strtoul(), you can compensate for this by explicitly checking for a '-'
as the first character of the string, but you can't do that with
sscanf().
So revert to having pkt_len be signed, and scanning it with %d, but
check for a negative value and fail if we see a negative value.
Bug: 12395
Change-Id: I43b458a73b0934e9a5c2c89d34eac5a8f21a7455
Reviewed-on: https://code.wireshark.org/review/15223
Reviewed-by: Guy Harris <[email protected]> |
static inline bool tcp_packet_delayed(const struct tcp_sock *tp)
{
return !tp->retrans_stamp ||
tcp_tsopt_ecr_before(tp, tp->retrans_stamp);
} | 0 | [
"CWE-703",
"CWE-189"
]
| linux | 8b8a321ff72c785ed5e8b4cf6eda20b35d427390 | 97,550,441,759,933,260,000,000,000,000,000,000,000 | 5 | tcp: fix zero cwnd in tcp_cwnd_reduction
Patch 3759824da87b ("tcp: PRR uses CRB mode by default and SS mode
conditionally") introduced a bug that cwnd may become 0 when both
inflight and sndcnt are 0 (cwnd = inflight + sndcnt). This may lead
to a div-by-zero if the connection starts another cwnd reduction
phase by setting tp->prior_cwnd to the current cwnd (0) in
tcp_init_cwnd_reduction().
To prevent this we skip PRR operation when nothing is acked or
sacked. Then cwnd must be positive in all cases as long as ssthresh
is positive:
1) The proportional reduction mode
inflight > ssthresh > 0
2) The reduction bound mode
a) inflight == ssthresh > 0
b) inflight < ssthresh
sndcnt > 0 since newly_acked_sacked > 0 and inflight < ssthresh
Therefore in all cases inflight and sndcnt can not both be 0.
We check invalid tp->prior_cwnd to avoid potential div0 bugs.
In reality this bug is triggered only with a sequence of less common
events. For example, the connection is terminating an ECN-triggered
cwnd reduction with an inflight 0, then it receives reordered/old
ACKs or DSACKs from prior transmission (which acks nothing). Or the
connection is in fast recovery stage that marks everything lost,
but fails to retransmit due to local issues, then receives data
packets from other end which acks nothing.
Fixes: 3759824da87b ("tcp: PRR uses CRB mode by default and SS mode conditionally")
Reported-by: Oleksandr Natalenko <[email protected]>
Signed-off-by: Yuchung Cheng <[email protected]>
Signed-off-by: Neal Cardwell <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
int saa7164_bus_setup(struct saa7164_dev *dev)
{
struct tmComResBusInfo *b = &dev->bus;
mutex_init(&b->lock);
b->Type = TYPE_BUS_PCIe;
b->m_wMaxReqSize = SAA_DEVICE_MAXREQUESTSIZE;
b->m_pdwSetRing = (u8 __iomem *)(dev->bmmio +
((u32)dev->busdesc.CommandRing));
b->m_dwSizeSetRing = SAA_DEVICE_BUFFERBLOCKSIZE;
b->m_pdwGetRing = (u8 __iomem *)(dev->bmmio +
((u32)dev->busdesc.ResponseRing));
b->m_dwSizeGetRing = SAA_DEVICE_BUFFERBLOCKSIZE;
b->m_dwSetWritePos = ((u32)dev->intfdesc.BARLocation) +
(2 * sizeof(u64));
b->m_dwSetReadPos = b->m_dwSetWritePos + (1 * sizeof(u32));
b->m_dwGetWritePos = b->m_dwSetWritePos + (2 * sizeof(u32));
b->m_dwGetReadPos = b->m_dwSetWritePos + (3 * sizeof(u32));
return 0;
} | 0 | [
"CWE-125"
]
| media-tree | 354dd3924a2e43806774953de536257548b5002c | 130,563,647,423,199,060,000,000,000,000,000,000,000 | 28 | [PATCH] saa7164: Bug - Double fetch PCIe access condition
Avoid a double fetch by reusing the values from the prior transfer.
Originally reported via https://bugzilla.kernel.org/show_bug.cgi?id=195559
Thanks to Pengfei Wang <[email protected]> for reporting.
Signed-off-by: Steven Toth <[email protected]> |
main (int argc, char **argv)
{
g_test_init (&argc, &argv, NULL);
g_test_add_func ("/errors/non_svg_element", test_non_svg_element);
g_test_add_data_func_full ("/errors/instancing_limit/323-nested-use.svg",
"323-nested-use.svg",
test_instancing_limit,
NULL);
g_test_add_data_func_full ("/errors/instancing_limit/515-pattern-billion-laughs.svg",
"515-pattern-billion-laughs.svg",
test_instancing_limit,
NULL);
return g_test_run ();
} | 1 | [
"CWE-400"
]
| librsvg | 572f95f739529b865e2717664d6fefcef9493135 | 198,787,974,589,204,080,000,000,000,000,000,000,000 | 19 | (#515) - Add a limit for the number of loaded elements
This fixes the last part of #515, an enormous SVG file with millions
of elements, which causes out-of-memory.
To avoid unbounded memory consumption, we'll set a hard limit on the
number of loaded elements. The largest legitimate file I have is a
map rendering with about 26K elements; here we set a limit of 200,000
elements for good measure.
Fixes https://gitlab.gnome.org/GNOME/librsvg/issues/515 |
static void ll_find_deltas(struct object_entry **list, unsigned list_size,
int window, int depth, unsigned *processed)
{
struct thread_params *p;
int i, ret, active_threads = 0;
init_threaded_search();
if (delta_search_threads <= 1) {
find_deltas(list, &list_size, window, depth, processed);
cleanup_threaded_search();
return;
}
if (progress > pack_to_stdout)
fprintf(stderr, "Delta compression using up to %d threads.\n",
delta_search_threads);
p = xcalloc(delta_search_threads, sizeof(*p));
/* Partition the work amongst work threads. */
for (i = 0; i < delta_search_threads; i++) {
unsigned sub_size = list_size / (delta_search_threads - i);
/* don't use too small segments or no deltas will be found */
if (sub_size < 2*window && i+1 < delta_search_threads)
sub_size = 0;
p[i].window = window;
p[i].depth = depth;
p[i].processed = processed;
p[i].working = 1;
p[i].data_ready = 0;
/* try to split chunks on "path" boundaries */
while (sub_size && sub_size < list_size &&
list[sub_size]->hash &&
list[sub_size]->hash == list[sub_size-1]->hash)
sub_size++;
p[i].list = list;
p[i].list_size = sub_size;
p[i].remaining = sub_size;
list += sub_size;
list_size -= sub_size;
}
/* Start work threads. */
for (i = 0; i < delta_search_threads; i++) {
if (!p[i].list_size)
continue;
pthread_mutex_init(&p[i].mutex, NULL);
pthread_cond_init(&p[i].cond, NULL);
ret = pthread_create(&p[i].thread, NULL,
threaded_find_deltas, &p[i]);
if (ret)
die("unable to create thread: %s", strerror(ret));
active_threads++;
}
/*
* Now let's wait for work completion. Each time a thread is done
* with its work, we steal half of the remaining work from the
* thread with the largest number of unprocessed objects and give
* it to that newly idle thread. This ensure good load balancing
* until the remaining object list segments are simply too short
* to be worth splitting anymore.
*/
while (active_threads) {
struct thread_params *target = NULL;
struct thread_params *victim = NULL;
unsigned sub_size = 0;
progress_lock();
for (;;) {
for (i = 0; !target && i < delta_search_threads; i++)
if (!p[i].working)
target = &p[i];
if (target)
break;
pthread_cond_wait(&progress_cond, &progress_mutex);
}
for (i = 0; i < delta_search_threads; i++)
if (p[i].remaining > 2*window &&
(!victim || victim->remaining < p[i].remaining))
victim = &p[i];
if (victim) {
sub_size = victim->remaining / 2;
list = victim->list + victim->list_size - sub_size;
while (sub_size && list[0]->hash &&
list[0]->hash == list[-1]->hash) {
list++;
sub_size--;
}
if (!sub_size) {
/*
* It is possible for some "paths" to have
* so many objects that no hash boundary
* might be found. Let's just steal the
* exact half in that case.
*/
sub_size = victim->remaining / 2;
list -= sub_size;
}
target->list = list;
victim->list_size -= sub_size;
victim->remaining -= sub_size;
}
target->list_size = sub_size;
target->remaining = sub_size;
target->working = 1;
progress_unlock();
pthread_mutex_lock(&target->mutex);
target->data_ready = 1;
pthread_cond_signal(&target->cond);
pthread_mutex_unlock(&target->mutex);
if (!sub_size) {
pthread_join(target->thread, NULL);
pthread_cond_destroy(&target->cond);
pthread_mutex_destroy(&target->mutex);
active_threads--;
}
}
cleanup_threaded_search();
free(p);
} | 0 | [
"CWE-119",
"CWE-787"
]
| git | de1e67d0703894cb6ea782e36abb63976ab07e60 | 332,072,012,814,104,720,000,000,000,000,000,000,000 | 128 | list-objects: pass full pathname to callbacks
When we find a blob at "a/b/c", we currently pass this to
our show_object_fn callbacks as two components: "a/b/" and
"c". Callbacks which want the full value then call
path_name(), which concatenates the two. But this is an
inefficient interface; the path is a strbuf, and we could
simply append "c" to it temporarily, then roll back the
length, without creating a new copy.
So we could improve this by teaching the callsites of
path_name() this trick (and there are only 3). But we can
also notice that no callback actually cares about the
broken-down representation, and simply pass each callback
the full path "a/b/c" as a string. The callback code becomes
even simpler, then, as we do not have to worry about freeing
an allocated buffer, nor rolling back our modification to
the strbuf.
This is theoretically less efficient, as some callbacks
would not bother to format the final path component. But in
practice this is not measurable. Since we use the same
strbuf over and over, our work to grow it is amortized, and
we really only pay to memcpy a few bytes.
Signed-off-by: Jeff King <[email protected]>
Signed-off-by: Junio C Hamano <[email protected]> |
TEST_F(Http1ClientConnectionImplTest, ManyResponseHeadersAccepted) {
max_response_headers_count_ = 152;
initialize();
NiceMock<MockResponseDecoder> response_decoder;
Http::RequestEncoder& request_encoder = codec_->newStream(response_decoder);
TestRequestHeaderMapImpl headers{{":method", "GET"}, {":path", "/"}, {":authority", "host"}};
request_encoder.encodeHeaders(headers, true);
Buffer::OwnedImpl buffer("HTTP/1.1 200 OK\r\nContent-Length: 0\r\n");
auto status = codec_->dispatch(buffer);
// Response already contains one header.
buffer = Buffer::OwnedImpl(createHeaderFragment(150) + "\r\n");
status = codec_->dispatch(buffer);
} | 0 | [
"CWE-770"
]
| envoy | 7ca28ff7d46454ae930e193d97b7d08156b1ba59 | 178,388,229,437,734,180,000,000,000,000,000,000,000 | 16 | [http1] Include request URL in request header size computation, and reject partial headers that exceed configured limits (#145)
Signed-off-by: antonio <[email protected]> |
int cli_checklimits(const char *who, cli_ctx *ctx, unsigned long need1, unsigned long need2, unsigned long need3) {
int ret = CL_SUCCESS;
unsigned long needed;
/* if called without limits, go on, unpack, scan */
if(!ctx) return CL_CLEAN;
needed = (need1>need2)?need1:need2;
needed = (needed>need3)?needed:need3;
/* if we have global scan limits */
if(needed && ctx->engine->maxscansize) {
/* if the remaining scansize is too small... */
if(ctx->engine->maxscansize-ctx->scansize<needed) {
/* ... we tell the caller to skip this file */
cli_dbgmsg("%s: scansize exceeded (initial: %lu, consumed: %lu, needed: %lu)\n", who, (unsigned long int) ctx->engine->maxscansize, (unsigned long int) ctx->scansize, needed);
ret = CL_EMAXSIZE;
}
}
/* if we have per-file size limits, and we are overlimit... */
if(needed && ctx->engine->maxfilesize && ctx->engine->maxfilesize<needed) {
/* ... we tell the caller to skip this file */
cli_dbgmsg("%s: filesize exceeded (allowed: %lu, needed: %lu)\n", who, (unsigned long int) ctx->engine->maxfilesize, needed);
ret = CL_EMAXSIZE;
}
if(ctx->engine->maxfiles && ctx->scannedfiles>=ctx->engine->maxfiles) {
cli_dbgmsg("%s: files limit reached (max: %u)\n", who, ctx->engine->maxfiles);
ret = CL_EMAXFILES;
}
if (ret != CL_SUCCESS)
cli_check_blockmax(ctx, ret);
return ret;
} | 0 | []
| clamav-devel | 167c0079292814ec5523d0b97a9e1b002bf8819b | 82,211,428,788,113,730,000,000,000,000,000,000,000 | 37 | fix 0.99.3 false negative of virus Pdf.Exploit.CVE_2016_1046-1. |
static int h2_parse_header_table_size(char **args, int section_type, struct proxy *curpx,
struct proxy *defpx, const char *file, int line,
char **err)
{
if (too_many_args(1, args, err, NULL))
return -1;
h2_settings_header_table_size = atoi(args[1]);
if (h2_settings_header_table_size < 4096 || h2_settings_header_table_size > 65536) {
memprintf(err, "'%s' expects a numeric value between 4096 and 65536.", args[0]);
return -1;
}
return 0;
} | 0 | [
"CWE-119"
]
| haproxy | 3f0e1ec70173593f4c2b3681b26c04a4ed5fc588 | 38,622,182,375,410,800,000,000,000,000,000,000,000 | 14 | BUG/CRITICAL: h2: fix incorrect frame length check
The incoming H2 frame length was checked against the max_frame_size
setting instead of being checked against the bufsize. The max_frame_size
only applies to outgoing traffic and not to incoming one, so if a large
enough frame size is advertised in the SETTINGS frame, a wrapped frame
will be defragmented into a temporary allocated buffer where the second
fragment my overflow the heap by up to 16 kB.
It is very unlikely that this can be exploited for code execution given
that buffers are very short lived and their address not realistically
predictable in production, but the likeliness of an immediate crash is
absolutely certain.
This fix must be backported to 1.8.
Many thanks to Jordan Zebor from F5 Networks for reporting this issue
in a responsible way. |
static MOBI_RET mobi_parse_index_entry(MOBIIndx *indx, const MOBIIdxt idxt, const MOBITagx *tagx, const MOBIOrdt *ordt, MOBIBuffer *buf, const size_t curr_number) {
if (indx == NULL) {
debug_print("%s", "INDX structure not initialized\n");
return MOBI_INIT_FAILED;
}
const size_t entry_offset = indx->entries_count;
const size_t entry_length = idxt.offsets[curr_number + 1] - idxt.offsets[curr_number];
mobi_buffer_setpos(buf, idxt.offsets[curr_number]);
size_t entry_number = curr_number + entry_offset;
if (entry_number >= indx->total_entries_count) {
debug_print("Entry number beyond array: %zu\n", entry_number);
return MOBI_DATA_CORRUPT;
}
/* save original record maxlen */
const size_t buf_maxlen = buf->maxlen;
if (buf->offset + entry_length >= buf_maxlen) {
debug_print("Entry length too long: %zu\n", entry_length);
return MOBI_DATA_CORRUPT;
}
buf->maxlen = buf->offset + entry_length;
size_t label_length = mobi_buffer_get8(buf);
if (label_length > entry_length) {
debug_print("Label length too long: %zu\n", label_length);
return MOBI_DATA_CORRUPT;
}
char text[INDX_LABEL_SIZEMAX + 1];
/* FIXME: what is ORDT1 for? */
if (ordt->ordt2) {
label_length = mobi_getstring_ordt(ordt, buf, (unsigned char*) text, label_length);
} else {
label_length = mobi_indx_get_label((unsigned char*) text, buf, label_length, indx->ligt_entries_count);
if (buf->error != MOBI_SUCCESS) {
debug_print("Buffer error reading label: %d\n", buf->error);
return MOBI_DATA_CORRUPT;
}
}
indx->entries[entry_number].label = malloc(label_length + 1);
if (indx->entries[entry_number].label == NULL) {
debug_print("Memory allocation failed (%zu bytes)\n", label_length);
return MOBI_MALLOC_FAILED;
}
strncpy(indx->entries[entry_number].label, text, label_length + 1);
//debug_print("tag label[%zu]: %s\n", entry_number, indx->entries[entry_number].label);
unsigned char *control_bytes;
control_bytes = buf->data + buf->offset;
mobi_buffer_seek(buf, (int) tagx->control_byte_count);
indx->entries[entry_number].tags_count = 0;
indx->entries[entry_number].tags = NULL;
if (tagx->tags_count > 0) {
typedef struct {
uint8_t tag;
uint8_t tag_value_count;
uint32_t value_count;
uint32_t value_bytes;
} MOBIPtagx;
MOBIPtagx *ptagx = malloc(tagx->tags_count * sizeof(MOBIPtagx));
if (ptagx == NULL) {
debug_print("Memory allocation failed (%zu bytes)\n", tagx->tags_count * sizeof(MOBIPtagx));
return MOBI_MALLOC_FAILED;
}
uint32_t ptagx_count = 0;
size_t len;
size_t i = 0;
while (i < tagx->tags_count) {
if (tagx->tags[i].control_byte == 1) {
control_bytes++;
i++;
continue;
}
uint32_t value = control_bytes[0] & tagx->tags[i].bitmask;
if (value != 0) {
/* FIXME: is it safe to use MOBI_NOTSET? */
uint32_t value_count = MOBI_NOTSET;
uint32_t value_bytes = MOBI_NOTSET;
/* all bits of masked value are set */
if (value == tagx->tags[i].bitmask) {
/* more than 1 bit set */
if (mobi_bitcount(tagx->tags[i].bitmask) > 1) {
/* read value bytes from entry */
len = 0;
value_bytes = mobi_buffer_get_varlen(buf, &len);
} else {
value_count = 1;
}
} else {
uint8_t mask = tagx->tags[i].bitmask;
while ((mask & 1) == 0) {
mask >>= 1;
value >>= 1;
}
value_count = value;
}
ptagx[ptagx_count].tag = tagx->tags[i].tag;
ptagx[ptagx_count].tag_value_count = tagx->tags[i].values_count;
ptagx[ptagx_count].value_count = value_count;
ptagx[ptagx_count].value_bytes = value_bytes;
ptagx_count++;
}
i++;
}
indx->entries[entry_number].tags = malloc(tagx->tags_count * sizeof(MOBIIndexTag));
if (indx->entries[entry_number].tags == NULL) {
debug_print("Memory allocation failed (%zu bytes)\n", tagx->tags_count * sizeof(MOBIIndexTag));
free(ptagx);
return MOBI_MALLOC_FAILED;
}
i = 0;
while (i < ptagx_count) {
uint32_t tagvalues_count = 0;
/* FIXME: is it safe to use MOBI_NOTSET? */
/* value count is set */
uint32_t tagvalues[INDX_TAGVALUES_MAX];
if (ptagx[i].value_count != MOBI_NOTSET) {
size_t count = ptagx[i].value_count * ptagx[i].tag_value_count;
while (count-- && tagvalues_count < INDX_TAGVALUES_MAX) {
len = 0;
const uint32_t value_bytes = mobi_buffer_get_varlen(buf, &len);
tagvalues[tagvalues_count++] = value_bytes;
}
/* value count is not set */
} else {
/* read value_bytes bytes */
len = 0;
while (len < ptagx[i].value_bytes && tagvalues_count < INDX_TAGVALUES_MAX) {
const uint32_t value_bytes = mobi_buffer_get_varlen(buf, &len);
tagvalues[tagvalues_count++] = value_bytes;
}
}
if (tagvalues_count) {
const size_t arr_size = tagvalues_count * sizeof(*indx->entries[entry_number].tags[i].tagvalues);
indx->entries[entry_number].tags[i].tagvalues = malloc(arr_size);
if (indx->entries[entry_number].tags[i].tagvalues == NULL) {
debug_print("Memory allocation failed (%zu bytes)\n", arr_size);
free(ptagx);
return MOBI_MALLOC_FAILED;
}
memcpy(indx->entries[entry_number].tags[i].tagvalues, tagvalues, arr_size);
} else {
indx->entries[entry_number].tags[i].tagvalues = NULL;
}
indx->entries[entry_number].tags[i].tagid = ptagx[i].tag;
indx->entries[entry_number].tags[i].tagvalues_count = tagvalues_count;
indx->entries[entry_number].tags_count++;
i++;
}
free(ptagx);
}
/* restore buffer maxlen */
buf->maxlen = buf_maxlen;
return MOBI_SUCCESS;
} | 0 | [
"CWE-125"
]
| libmobi | 612562bc1ea38f1708b044e7a079c47a05b1291d | 299,380,336,118,284,700,000,000,000,000,000,000,000 | 151 | Fix: index entry label not being zero-terminated with corrupt input |
int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size)
{
kuid_t euid;
kgid_t egid;
int id;
int next_id = ids->next_id;
if (size > IPCMNI)
size = IPCMNI;
if (ids->in_use >= size)
return -ENOSPC;
idr_preload(GFP_KERNEL);
spin_lock_init(&new->lock);
new->deleted = false;
rcu_read_lock();
spin_lock(&new->lock);
current_euid_egid(&euid, &egid);
new->cuid = new->uid = euid;
new->gid = new->cgid = egid;
id = idr_alloc(&ids->ipcs_idr, new,
(next_id < 0) ? 0 : ipcid_to_idx(next_id), 0,
GFP_NOWAIT);
idr_preload_end();
if (id < 0) {
spin_unlock(&new->lock);
rcu_read_unlock();
return id;
}
ids->in_use++;
if (next_id < 0) {
new->seq = ids->seq++;
if (ids->seq > IPCID_SEQ_MAX)
ids->seq = 0;
} else {
new->seq = ipcid_to_seqx(next_id);
ids->next_id = -1;
}
new->id = ipc_buildid(id, new->seq);
return id;
} | 0 | [
"CWE-362",
"CWE-401"
]
| linux | b9a532277938798b53178d5a66af6e2915cb27cf | 189,430,966,304,454,360,000,000,000,000,000,000,000 | 48 | Initialize msg/shm IPC objects before doing ipc_addid()
As reported by Dmitry Vyukov, we really shouldn't do ipc_addid() before
having initialized the IPC object state. Yes, we initialize the IPC
object in a locked state, but with all the lockless RCU lookup work,
that IPC object lock no longer means that the state cannot be seen.
We already did this for the IPC semaphore code (see commit e8577d1f0329:
"ipc/sem.c: fully initialize sem_array before making it visible") but we
clearly forgot about msg and shm.
Reported-by: Dmitry Vyukov <[email protected]>
Cc: Manfred Spraul <[email protected]>
Cc: Davidlohr Bueso <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]> |
unsigned VvcUnit::extractUEGolombCode()
{
int cnt = 0;
for (; m_reader.getBits(1) == 0; cnt++)
;
if (cnt > INT_BIT)
THROW_BITSTREAM_ERR;
return (1 << cnt) - 1 + m_reader.getBits(cnt);
} | 0 | [
"CWE-22"
]
| tsMuxer | 3763dd34755a8944d903aa19578fa22cd3734165 | 21,807,689,871,349,560,000,000,000,000,000,000,000 | 9 | Fix Buffer Overflow
Fixes issue #509. |
RemoteFsDevice::RemoteFsDevice(MusicLibraryModel *m, const DeviceOptions &options, const Details &d)
: FsDevice(m, d.name, createUdi(d.name))
, mountToken(0)
, currentMountStatus(false)
, details(d)
, proc(0)
, messageSent(false)
{
opts=options;
// details.path=Utils::fixPath(details.path);
load();
mount();
icn=MonoIcon::icon(details.isLocalFile()
? FontAwesome::foldero
: constSshfsProtocol==details.url.scheme()
? FontAwesome::linux_os
: FontAwesome::windows, Utils::monoIconColor());
} | 0 | [
"CWE-20",
"CWE-22"
]
| cantata | afc4f8315d3e96574925fb530a7004cc9e6ce3d3 | 118,271,934,389,924,300,000,000,000,000,000,000,000 | 18 | Remove internal Samba shre mounting code, this had some privilege escalation issues, and is not well tested |
duint32 dwgCompressor::litLength21(duint8 *cbuf, duint8 oc, duint32 *si){
duint32 srcIndex=*si;
duint32 length = oc + 8;
if (length == 0x17) {
duint32 n = cbuf[srcIndex++];
length += n;
if (n == 0xff) {
do {
n = cbuf[srcIndex++];
n |= static_cast<duint32>(cbuf[srcIndex++] << 8);
length += n;
} while (n == 0xffff);
}
}
*si = srcIndex;
return length;
} | 1 | [
"CWE-191"
]
| libdxfrw | 6417118874333309aa10c4e59f954c3905a6e8b5 | 218,867,073,774,992,660,000,000,000,000,000,000,000 | 20 | fixed heap buffer overflow vulnerability CVE-2021-21899
as reported in TALOS-2021-1350 / CVE-2021-21899,
dwgCompressor::decompress21() could be abused with a malformed DWG file
to force heap buffer overflow and possibly lead to malicious code
execution. |
void wc_ecc_free_curve(const ecc_set_type* curve, void* heap)
{
if (curve->prime != NULL)
XFREE((void*)curve->prime, heap, DYNAMIC_TYPE_ECC_BUFFER);
if (curve->Af != NULL)
XFREE((void*)curve->Af, heap, DYNAMIC_TYPE_ECC_BUFFER);
if (curve->Bf != NULL)
XFREE((void*)curve->Bf, heap, DYNAMIC_TYPE_ECC_BUFFER);
if (curve->order != NULL)
XFREE((void*)curve->order, heap, DYNAMIC_TYPE_ECC_BUFFER);
if (curve->Gx != NULL)
XFREE((void*)curve->Gx, heap, DYNAMIC_TYPE_ECC_BUFFER);
if (curve->Gy != NULL)
XFREE((void*)curve->Gy, heap, DYNAMIC_TYPE_ECC_BUFFER);
XFREE((void*)curve, heap, DYNAMIC_TYPE_ECC_BUFFER);
(void)heap;
} | 0 | [
"CWE-200"
]
| wolfssl | 9b9568d500f31f964af26ba8d01e542e1f27e5ca | 179,796,372,770,814,200,000,000,000,000,000,000,000 | 19 | Change ECDSA signing to use blinding. |
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
struct page **pages, unsigned int base, unsigned int len)
{
struct kvec *head = xdr->head;
struct kvec *tail = xdr->tail;
char *buf = (char *)head->iov_base;
unsigned int buflen = head->iov_len;
head->iov_len = offset;
xdr->pages = pages;
xdr->page_base = base;
xdr->page_len = len;
tail->iov_base = buf + offset;
tail->iov_len = buflen - offset;
xdr->buflen += len;
} | 0 | [
"CWE-119",
"CWE-787"
]
| linux | 6d1c0f3d28f98ea2736128ed3e46821496dc3a8c | 132,418,434,172,468,910,000,000,000,000,000,000,000 | 18 | sunrpc: Avoid a KASAN slab-out-of-bounds bug in xdr_set_page_base()
This seems to happen fairly easily during READ_PLUS testing on NFS v4.2.
I found that we could end up accessing xdr->buf->pages[pgnr] with a pgnr
greater than the number of pages in the array. So let's just return
early if we're setting base to a point at the end of the page data and
let xdr_set_tail_base() handle setting up the buffer pointers instead.
Signed-off-by: Anna Schumaker <[email protected]>
Fixes: 8d86e373b0ef ("SUNRPC: Clean up helpers xdr_set_iov() and xdr_set_page_base()")
Signed-off-by: Trond Myklebust <[email protected]> |
static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
{
struct curseg_info *curseg = CURSEG_I(sbi, type);
unsigned int segno = curseg->segno + 1;
struct free_segmap_info *free_i = FREE_I(sbi);
if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec)
return !test_bit(segno, free_i->free_segmap);
return 0;
} | 0 | [
"CWE-20"
]
| linux | 638164a2718f337ea224b747cf5977ef143166a4 | 286,923,549,525,297,540,000,000,000,000,000,000,000 | 10 | f2fs: fix potential panic during fstrim
As Ju Hyung Park reported:
"When 'fstrim' is called for manual trim, a BUG() can be triggered
randomly with this patch.
I'm seeing this issue on both x86 Desktop and arm64 Android phone.
On x86 Desktop, this was caused during Ubuntu boot-up. I have a
cronjob installed which calls 'fstrim -v /' during boot. On arm64
Android, this was caused during GC looping with 1ms gc_min_sleep_time
& gc_max_sleep_time."
Root cause of this issue is that f2fs_wait_discard_bios can only be
used by f2fs_put_super, because during put_super there must be no
other referrers, so it can ignore discard entry's reference count
when removing the entry, otherwise in other caller we will hit bug_on
in __remove_discard_cmd as there may be other issuer added reference
count in discard entry.
Thread A Thread B
- issue_discard_thread
- f2fs_ioc_fitrim
- f2fs_trim_fs
- f2fs_wait_discard_bios
- __issue_discard_cmd
- __submit_discard_cmd
- __wait_discard_cmd
- dc->ref++
- __wait_one_discard_bio
- __wait_discard_cmd
- __remove_discard_cmd
- f2fs_bug_on(sbi, dc->ref)
Fixes: 969d1b180d987c2be02de890d0fff0f66a0e80de
Reported-by: Ju Hyung Park <[email protected]>
Signed-off-by: Chao Yu <[email protected]>
Signed-off-by: Jaegeuk Kim <[email protected]> |
setup_anchor(Node* node, regex_t* reg, int state, ScanEnv* env)
{
/* allowed node types in look-behind */
#define ALLOWED_TYPE_IN_LB \
( BIT_NODE_LIST | BIT_NODE_ALT | BIT_NODE_STRING | BIT_NODE_CCLASS \
| BIT_NODE_CTYPE | BIT_NODE_ANCHOR | BIT_NODE_ENCLOSURE | BIT_NODE_QUANT \
| BIT_NODE_CALL | BIT_NODE_GIMMICK)
#define ALLOWED_ENCLOSURE_IN_LB ( 1<<ENCLOSURE_MEMORY | 1<<ENCLOSURE_OPTION )
#define ALLOWED_ENCLOSURE_IN_LB_NOT (1<<ENCLOSURE_OPTION)
#define ALLOWED_ANCHOR_IN_LB \
( ANCHOR_LOOK_BEHIND | ANCHOR_BEGIN_LINE | ANCHOR_END_LINE | ANCHOR_BEGIN_BUF \
| ANCHOR_BEGIN_POSITION | ANCHOR_WORD_BOUNDARY | ANCHOR_NO_WORD_BOUNDARY \
| ANCHOR_WORD_BEGIN | ANCHOR_WORD_END \
| ANCHOR_EXTENDED_GRAPHEME_CLUSTER_BOUNDARY \
| ANCHOR_NO_EXTENDED_GRAPHEME_CLUSTER_BOUNDARY )
#define ALLOWED_ANCHOR_IN_LB_NOT \
( ANCHOR_LOOK_BEHIND | ANCHOR_LOOK_BEHIND_NOT | ANCHOR_BEGIN_LINE \
| ANCHOR_END_LINE | ANCHOR_BEGIN_BUF | ANCHOR_BEGIN_POSITION | ANCHOR_WORD_BOUNDARY \
| ANCHOR_NO_WORD_BOUNDARY | ANCHOR_WORD_BEGIN | ANCHOR_WORD_END \
| ANCHOR_EXTENDED_GRAPHEME_CLUSTER_BOUNDARY \
| ANCHOR_NO_EXTENDED_GRAPHEME_CLUSTER_BOUNDARY )
int r;
AnchorNode* an = ANCHOR_(node);
switch (an->type) {
case ANCHOR_PREC_READ:
r = setup_tree(NODE_ANCHOR_BODY(an), reg, state, env);
break;
case ANCHOR_PREC_READ_NOT:
r = setup_tree(NODE_ANCHOR_BODY(an), reg, (state | IN_NOT), env);
break;
case ANCHOR_LOOK_BEHIND:
{
r = check_type_tree(NODE_ANCHOR_BODY(an), ALLOWED_TYPE_IN_LB,
ALLOWED_ENCLOSURE_IN_LB, ALLOWED_ANCHOR_IN_LB);
if (r < 0) return r;
if (r > 0) return ONIGERR_INVALID_LOOK_BEHIND_PATTERN;
r = setup_tree(NODE_ANCHOR_BODY(an), reg, state, env);
if (r != 0) return r;
r = setup_look_behind(node, reg, env);
}
break;
case ANCHOR_LOOK_BEHIND_NOT:
{
r = check_type_tree(NODE_ANCHOR_BODY(an), ALLOWED_TYPE_IN_LB,
ALLOWED_ENCLOSURE_IN_LB_NOT, ALLOWED_ANCHOR_IN_LB_NOT);
if (r < 0) return r;
if (r > 0) return ONIGERR_INVALID_LOOK_BEHIND_PATTERN;
r = setup_tree(NODE_ANCHOR_BODY(an), reg, (state | IN_NOT), env);
if (r != 0) return r;
r = setup_look_behind(node, reg, env);
}
break;
default:
r = 0;
break;
}
return r;
} | 0 | [
"CWE-476"
]
| oniguruma | 410f5916429e7d2920e1d4867388514f605413b8 | 119,927,984,371,492,950,000,000,000,000,000,000,000 | 67 | fix #87: Read unknown address in onig_error_code_to_str() |
dissect_kafka_produce_request_partition(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
kafka_api_version_t api_version _U_)
{
proto_item *ti;
proto_tree *subtree;
guint len;
kafka_packet_values_t packet_values;
memset(&packet_values, 0, sizeof(packet_values));
subtree = proto_tree_add_subtree(tree, tvb, offset, 14, ett_kafka_partition, &ti, "Partition");
offset = dissect_kafka_partition_id_ret(tvb, pinfo, subtree, offset, &packet_values.partition_id);
len = tvb_get_ntohl(tvb, offset);
offset += 4;
if (len > 0) {
offset = dissect_kafka_message_set(tvb, pinfo, subtree, offset, len, KAFKA_MESSAGE_CODEC_NONE);
}
proto_item_append_text(ti, " (ID=%u)", packet_values.partition_id);
proto_item_set_end(ti, tvb, offset);
return offset;
} | 0 | [
"CWE-401"
]
| wireshark | f4374967bbf9c12746b8ec3cd54dddada9dd353e | 129,040,216,681,292,280,000,000,000,000,000,000,000 | 25 | Kafka: Limit our decompression size.
Don't assume that the Internet has our best interests at heart when it
gives us the size of our decompression buffer. Assign an arbitrary limit
of 50 MB.
This fixes #16739 in that it takes care of
** (process:17681): WARNING **: 20:03:07.440: Dissector bug, protocol Kafka, in packet 31: ../epan/proto.c:7043: failed assertion "end >= fi->start"
which is different from the original error output. It looks like *that*
might have taken care of in one of the other recent Kafka bug fixes.
The decompression routines return a success or failure status. Use
gbooleans instead of ints for that. |
RegexMatchExpression::~RegexMatchExpression() {} | 0 | [
"CWE-190"
]
| mongo | 21d8699ed6c517b45e1613e20231cd8eba894985 | 337,976,867,677,517,640,000,000,000,000,000,000,000 | 1 | SERVER-43699 $mod should not overflow for large negative values |
CheckCompoundAffixes(CMPDAffix **ptr, char *word, int len, bool CheckInPlace)
{
bool issuffix;
if (CheckInPlace)
{
while ((*ptr)->affix)
{
if (len > (*ptr)->len && strncmp((*ptr)->affix, word, (*ptr)->len) == 0)
{
len = (*ptr)->len;
issuffix = (*ptr)->issuffix;
(*ptr)++;
return (issuffix) ? len : 0;
}
(*ptr)++;
}
}
else
{
char *affbegin;
while ((*ptr)->affix)
{
if (len > (*ptr)->len && (affbegin = strstr(word, (*ptr)->affix)) != NULL)
{
len = (*ptr)->len + (affbegin - word);
issuffix = (*ptr)->issuffix;
(*ptr)++;
return (issuffix) ? len : 0;
}
(*ptr)++;
}
}
return -1;
} | 0 | [
"CWE-119"
]
| postgres | 01824385aead50e557ca1af28640460fa9877d51 | 184,504,831,485,064,360,000,000,000,000,000,000,000 | 36 | Prevent potential overruns of fixed-size buffers.
Coverity identified a number of places in which it couldn't prove that a
string being copied into a fixed-size buffer would fit. We believe that
most, perhaps all of these are in fact safe, or are copying data that is
coming from a trusted source so that any overrun is not really a security
issue. Nonetheless it seems prudent to forestall any risk by using
strlcpy() and similar functions.
Fixes by Peter Eisentraut and Jozef Mlich based on Coverity reports.
In addition, fix a potential null-pointer-dereference crash in
contrib/chkpass. The crypt(3) function is defined to return NULL on
failure, but chkpass.c didn't check for that before using the result.
The main practical case in which this could be an issue is if libc is
configured to refuse to execute unapproved hashing algorithms (e.g.,
"FIPS mode"). This ideally should've been a separate commit, but
since it touches code adjacent to one of the buffer overrun changes,
I included it in this commit to avoid last-minute merge issues.
This issue was reported by Honza Horak.
Security: CVE-2014-0065 for buffer overruns, CVE-2014-0066 for crypt() |
vte_sequence_handler_st (VteTerminal *terminal, GValueArray *params)
{
if (terminal->pvt->tabstops == NULL) {
terminal->pvt->tabstops = g_hash_table_new(NULL, NULL);
}
_vte_terminal_set_tabstop(terminal,
terminal->pvt->screen->cursor_current.col);
} | 0 | []
| vte | 58bc3a942f198a1a8788553ca72c19d7c1702b74 | 217,667,907,790,495,500,000,000,000,000,000,000,000 | 8 | fix bug #548272
svn path=/trunk/; revision=2365 |
const std::vector<fs::path>& getTestFiles() const {
return _testFiles;
} | 0 | [
"CWE-755"
]
| mongo | 75f7184eafa78006a698cda4c4adfb57f1290047 | 119,508,997,396,655,650,000,000,000,000,000,000,000 | 3 | SERVER-50170 fix max staleness read preference parameter for server selection |
callbacks_edit_object_properties_clicked (GtkButton *button, gpointer user_data){
} | 0 | [
"CWE-200"
]
| gerbv | 319a8af890e4d0a5c38e6d08f510da8eefc42537 | 129,071,679,209,343,840,000,000,000,000,000,000,000 | 2 | Remove local alias to parameter array
Normalizing access to `gerbv_simplified_amacro_t::parameter` as a step to fix CVE-2021-40402 |
static int unimac_mdio_poll(void *wait_func_data)
{
struct unimac_mdio_priv *priv = wait_func_data;
unsigned int timeout = 1000;
do {
if (!unimac_mdio_busy(priv))
return 0;
usleep_range(1000, 2000);
} while (--timeout);
if (!timeout)
return -ETIMEDOUT;
return 0;
} | 0 | [
"CWE-476"
]
| linux | 297a6961ffb8ff4dc66c9fbf53b924bd1dda05d5 | 308,662,719,833,484,100,000,000,000,000,000,000,000 | 17 | net: phy: mdio-bcm-unimac: fix potential NULL dereference in unimac_mdio_probe()
platform_get_resource() may fail and return NULL, so we should
better check it's return value to avoid a NULL pointer dereference
a bit later in the code.
This is detected by Coccinelle semantic patch.
@@
expression pdev, res, n, t, e, e1, e2;
@@
res = platform_get_resource(pdev, t, n);
+ if (!res)
+ return -EINVAL;
... when != res == NULL
e = devm_ioremap(e1, res->start, e2);
Signed-off-by: Wei Yongjun <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
Variant c_SimpleXMLElementIterator::t_next() {
if (m_iter1 == nullptr) return uninit_null();
if (m_parent->m_is_attribute) {
m_iter1->next();
return uninit_null();
}
if (m_iter2) {
m_iter2->next();
if (!m_iter2->end()) {
return uninit_null();
}
delete m_iter2; m_iter2 = nullptr;
}
m_iter1->next();
while (!m_iter1->end()) {
if (m_iter1->second().isArray()) {
m_iter2 = new ArrayIter(m_iter1->second().toArray());
break;
}
if (m_iter1->second().isObject()) {
break;
}
m_iter1->next();
}
return uninit_null();
} | 0 | [
"CWE-94"
]
| hhvm | 95f96e7287effe2fcdfb9a5338d1a7e4f55b083b | 113,426,835,653,655,920,000,000,000,000,000,000,000 | 27 | Fix libxml_disable_entity_loader()
This wasn't calling requestInit and setting the libxml handler no null.
So the first time an error came along it would reset the handler from
no-op to reading again.
This is a much better fix, we set our custom handler in requestInit and
when libxml_disable_entity_loader we store that state as a member bool
ensuring requestInit is always called to set our own handler.
If the handler isn't inserted then the behavious is as before. The only
time this could go pear shaped is say we wanted to make the default be
off. In that case we'd need a global requestInit that is always called
since there are libxml references everywhere.
Reviewed By: @jdelong
Differential Revision: D1116686 |
static void remove_pidfile(const char *pid_file) {
if (pid_file == NULL)
return;
if (unlink(pid_file) != 0) {
vperror("Could not remove the pid file %s", pid_file);
}
} | 0 | [
"CWE-189"
]
| memcached | 6695ccbc525c36d693aaa3e8337b36aa0c784424 | 10,388,221,766,743,117,000,000,000,000,000,000,000 | 9 | Fix segfault on specially crafted packet. |
static inline int rx_work_todo(struct xen_netbk *netbk)
{
return !skb_queue_empty(&netbk->rx_queue);
} | 0 | [
"CWE-399"
]
| linux | 7d5145d8eb2b9791533ffe4dc003b129b9696c48 | 63,504,267,028,494,610,000,000,000,000,000,000,000 | 4 | xen/netback: don't leak pages on failure in xen_netbk_tx_check_gop.
Signed-off-by: Matthew Daley <[email protected]>
Reviewed-by: Konrad Rzeszutek Wilk <[email protected]>
Acked-by: Ian Campbell <[email protected]>
Acked-by: Jan Beulich <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
void gnutls_deinit(gnutls_session_t session)
{
unsigned int i;
if (session == NULL)
return;
/* remove auth info firstly */
_gnutls_free_auth_info(session);
_gnutls_handshake_internal_state_clear(session);
_gnutls_handshake_io_buffer_clear(session);
_gnutls_hello_ext_priv_deinit(session);
for (i = 0; i < MAX_EPOCH_INDEX; i++)
if (session->record_parameters[i] != NULL) {
_gnutls_epoch_free(session,
session->record_parameters[i]);
session->record_parameters[i] = NULL;
}
_gnutls_buffer_clear(&session->internals.handshake_hash_buffer);
_gnutls_buffer_clear(&session->internals.post_handshake_hash_buffer);
_gnutls_buffer_clear(&session->internals.hb_remote_data);
_gnutls_buffer_clear(&session->internals.hb_local_data);
_gnutls_buffer_clear(&session->internals.record_presend_buffer);
_gnutls_buffer_clear(&session->internals.record_key_update_buffer);
_gnutls_buffer_clear(&session->internals.reauth_buffer);
_mbuffer_head_clear(&session->internals.record_buffer);
_mbuffer_head_clear(&session->internals.record_recv_buffer);
_mbuffer_head_clear(&session->internals.record_send_buffer);
_mbuffer_head_clear(&session->internals.early_data_recv_buffer);
_gnutls_buffer_clear(&session->internals.early_data_presend_buffer);
_gnutls_free_datum(&session->internals.resumption_data);
_gnutls_free_datum(&session->internals.dtls.dcookie);
for (i = 0; i < session->internals.rexts_size; i++)
gnutls_free(session->internals.rexts[i].name);
gnutls_free(session->internals.rexts);
gnutls_free(session->internals.post_handshake_cr_context.data);
gnutls_free(session->internals.rsup);
gnutls_credentials_clear(session);
_gnutls_selected_certs_deinit(session);
/* destroy any session ticket we may have received */
_gnutls13_session_ticket_unset(session);
/* we rely on priorities' internal reference counting */
gnutls_priority_deinit(session->internals.priorities);
/* overwrite any temp TLS1.3 keys */
gnutls_memset(&session->key.proto, 0, sizeof(session->key.proto));
gnutls_mutex_deinit(&session->internals.post_negotiation_lock);
gnutls_mutex_deinit(&session->internals.epoch_lock);
gnutls_free(session);
} | 0 | []
| gnutls | 3d7fae761e65e9d0f16d7247ee8a464d4fe002da | 314,989,674,159,162,950,000,000,000,000,000,000,000 | 63 | valgrind: check if session ticket key is used without initialization
This adds a valgrind client request for
session->key.session_ticket_key to make sure that it is not used
without initialization.
Signed-off-by: Daiki Ueno <[email protected]> |
int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
const void *data, int offset, int len)
{
struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len);
} | 0 | [
"CWE-119"
]
| linux | f8be156be163a052a067306417cd0ff679068c97 | 276,416,143,107,251,050,000,000,000,000,000,000,000 | 7 | KVM: do not allow mapping valid but non-reference-counted pages
It's possible to create a region which maps valid but non-refcounted
pages (e.g., tail pages of non-compound higher order allocations). These
host pages can then be returned by gfn_to_page, gfn_to_pfn, etc., family
of APIs, which take a reference to the page, which takes it from 0 to 1.
When the reference is dropped, this will free the page incorrectly.
Fix this by only taking a reference on valid pages if it was non-zero,
which indicates it is participating in normal refcounting (and can be
released with put_page).
This addresses CVE-2021-22543.
Signed-off-by: Nicholas Piggin <[email protected]>
Tested-by: Paolo Bonzini <[email protected]>
Cc: [email protected]
Signed-off-by: Paolo Bonzini <[email protected]> |
xfs_inode_to_disk(
struct xfs_inode *ip,
struct xfs_dinode *to,
xfs_lsn_t lsn)
{
struct xfs_icdinode *from = &ip->i_d;
struct inode *inode = VFS_I(ip);
to->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
to->di_onlink = 0;
to->di_version = from->di_version;
to->di_format = from->di_format;
to->di_uid = cpu_to_be32(from->di_uid);
to->di_gid = cpu_to_be32(from->di_gid);
to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
memset(to->di_pad, 0, sizeof(to->di_pad));
to->di_atime.t_sec = cpu_to_be32(inode->i_atime.tv_sec);
to->di_atime.t_nsec = cpu_to_be32(inode->i_atime.tv_nsec);
to->di_mtime.t_sec = cpu_to_be32(inode->i_mtime.tv_sec);
to->di_mtime.t_nsec = cpu_to_be32(inode->i_mtime.tv_nsec);
to->di_ctime.t_sec = cpu_to_be32(inode->i_ctime.tv_sec);
to->di_ctime.t_nsec = cpu_to_be32(inode->i_ctime.tv_nsec);
to->di_nlink = cpu_to_be32(inode->i_nlink);
to->di_gen = cpu_to_be32(inode->i_generation);
to->di_mode = cpu_to_be16(inode->i_mode);
to->di_size = cpu_to_be64(from->di_size);
to->di_nblocks = cpu_to_be64(from->di_nblocks);
to->di_extsize = cpu_to_be32(from->di_extsize);
to->di_nextents = cpu_to_be32(from->di_nextents);
to->di_anextents = cpu_to_be16(from->di_anextents);
to->di_forkoff = from->di_forkoff;
to->di_aformat = from->di_aformat;
to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
to->di_dmstate = cpu_to_be16(from->di_dmstate);
to->di_flags = cpu_to_be16(from->di_flags);
if (from->di_version == 3) {
to->di_changecount = cpu_to_be64(inode_peek_iversion(inode));
to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
to->di_flags2 = cpu_to_be64(from->di_flags2);
to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
to->di_ino = cpu_to_be64(ip->i_ino);
to->di_lsn = cpu_to_be64(lsn);
memset(to->di_pad2, 0, sizeof(to->di_pad2));
uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
to->di_flushiter = 0;
} else {
to->di_flushiter = cpu_to_be16(from->di_flushiter);
}
} | 0 | []
| linux | b42db0860e13067fcc7cbfba3966c9e652668bbc | 317,289,871,014,915,200,000,000,000,000,000,000,000 | 55 | xfs: enhance dinode verifier
Add several more validations to xfs_dinode_verify:
- For LOCAL data fork formats, di_nextents must be 0.
- For LOCAL attr fork formats, di_anextents must be 0.
- For inodes with no attr fork offset,
- format must be XFS_DINODE_FMT_EXTENTS if set at all
- di_anextents must be 0.
Thanks to dchinner for pointing out a couple related checks I had
forgotten to add.
Signed-off-by: Eric Sandeen <[email protected]>
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=199377
Reviewed-by: Darrick J. Wong <[email protected]>
Signed-off-by: Darrick J. Wong <[email protected]> |
int snd_usb_pipe_sanity_check(struct usb_device *dev, unsigned int pipe)
{
static const int pipetypes[4] = {
PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
};
struct usb_host_endpoint *ep;
ep = usb_pipe_endpoint(dev, pipe);
if (!ep || usb_pipetype(pipe) != pipetypes[usb_endpoint_type(&ep->desc)])
return -EINVAL;
return 0;
} | 0 | [
"CWE-476"
]
| linux | 5d78e1c2b7f4be00bbe62141603a631dc7812f35 | 50,798,574,985,299,720,000,000,000,000,000,000,000 | 12 | ALSA: usb-audio: Fix gpf in snd_usb_pipe_sanity_check
syzbot found the following crash on:
general protection fault: 0000 [#1] SMP KASAN
RIP: 0010:snd_usb_pipe_sanity_check+0x80/0x130 sound/usb/helper.c:75
Call Trace:
snd_usb_motu_microbookii_communicate.constprop.0+0xa0/0x2fb sound/usb/quirks.c:1007
snd_usb_motu_microbookii_boot_quirk sound/usb/quirks.c:1051 [inline]
snd_usb_apply_boot_quirk.cold+0x163/0x370 sound/usb/quirks.c:1280
usb_audio_probe+0x2ec/0x2010 sound/usb/card.c:576
usb_probe_interface+0x305/0x7a0 drivers/usb/core/driver.c:361
really_probe+0x281/0x650 drivers/base/dd.c:548
....
It was introduced in commit 801ebf1043ae for checking pipe and endpoint
types. It is fixed by adding a check of the ep pointer in question.
BugLink: https://syzkaller.appspot.com/bug?extid=d59c4387bfb6eced94e2
Reported-by: syzbot <[email protected]>
Fixes: 801ebf1043ae ("ALSA: usb-audio: Sanity checks for each pipe and EP types")
Cc: Andrey Konovalov <[email protected]>
Signed-off-by: Hillf Danton <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]> |
int av_parser_parse2(AVCodecParserContext *s,
AVCodecContext *avctx,
uint8_t **poutbuf, int *poutbuf_size,
const uint8_t *buf, int buf_size,
int64_t pts, int64_t dts,
int64_t pos)
{
int index, i;
uint8_t dummy_buf[FF_INPUT_BUFFER_PADDING_SIZE];
if(!(s->flags & PARSER_FLAG_FETCHED_OFFSET)) {
s->next_frame_offset =
s->cur_offset = pos;
s->flags |= PARSER_FLAG_FETCHED_OFFSET;
}
if (buf_size == 0) {
/* padding is always necessary even if EOF, so we add it here */
memset(dummy_buf, 0, sizeof(dummy_buf));
buf = dummy_buf;
} else if (s->cur_offset + buf_size !=
s->cur_frame_end[s->cur_frame_start_index]) { /* skip remainder packets */
/* add a new packet descriptor */
i = (s->cur_frame_start_index + 1) & (AV_PARSER_PTS_NB - 1);
s->cur_frame_start_index = i;
s->cur_frame_offset[i] = s->cur_offset;
s->cur_frame_end[i] = s->cur_offset + buf_size;
s->cur_frame_pts[i] = pts;
s->cur_frame_dts[i] = dts;
s->cur_frame_pos[i] = pos;
}
if (s->fetch_timestamp){
s->fetch_timestamp=0;
s->last_pts = s->pts;
s->last_dts = s->dts;
s->last_pos = s->pos;
ff_fetch_timestamp(s, 0, 0);
}
/* WARNING: the returned index can be negative */
index = s->parser->parser_parse(s, avctx, (const uint8_t **)poutbuf, poutbuf_size, buf, buf_size);
/* update the file pointer */
if (*poutbuf_size) {
/* fill the data for the current frame */
s->frame_offset = s->next_frame_offset;
/* offset of the next frame */
s->next_frame_offset = s->cur_offset + index;
s->fetch_timestamp=1;
}
if (index < 0)
index = 0;
s->cur_offset += index;
return index;
} | 0 | [
"CWE-119",
"CWE-787"
]
| FFmpeg | f31011e9abfb2ae75bb32bc44e2c34194c8dc40a | 339,713,437,892,111,100,000,000,000,000,000,000,000 | 56 | avcodec/parser: reset indexes on realloc failure
Fixes Ticket2982
Signed-off-by: Michael Niedermayer <[email protected]> |
g_file_measure_disk_usage_async (GFile *file,
GFileMeasureFlags flags,
gint io_priority,
GCancellable *cancellable,
GFileMeasureProgressCallback progress_callback,
gpointer progress_data,
GAsyncReadyCallback callback,
gpointer user_data)
{
g_return_if_fail (G_IS_FILE (file));
g_return_if_fail (cancellable == NULL || G_IS_CANCELLABLE (cancellable));
G_FILE_GET_IFACE (file)->measure_disk_usage_async (file, flags, io_priority, cancellable,
progress_callback, progress_data,
callback, user_data);
} | 0 | [
"CWE-362"
]
| glib | d8f8f4d637ce43f8699ba94c9b7648beda0ca174 | 250,129,689,799,702,320,000,000,000,000,000,000,000 | 16 | gfile: Limit access to files when copying
file_copy_fallback creates new files with default permissions and
set the correct permissions after the operation is finished. This
might cause that the files can be accessible by more users during
the operation than expected. Use G_FILE_CREATE_PRIVATE for the new
files to limit access to those files. |
R_API RList * /*<RBinClass>*/ r_bin_get_classes(RBin *bin) {
RBinObject *o = r_bin_cur_object (bin);
return o? o->classes: NULL;
} | 0 | [
"CWE-125"
]
| radare2 | d31c4d3cbdbe01ea3ded16a584de94149ecd31d9 | 108,751,994,591,867,260,000,000,000,000,000,000,000 | 4 | Fix #8748 - Fix oobread on string search |
uint32_t vrend_renderer_object_insert(struct vrend_context *ctx, void *data,
uint32_t handle, enum virgl_object_type type)
{
return vrend_object_insert(ctx->sub->object_hash, data, handle, type);
} | 0 | [
"CWE-787"
]
| virglrenderer | 95e581fd181b213c2ed7cdc63f2abc03eaaa77ec | 205,507,759,442,427,200,000,000,000,000,000,000,000 | 5 | vrend: Add test to resource OOB write and fix it
v2: Also check that no depth != 1 has been send when none is due
Closes: #250
Signed-off-by: Gert Wollny <[email protected]>
Reviewed-by: Chia-I Wu <[email protected]> |
static struct sk_buff *isdn_ppp_decompress(struct sk_buff *skb, struct ippp_struct *is, struct ippp_struct *master,
int *proto)
{
void *stat = NULL;
struct isdn_ppp_compressor *ipc = NULL;
struct sk_buff *skb_out;
int len;
struct ippp_struct *ri;
struct isdn_ppp_resetparams rsparm;
unsigned char rsdata[IPPP_RESET_MAXDATABYTES];
if (!master) {
// per-link decompression
stat = is->link_decomp_stat;
ipc = is->link_decompressor;
ri = is;
} else {
stat = master->decomp_stat;
ipc = master->decompressor;
ri = master;
}
if (!ipc) {
// no decompressor -> we can't decompress.
printk(KERN_DEBUG "ippp: no decompressor defined!\n");
return skb;
}
BUG_ON(!stat); // if we have a compressor, stat has been set as well
if ((master && *proto == PPP_COMP) || (!master && *proto == PPP_COMPFRAG)) {
// compressed packets are compressed by their protocol type
// Set up reset params for the decompressor
memset(&rsparm, 0, sizeof(rsparm));
rsparm.data = rsdata;
rsparm.maxdlen = IPPP_RESET_MAXDATABYTES;
skb_out = dev_alloc_skb(is->mru + PPP_HDRLEN);
if (!skb_out) {
kfree_skb(skb);
printk(KERN_ERR "ippp: decomp memory allocation failure\n");
return NULL;
}
len = ipc->decompress(stat, skb, skb_out, &rsparm);
kfree_skb(skb);
if (len <= 0) {
switch (len) {
case DECOMP_ERROR:
printk(KERN_INFO "ippp: decomp wants reset %s params\n",
rsparm.valid ? "with" : "without");
isdn_ppp_ccp_reset_trans(ri, &rsparm);
break;
case DECOMP_FATALERROR:
ri->pppcfg |= SC_DC_FERROR;
/* Kick ipppd to recognize the error */
isdn_ppp_ccp_kickup(ri);
break;
}
kfree_skb(skb_out);
return NULL;
}
*proto = isdn_ppp_strip_proto(skb_out);
if (*proto < 0) {
kfree_skb(skb_out);
return NULL;
}
return skb_out;
} else {
// uncompressed packets are fed through the decompressor to
// update the decompressor state
ipc->incomp(stat, skb, *proto);
return skb;
}
} | 0 | []
| linux | 4ab42d78e37a294ac7bc56901d563c642e03c4ae | 180,418,380,441,936,200,000,000,000,000,000,000,000 | 75 | ppp, slip: Validate VJ compression slot parameters completely
Currently slhc_init() treats out-of-range values of rslots and tslots
as equivalent to 0, except that if tslots is too large it will
dereference a null pointer (CVE-2015-7799).
Add a range-check at the top of the function and make it return an
ERR_PTR() on error instead of NULL. Change the callers accordingly.
Compile-tested only.
Reported-by: 郭永刚 <[email protected]>
References: http://article.gmane.org/gmane.comp.security.oss.general/17908
Signed-off-by: Ben Hutchings <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
struct sock *sk = sock->sk;
struct net *net = sock_net(sk);
struct unix_sock *u = unix_sk(sk);
struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
char *sun_path = sunaddr->sun_path;
struct dentry *dentry = NULL;
struct path path;
int err;
unsigned hash;
struct unix_address *addr;
struct hlist_head *list;
err = -EINVAL;
if (sunaddr->sun_family != AF_UNIX)
goto out;
if (addr_len == sizeof(short)) {
err = unix_autobind(sock);
goto out;
}
err = unix_mkname(sunaddr, addr_len, &hash);
if (err < 0)
goto out;
addr_len = err;
mutex_lock(&u->readlock);
err = -EINVAL;
if (u->addr)
goto out_up;
err = -ENOMEM;
addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
if (!addr)
goto out_up;
memcpy(addr->name, sunaddr, addr_len);
addr->len = addr_len;
addr->hash = hash ^ sk->sk_type;
atomic_set(&addr->refcnt, 1);
if (sun_path[0]) {
unsigned int mode;
err = 0;
/*
* Get the parent directory, calculate the hash for last
* component.
*/
dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
err = PTR_ERR(dentry);
if (IS_ERR(dentry))
goto out_mknod_parent;
/*
* All right, let's create it.
*/
mode = S_IFSOCK |
(SOCK_INODE(sock)->i_mode & ~current_umask());
err = mnt_want_write(path.mnt);
if (err)
goto out_mknod_dput;
err = security_path_mknod(&path, dentry, mode, 0);
if (err)
goto out_mknod_drop_write;
err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
out_mknod_drop_write:
mnt_drop_write(path.mnt);
if (err)
goto out_mknod_dput;
mutex_unlock(&path.dentry->d_inode->i_mutex);
dput(path.dentry);
path.dentry = dentry;
addr->hash = UNIX_HASH_SIZE;
}
spin_lock(&unix_table_lock);
if (!sun_path[0]) {
err = -EADDRINUSE;
if (__unix_find_socket_byname(net, sunaddr, addr_len,
sk->sk_type, hash)) {
unix_release_addr(addr);
goto out_unlock;
}
list = &unix_socket_table[addr->hash];
} else {
list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
u->dentry = path.dentry;
u->mnt = path.mnt;
}
err = 0;
__unix_remove_socket(sk);
u->addr = addr;
__unix_insert_socket(list, sk);
out_unlock:
spin_unlock(&unix_table_lock);
out_up:
mutex_unlock(&u->readlock);
out:
return err;
out_mknod_dput:
dput(dentry);
mutex_unlock(&path.dentry->d_inode->i_mutex);
path_put(&path);
out_mknod_parent:
if (err == -EEXIST)
err = -EADDRINUSE;
unix_release_addr(addr);
goto out_up;
} | 0 | []
| linux-2.6 | 16e5726269611b71c930054ffe9b858c1cea88eb | 101,060,182,756,179,780,000,000,000,000,000,000,000 | 118 | af_unix: dont send SCM_CREDENTIALS by default
Since commit 7361c36c5224 (af_unix: Allow credentials to work across
user and pid namespaces) af_unix performance dropped a lot.
This is because we now take a reference on pid and cred in each write(),
and release them in read(), usually done from another process,
eventually from another cpu. This triggers false sharing.
# Events: 154K cycles
#
# Overhead Command Shared Object Symbol
# ........ ....... .................. .........................
#
10.40% hackbench [kernel.kallsyms] [k] put_pid
8.60% hackbench [kernel.kallsyms] [k] unix_stream_recvmsg
7.87% hackbench [kernel.kallsyms] [k] unix_stream_sendmsg
6.11% hackbench [kernel.kallsyms] [k] do_raw_spin_lock
4.95% hackbench [kernel.kallsyms] [k] unix_scm_to_skb
4.87% hackbench [kernel.kallsyms] [k] pid_nr_ns
4.34% hackbench [kernel.kallsyms] [k] cred_to_ucred
2.39% hackbench [kernel.kallsyms] [k] unix_destruct_scm
2.24% hackbench [kernel.kallsyms] [k] sub_preempt_count
1.75% hackbench [kernel.kallsyms] [k] fget_light
1.51% hackbench [kernel.kallsyms] [k]
__mutex_lock_interruptible_slowpath
1.42% hackbench [kernel.kallsyms] [k] sock_alloc_send_pskb
This patch includes SCM_CREDENTIALS information in a af_unix message/skb
only if requested by the sender, [man 7 unix for details how to include
ancillary data using sendmsg() system call]
Note: This might break buggy applications that expected SCM_CREDENTIAL
from an unaware write() system call, and receiver not using SO_PASSCRED
socket option.
If SOCK_PASSCRED is set on source or destination socket, we still
include credentials for mere write() syscalls.
Performance boost in hackbench : more than 50% gain on a 16 thread
machine (2 quad-core cpus, 2 threads per core)
hackbench 20 thread 2000
4.228 sec instead of 9.102 sec
Signed-off-by: Eric Dumazet <[email protected]>
Acked-by: Tim Chen <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static void bnx2x_parity_recover(struct bnx2x *bp)
{
bool global = false;
u32 error_recovered, error_unrecovered;
bool is_parity;
DP(NETIF_MSG_HW, "Handling parity\n");
while (1) {
switch (bp->recovery_state) {
case BNX2X_RECOVERY_INIT:
DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
is_parity = bnx2x_chk_parity_attn(bp, &global, false);
WARN_ON(!is_parity);
/* Try to get a LEADER_LOCK HW lock */
if (bnx2x_trylock_leader_lock(bp)) {
bnx2x_set_reset_in_progress(bp);
/*
* Check if there is a global attention and if
* there was a global attention, set the global
* reset bit.
*/
if (global)
bnx2x_set_reset_global(bp);
bp->is_leader = 1;
}
/* Stop the driver */
/* If interface has been removed - break */
if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false))
return;
bp->recovery_state = BNX2X_RECOVERY_WAIT;
/* Ensure "is_leader", MCP command sequence and
* "recovery_state" update values are seen on other
* CPUs.
*/
smp_mb();
break;
case BNX2X_RECOVERY_WAIT:
DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
if (bp->is_leader) {
int other_engine = BP_PATH(bp) ? 0 : 1;
bool other_load_status =
bnx2x_get_load_status(bp, other_engine);
bool load_status =
bnx2x_get_load_status(bp, BP_PATH(bp));
global = bnx2x_reset_is_global(bp);
/*
* In case of a parity in a global block, let
* the first leader that performs a
* leader_reset() reset the global blocks in
* order to clear global attentions. Otherwise
* the gates will remain closed for that
* engine.
*/
if (load_status ||
(global && other_load_status)) {
/* Wait until all other functions get
* down.
*/
schedule_delayed_work(&bp->sp_rtnl_task,
HZ/10);
return;
} else {
/* If all other functions got down -
* try to bring the chip back to
* normal. In any case it's an exit
* point for a leader.
*/
if (bnx2x_leader_reset(bp)) {
bnx2x_recovery_failed(bp);
return;
}
/* If we are here, means that the
* leader has succeeded and doesn't
* want to be a leader any more. Try
* to continue as a none-leader.
*/
break;
}
} else { /* non-leader */
if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) {
/* Try to get a LEADER_LOCK HW lock as
* long as a former leader may have
* been unloaded by the user or
* released a leadership by another
* reason.
*/
if (bnx2x_trylock_leader_lock(bp)) {
/* I'm a leader now! Restart a
* switch case.
*/
bp->is_leader = 1;
break;
}
schedule_delayed_work(&bp->sp_rtnl_task,
HZ/10);
return;
} else {
/*
* If there was a global attention, wait
* for it to be cleared.
*/
if (bnx2x_reset_is_global(bp)) {
schedule_delayed_work(
&bp->sp_rtnl_task,
HZ/10);
return;
}
error_recovered =
bp->eth_stats.recoverable_error;
error_unrecovered =
bp->eth_stats.unrecoverable_error;
bp->recovery_state =
BNX2X_RECOVERY_NIC_LOADING;
if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
error_unrecovered++;
netdev_err(bp->dev,
"Recovery failed. Power cycle needed\n");
/* Disconnect this device */
netif_device_detach(bp->dev);
/* Shut down the power */
bnx2x_set_power_state(
bp, PCI_D3hot);
smp_mb();
} else {
bp->recovery_state =
BNX2X_RECOVERY_DONE;
error_recovered++;
smp_mb();
}
bp->eth_stats.recoverable_error =
error_recovered;
bp->eth_stats.unrecoverable_error =
error_unrecovered;
return;
}
}
default:
return;
}
}
} | 0 | [
"CWE-20"
]
| linux | 8914a595110a6eca69a5e275b323f5d09e18f4f9 | 305,783,641,704,275,170,000,000,000,000,000,000,000 | 154 | bnx2x: disable GSO where gso_size is too big for hardware
If a bnx2x card is passed a GSO packet with a gso_size larger than
~9700 bytes, it will cause a firmware error that will bring the card
down:
bnx2x: [bnx2x_attn_int_deasserted3:4323(enP24p1s0f0)]MC assert!
bnx2x: [bnx2x_mc_assert:720(enP24p1s0f0)]XSTORM_ASSERT_LIST_INDEX 0x2
bnx2x: [bnx2x_mc_assert:736(enP24p1s0f0)]XSTORM_ASSERT_INDEX 0x0 = 0x00000000 0x25e43e47 0x00463e01 0x00010052
bnx2x: [bnx2x_mc_assert:750(enP24p1s0f0)]Chip Revision: everest3, FW Version: 7_13_1
... (dump of values continues) ...
Detect when the mac length of a GSO packet is greater than the maximum
packet size (9700 bytes) and disable GSO.
Signed-off-by: Daniel Axtens <[email protected]>
Reviewed-by: Eric Dumazet <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
void Http2UpstreamIntegrationTest::manySimultaneousRequests(uint32_t request_bytes, uint32_t) {
TestRandomGenerator rand;
const uint32_t num_requests = 50;
std::vector<Http::RequestEncoder*> encoders;
std::vector<IntegrationStreamDecoderPtr> responses;
std::vector<int> response_bytes;
autonomous_upstream_ = true;
initialize();
codec_client_ = makeHttpConnection(lookupPort("http"));
for (uint32_t i = 0; i < num_requests; ++i) {
response_bytes.push_back(rand.random() % (1024 * 2));
auto headers = Http::TestRequestHeaderMapImpl{
{":method", "POST"},
{":path", "/test/long/url"},
{":scheme", "http"},
{":authority", "host"},
{AutonomousStream::RESPONSE_SIZE_BYTES, std::to_string(response_bytes[i])},
{AutonomousStream::EXPECT_REQUEST_SIZE_BYTES, std::to_string(request_bytes)}};
if (i % 2 == 0) {
headers.addCopy(AutonomousStream::RESET_AFTER_REQUEST, "yes");
}
auto encoder_decoder = codec_client_->startRequest(headers);
encoders.push_back(&encoder_decoder.first);
responses.push_back(std::move(encoder_decoder.second));
codec_client_->sendData(*encoders[i], request_bytes, true);
}
for (uint32_t i = 0; i < num_requests; ++i) {
responses[i]->waitForEndStream();
if (i % 2 != 0) {
EXPECT_TRUE(responses[i]->complete());
EXPECT_EQ("200", responses[i]->headers().getStatusValue());
EXPECT_EQ(response_bytes[i], responses[i]->body().length());
} else {
// Upstream stream reset.
EXPECT_EQ("503", responses[i]->headers().getStatusValue());
}
}
} | 1 | [
"CWE-400"
]
| envoy | 0e49a495826ea9e29134c1bd54fdeb31a034f40c | 211,585,361,828,745,030,000,000,000,000,000,000,000 | 40 | http/2: add stats and stream flush timeout (#139)
This commit adds a new stream flush timeout to guard against a
remote server that does not open window once an entire stream has
been buffered for flushing. Additional stats have also been added
to better understand the codecs view of active streams as well as
amount of data buffered.
Signed-off-by: Matt Klein <[email protected]> |
static int rtnl_valid_dump_net_req(const struct nlmsghdr *nlh, struct sock *sk,
struct rtnl_net_dump_cb *net_cb,
struct netlink_callback *cb)
{
struct netlink_ext_ack *extack = cb->extack;
struct nlattr *tb[NETNSA_MAX + 1];
int err, i;
err = nlmsg_parse_strict(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
rtnl_net_policy, extack);
if (err < 0)
return err;
for (i = 0; i <= NETNSA_MAX; i++) {
if (!tb[i])
continue;
if (i == NETNSA_TARGET_NSID) {
struct net *net;
net = rtnl_get_net_ns_capable(sk, nla_get_s32(tb[i]));
if (IS_ERR(net)) {
NL_SET_BAD_ATTR(extack, tb[i]);
NL_SET_ERR_MSG(extack,
"Invalid target network namespace id");
return PTR_ERR(net);
}
net_cb->fillargs.add_ref = true;
net_cb->ref_net = net_cb->tgt_net;
net_cb->tgt_net = net;
} else {
NL_SET_BAD_ATTR(extack, tb[i]);
NL_SET_ERR_MSG(extack,
"Unsupported attribute in dump request");
return -EINVAL;
}
}
return 0;
} | 0 | [
"CWE-200",
"CWE-190",
"CWE-326"
]
| linux | 355b98553789b646ed97ad801a619ff898471b92 | 104,952,972,533,125,180,000,000,000,000,000,000,000 | 40 | netns: provide pure entropy for net_hash_mix()
net_hash_mix() currently uses kernel address of a struct net,
and is used in many places that could be used to reveal this
address to a patient attacker, thus defeating KASLR, for
the typical case (initial net namespace, &init_net is
not dynamically allocated)
I believe the original implementation tried to avoid spending
too many cycles in this function, but security comes first.
Also provide entropy regardless of CONFIG_NET_NS.
Fixes: 0b4419162aa6 ("netns: introduce the net_hash_mix "salt" for hashes")
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: Amit Klein <[email protected]>
Reported-by: Benny Pinkas <[email protected]>
Cc: Pavel Emelyanov <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
xfrm_policy_inexact_alloc_chain(struct xfrm_pol_inexact_bin *bin,
struct xfrm_policy *policy, u8 dir)
{
struct xfrm_pol_inexact_node *n;
struct net *net;
net = xp_net(policy);
lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
if (xfrm_policy_inexact_insert_use_any_list(policy))
return &bin->hhead;
if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.daddr,
policy->family,
policy->selector.prefixlen_d)) {
write_seqcount_begin(&bin->count);
n = xfrm_policy_inexact_insert_node(net,
&bin->root_s,
&policy->selector.saddr,
policy->family,
policy->selector.prefixlen_s,
dir);
write_seqcount_end(&bin->count);
if (!n)
return NULL;
return &n->hhead;
}
/* daddr is fixed */
write_seqcount_begin(&bin->count);
n = xfrm_policy_inexact_insert_node(net,
&bin->root_d,
&policy->selector.daddr,
policy->family,
policy->selector.prefixlen_d, dir);
write_seqcount_end(&bin->count);
if (!n)
return NULL;
/* saddr is wildcard */
if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.saddr,
policy->family,
policy->selector.prefixlen_s))
return &n->hhead;
write_seqcount_begin(&bin->count);
n = xfrm_policy_inexact_insert_node(net,
&n->root,
&policy->selector.saddr,
policy->family,
policy->selector.prefixlen_s, dir);
write_seqcount_end(&bin->count);
if (!n)
return NULL;
return &n->hhead;
} | 0 | [
"CWE-703"
]
| linux | f85daf0e725358be78dfd208dea5fd665d8cb901 | 130,793,388,562,813,990,000,000,000,000,000,000,000 | 58 | xfrm: xfrm_policy: fix a possible double xfrm_pols_put() in xfrm_bundle_lookup()
xfrm_policy_lookup() will call xfrm_pol_hold_rcu() to get a refcount of
pols[0]. This refcount can be dropped in xfrm_expand_policies() when
xfrm_expand_policies() return error. pols[0]'s refcount is balanced in
here. But xfrm_bundle_lookup() will also call xfrm_pols_put() with
num_pols == 1 to drop this refcount when xfrm_expand_policies() return
error.
This patch also fix an illegal address access. pols[0] will save a error
point when xfrm_policy_lookup fails. This lead to xfrm_pols_put to resolve
an illegal address in xfrm_bundle_lookup's error path.
Fix these by setting num_pols = 0 in xfrm_expand_policies()'s error path.
Fixes: 80c802f3073e ("xfrm: cache bundles instead of policies for outgoing flows")
Signed-off-by: Hangyu Hua <[email protected]>
Signed-off-by: Steffen Klassert <[email protected]> |
void Field_string::sort_string(uchar *to,uint length)
{
#ifdef DBUG_ASSERT_EXISTS
size_t tmp=
#endif
field_charset->coll->strnxfrm(field_charset,
to, length,
char_length() *
field_charset->strxfrm_multiply,
ptr, field_length,
MY_STRXFRM_PAD_WITH_SPACE |
MY_STRXFRM_PAD_TO_MAXLEN);
DBUG_ASSERT(tmp == length);
} | 0 | [
"CWE-416",
"CWE-703"
]
| server | 08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917 | 105,351,285,926,911,570,000,000,000,000,000,000,000 | 14 | MDEV-24176 Server crashes after insert in the table with virtual
column generated using date_format() and if()
vcol_info->expr is allocated on expr_arena at parsing stage. Since
expr item is allocated on expr_arena all its containee items must be
allocated on expr_arena too. Otherwise fix_session_expr() will
encounter prematurely freed item.
When table is reopened from cache vcol_info contains stale
expression. We refresh expression via TABLE::vcol_fix_exprs() but
first we must prepare a proper context (Vcol_expr_context) which meets
some requirements:
1. As noted above expr update must be done on expr_arena as there may
be new items created. It was a bug in fix_session_expr_for_read() and
was just not reproduced because of no second refix. Now refix is done
for more cases so it does reproduce. Tests affected: vcol.binlog
2. Also name resolution context must be narrowed to the single table.
Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes
3. sql_mode must be clean and not fail expr update.
sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc
must not affect vcol expression update. If the table was created
successfully any further evaluation must not fail. Tests affected:
main.func_like
Reviewed by: Sergei Golubchik <[email protected]> |
static int rsa_priv_encode(PKCS8_PRIV_KEY_INFO *p8, const EVP_PKEY *pkey)
{
unsigned char *rk = NULL;
int rklen;
rklen = i2d_RSAPrivateKey(pkey->pkey.rsa, &rk);
if (rklen <= 0) {
RSAerr(RSA_F_RSA_PRIV_ENCODE, ERR_R_MALLOC_FAILURE);
return 0;
}
if (!PKCS8_pkey_set0(p8, OBJ_nid2obj(NID_rsaEncryption), 0,
V_ASN1_NULL, NULL, rk, rklen)) {
RSAerr(RSA_F_RSA_PRIV_ENCODE, ERR_R_MALLOC_FAILURE);
return 0;
}
return 1;
} | 0 | []
| openssl | 4b22cce3812052fe64fc3f6d58d8cc884e3cb834 | 110,784,177,939,436,730,000,000,000,000,000,000,000 | 19 | Reject invalid PSS parameters.
Fix a bug where invalid PSS parameters are not rejected resulting in a
NULL pointer exception. This can be triggered during certificate
verification so could be a DoS attack against a client or a server
enabling client authentication.
Thanks to Brian Carpenter for reporting this issues.
CVE-2015-0208
Reviewed-by: Tim Hudson <[email protected]> |
SPL_METHOD(SplDoublyLinkedList, rewind)
{
spl_dllist_object *intern = Z_SPLDLLIST_P(getThis());
if (zend_parse_parameters_none() == FAILURE) {
return;
}
spl_dllist_it_helper_rewind(&intern->traverse_pointer, &intern->traverse_position, intern->llist, intern->flags);
} | 0 | [
"CWE-415"
]
| php-src | 28a6ed9f9a36b9c517e4a8a429baf4dd382fc5d5 | 313,763,549,962,670,700,000,000,000,000,000,000,000 | 10 | Fix bug #71735: Double-free in SplDoublyLinkedList::offsetSet |
chkpass_out(PG_FUNCTION_ARGS)
{
chkpass *password = (chkpass *) PG_GETARG_POINTER(0);
char *result;
result = (char *) palloc(16);
result[0] = ':';
strcpy(result + 1, password->password);
PG_RETURN_CSTRING(result);
} | 0 | [
"CWE-119"
]
| postgres | 01824385aead50e557ca1af28640460fa9877d51 | 303,985,812,312,439,760,000,000,000,000,000,000,000 | 11 | Prevent potential overruns of fixed-size buffers.
Coverity identified a number of places in which it couldn't prove that a
string being copied into a fixed-size buffer would fit. We believe that
most, perhaps all of these are in fact safe, or are copying data that is
coming from a trusted source so that any overrun is not really a security
issue. Nonetheless it seems prudent to forestall any risk by using
strlcpy() and similar functions.
Fixes by Peter Eisentraut and Jozef Mlich based on Coverity reports.
In addition, fix a potential null-pointer-dereference crash in
contrib/chkpass. The crypt(3) function is defined to return NULL on
failure, but chkpass.c didn't check for that before using the result.
The main practical case in which this could be an issue is if libc is
configured to refuse to execute unapproved hashing algorithms (e.g.,
"FIPS mode"). This ideally should've been a separate commit, but
since it touches code adjacent to one of the buffer overrun changes,
I included it in this commit to avoid last-minute merge issues.
This issue was reported by Honza Horak.
Security: CVE-2014-0065 for buffer overruns, CVE-2014-0066 for crypt() |
static int ZEND_FASTCALL zend_binary_assign_op_obj_helper_SPEC_VAR_CONST(int (*binary_op)(zval *result, zval *op1, zval *op2 TSRMLS_DC), ZEND_OPCODE_HANDLER_ARGS)
{
zend_op *opline = EX(opline);
zend_op *op_data = opline+1;
zend_free_op free_op1, free_op_data1;
zval **object_ptr = _get_zval_ptr_ptr_var(&opline->op1, EX(Ts), &free_op1 TSRMLS_CC);
zval *object;
zval *property = &opline->op2.u.constant;
zval *value = get_zval_ptr(&op_data->op1, EX(Ts), &free_op_data1, BP_VAR_R);
znode *result = &opline->result;
int have_get_ptr = 0;
if (IS_VAR == IS_VAR && !object_ptr) {
zend_error_noreturn(E_ERROR, "Cannot use string offset as an object");
}
EX_T(result->u.var).var.ptr_ptr = NULL;
make_real_object(object_ptr TSRMLS_CC);
object = *object_ptr;
if (Z_TYPE_P(object) != IS_OBJECT) {
zend_error(E_WARNING, "Attempt to assign property of non-object");
FREE_OP(free_op_data1);
if (!RETURN_VALUE_UNUSED(result)) {
EX_T(result->u.var).var.ptr = EG(uninitialized_zval_ptr);
EX_T(result->u.var).var.ptr_ptr = NULL;
PZVAL_LOCK(EG(uninitialized_zval_ptr));
}
} else {
/* here we are sure we are dealing with an object */
if (0) {
MAKE_REAL_ZVAL_PTR(property);
}
/* here property is a string */
if (opline->extended_value == ZEND_ASSIGN_OBJ
&& Z_OBJ_HT_P(object)->get_property_ptr_ptr) {
zval **zptr = Z_OBJ_HT_P(object)->get_property_ptr_ptr(object, property TSRMLS_CC);
if (zptr != NULL) { /* NULL means no success in getting PTR */
SEPARATE_ZVAL_IF_NOT_REF(zptr);
have_get_ptr = 1;
binary_op(*zptr, *zptr, value TSRMLS_CC);
if (!RETURN_VALUE_UNUSED(result)) {
EX_T(result->u.var).var.ptr = *zptr;
EX_T(result->u.var).var.ptr_ptr = NULL;
PZVAL_LOCK(*zptr);
}
}
}
if (!have_get_ptr) {
zval *z = NULL;
if (opline->extended_value == ZEND_ASSIGN_OBJ) {
if (Z_OBJ_HT_P(object)->read_property) {
z = Z_OBJ_HT_P(object)->read_property(object, property, BP_VAR_R TSRMLS_CC);
}
} else /* if (opline->extended_value == ZEND_ASSIGN_DIM) */ {
if (Z_OBJ_HT_P(object)->read_dimension) {
z = Z_OBJ_HT_P(object)->read_dimension(object, property, BP_VAR_R TSRMLS_CC);
}
}
if (z) {
if (Z_TYPE_P(z) == IS_OBJECT && Z_OBJ_HT_P(z)->get) {
zval *value = Z_OBJ_HT_P(z)->get(z TSRMLS_CC);
if (Z_REFCOUNT_P(z) == 0) {
GC_REMOVE_ZVAL_FROM_BUFFER(z);
zval_dtor(z);
FREE_ZVAL(z);
}
z = value;
}
Z_ADDREF_P(z);
SEPARATE_ZVAL_IF_NOT_REF(&z);
binary_op(z, z, value TSRMLS_CC);
if (opline->extended_value == ZEND_ASSIGN_OBJ) {
Z_OBJ_HT_P(object)->write_property(object, property, z TSRMLS_CC);
} else /* if (opline->extended_value == ZEND_ASSIGN_DIM) */ {
Z_OBJ_HT_P(object)->write_dimension(object, property, z TSRMLS_CC);
}
if (!RETURN_VALUE_UNUSED(result)) {
EX_T(result->u.var).var.ptr = z;
EX_T(result->u.var).var.ptr_ptr = NULL;
PZVAL_LOCK(z);
}
zval_ptr_dtor(&z);
} else {
zend_error(E_WARNING, "Attempt to assign property of non-object");
if (!RETURN_VALUE_UNUSED(result)) {
EX_T(result->u.var).var.ptr = EG(uninitialized_zval_ptr);
EX_T(result->u.var).var.ptr_ptr = NULL;
PZVAL_LOCK(EG(uninitialized_zval_ptr));
}
}
}
if (0) {
zval_ptr_dtor(&property);
} else {
}
FREE_OP(free_op_data1);
}
if (free_op1.var) {zval_ptr_dtor(&free_op1.var);};
/* assign_obj has two opcodes! */
ZEND_VM_INC_OPCODE();
ZEND_VM_NEXT_OPCODE();
} | 0 | []
| php-src | ce96fd6b0761d98353761bf78d5bfb55291179fd | 244,974,789,023,373,600,000,000,000,000,000,000,000 | 113 | - fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus |
inline void free_set_stmt_mem_root()
{
DBUG_ASSERT(!is_arena_for_set_stmt());
if (mem_root_for_set_stmt)
{
free_root(mem_root_for_set_stmt, MYF(0));
delete mem_root_for_set_stmt;
mem_root_for_set_stmt= 0;
}
} | 0 | [
"CWE-703"
]
| server | 39feab3cd31b5414aa9b428eaba915c251ac34a2 | 332,049,176,049,746,460,000,000,000,000,000,000,000 | 10 | MDEV-26412 Server crash in Item_field::fix_outer_field for INSERT SELECT
IF an INSERT/REPLACE SELECT statement contained an ON expression in the top
level select and this expression used a subquery with a column reference
that could not be resolved then an attempt to resolve this reference as
an outer reference caused a crash of the server. This happened because the
outer context field in the Name_resolution_context structure was not set
to NULL for such references. Rather it pointed to the first element in
the select_stack.
Note that starting from 10.4 we cannot use the SELECT_LEX::outer_select()
method when parsing a SELECT construct.
Approved by Oleksandr Byelkin <[email protected]> |
zdevicename(i_ctx_t *i_ctx_p)
{
os_ptr op = osp;
const char *dname;
check_read_type(*op, t_device);
if (op->value.pdevice == NULL)
/* This can happen if we invalidated devices on the stack by calling nulldevice after they were pushed */
return_error(gs_error_undefined);
dname = op->value.pdevice->dname;
make_const_string(op, avm_foreign | a_readonly, strlen(dname),
(const byte *)dname);
return 0;
} | 0 | []
| ghostpdl | 661e8d8fb8248c38d67958beda32f3a5876d0c3f | 176,319,835,489,871,900,000,000,000,000,000,000,000 | 15 | Bug 700176: check the *output* device for LockSafetyParams
When calling .setdevice we were checking if LockSafetyParams was set, and if so
throwing an invalidaccess error.
The problem is, if another device, for example the pdf14 compositor is the 'top'
device, that does not (and cannot) honour LockSafetyParams.
To solve this, we'll now use the (relatively new) gxdso_current_output_device
spec_op to retrieve the *actual* output device, and check the LockSafetyParams
flag in that. |
xmlDefaultExternalEntityLoader(const char *URL, const char *ID,
xmlParserCtxtPtr ctxt)
{
xmlParserInputPtr ret = NULL;
xmlChar *resource = NULL;
#ifdef DEBUG_EXTERNAL_ENTITIES
xmlGenericError(xmlGenericErrorContext,
"xmlDefaultExternalEntityLoader(%s, xxx)\n", URL);
#endif
if ((ctxt != NULL) && (ctxt->options & XML_PARSE_NONET)) {
int options = ctxt->options;
ctxt->options -= XML_PARSE_NONET;
ret = xmlNoNetExternalEntityLoader(URL, ID, ctxt);
ctxt->options = options;
return(ret);
}
#ifdef LIBXML_CATALOG_ENABLED
resource = xmlResolveResourceFromCatalog(URL, ID, ctxt);
#endif
if (resource == NULL)
resource = (xmlChar *) URL;
if (resource == NULL) {
if (ID == NULL)
ID = "NULL";
__xmlLoaderErr(ctxt, "failed to load external entity \"%s\"\n", ID);
return (NULL);
}
ret = xmlNewInputFromFile(ctxt, (const char *) resource);
if ((resource != NULL) && (resource != (xmlChar *) URL))
xmlFree(resource);
return (ret);
} | 0 | [
"CWE-134"
]
| libxml2 | 4472c3a5a5b516aaf59b89be602fbce52756c3e9 | 310,285,448,768,902,030,000,000,000,000,000,000,000 | 36 | Fix some format string warnings with possible format string vulnerability
For https://bugzilla.gnome.org/show_bug.cgi?id=761029
Decorate every method in libxml2 with the appropriate
LIBXML_ATTR_FORMAT(fmt,args) macro and add some cleanups
following the reports. |
TEST(FormatterTest, FormatCustom) {
Date date(2012, 12, 9);
EXPECT_THROW_MSG(fmt::format("{:s}", date), FormatError,
"unmatched '}' in format string");
} | 0 | [
"CWE-134",
"CWE-119",
"CWE-787"
]
| fmt | 8cf30aa2be256eba07bb1cefb998c52326e846e7 | 180,657,806,963,119,900,000,000,000,000,000,000,000 | 5 | Fix segfault on complex pointer formatting (#642) |
size_t olm_pk_key_length(void) {
return olm::encode_base64_length(CURVE25519_KEY_LENGTH);
} | 0 | [
"CWE-787"
]
| olm | ccc0d122ee1b4d5e5ca4ec1432086be17d5f901b | 254,981,264,088,932,300,000,000,000,000,000,000,000 | 3 | olm_pk_decrypt: Ensure inputs are of correct length. |
max_topfill(void)
{
int n;
n = plines_nofill(curwin->w_topline);
if (n >= curwin->w_height)
curwin->w_topfill = 0;
else
{
curwin->w_topfill = diff_check_fill(curwin, curwin->w_topline);
if (curwin->w_topfill + n > curwin->w_height)
curwin->w_topfill = curwin->w_height - n;
}
} | 0 | [
"CWE-122"
]
| vim | 777e7c21b7627be80961848ac560cb0a9978ff43 | 286,068,800,814,948,830,000,000,000,000,000,000,000 | 14 | patch 8.2.3564: invalid memory access when scrolling without valid screen
Problem: Invalid memory access when scrolling without a valid screen.
Solution: Do not set VALID_BOTLINE in w_valid. |
static struct socket_address *tls_socket_get_my_addr(struct socket_context *sock, TALLOC_CTX *mem_ctx)
{
struct tls_context *tls = talloc_get_type(sock->private_data, struct tls_context);
return socket_get_my_addr(tls->socket, mem_ctx);
} | 0 | []
| samba | 22af043d2f20760f27150d7d469c7c7b944c6b55 | 169,089,918,527,009,850,000,000,000,000,000,000,000 | 5 | CVE-2013-4476: s4:libtls: check for safe permissions of tls private key file (key.pem)
If the tls key is not owned by root or has not mode 0600 samba will not
start up.
Bug: https://bugzilla.samba.org/show_bug.cgi?id=10234
Pair-Programmed-With: Stefan Metzmacher <[email protected]>
Signed-off-by: Björn Baumbach <[email protected]>
Signed-off-by: Stefan Metzmacher <[email protected]>
Reviewed-by: Stefan Metzmacher <[email protected]>
Autobuild-User(master): Karolin Seeger <[email protected]>
Autobuild-Date(master): Mon Nov 11 13:07:16 CET 2013 on sn-devel-104 |
static void expire_timers(struct timer_base *base, struct hlist_head *head)
{
while (!hlist_empty(head)) {
struct timer_list *timer;
void (*fn)(unsigned long);
unsigned long data;
timer = hlist_entry(head->first, struct timer_list, entry);
timer_stats_account_timer(timer);
base->running_timer = timer;
detach_timer(timer, true);
fn = timer->function;
data = timer->data;
if (timer->flags & TIMER_IRQSAFE) {
spin_unlock(&base->lock);
call_timer_fn(timer, fn, data);
spin_lock(&base->lock);
} else {
spin_unlock_irq(&base->lock);
call_timer_fn(timer, fn, data);
spin_lock_irq(&base->lock);
}
}
} | 1 | [
"CWE-200"
]
| tip | dfb4357da6ddbdf57d583ba64361c9d792b0e0b1 | 270,531,969,593,363,830,000,000,000,000,000,000,000 | 27 | time: Remove CONFIG_TIMER_STATS
Currently CONFIG_TIMER_STATS exposes process information across namespaces:
kernel/time/timer_list.c print_timer():
SEQ_printf(m, ", %s/%d", tmp, timer->start_pid);
/proc/timer_list:
#11: <0000000000000000>, hrtimer_wakeup, S:01, do_nanosleep, cron/2570
Given that the tracer can give the same information, this patch entirely
removes CONFIG_TIMER_STATS.
Suggested-by: Thomas Gleixner <[email protected]>
Signed-off-by: Kees Cook <[email protected]>
Acked-by: John Stultz <[email protected]>
Cc: Nicolas Pitre <[email protected]>
Cc: [email protected]
Cc: Lai Jiangshan <[email protected]>
Cc: Shuah Khan <[email protected]>
Cc: Xing Gao <[email protected]>
Cc: Jonathan Corbet <[email protected]>
Cc: Jessica Frazelle <[email protected]>
Cc: [email protected]
Cc: Nicolas Iooss <[email protected]>
Cc: "Paul E. McKenney" <[email protected]>
Cc: Petr Mladek <[email protected]>
Cc: Richard Cochran <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: Michal Marek <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: "Eric W. Biederman" <[email protected]>
Cc: Olof Johansson <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: [email protected]
Cc: Arjan van de Ven <[email protected]>
Link: http://lkml.kernel.org/r/20170208192659.GA32582@beast
Signed-off-by: Thomas Gleixner <[email protected]> |
static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu,
unsigned int index, unsigned int start, unsigned int count,
uint32_t flags, void *data)
{
return 0;
} | 0 | [
"CWE-20"
]
| linux | 51b00d8509dc69c98740da2ad07308b630d3eb7d | 91,932,081,891,674,260,000,000,000,000,000,000,000 | 6 | drm/i915/gvt: Fix mmap range check
This is to fix missed mmap range check on vGPU bar2 region
and only allow to map vGPU allocated GMADDR range, which means
user space should support sparse mmap to get proper offset for
mmap vGPU aperture. And this takes care of actual pgoff in mmap
request as original code always does from beginning of vGPU
aperture.
Fixes: 659643f7d814 ("drm/i915/gvt/kvmgt: add vfio/mdev support to KVMGT")
Cc: "Monroy, Rodrigo Axel" <[email protected]>
Cc: "Orrala Contreras, Alfredo" <[email protected]>
Cc: [email protected] # v4.10+
Reviewed-by: Hang Yuan <[email protected]>
Signed-off-by: Zhenyu Wang <[email protected]> |
tsize_t t2p_write_pdf_xobject_stream_dict(ttile_t tile,
T2P* t2p,
TIFF* output){
tsize_t written=0;
char buffer[32];
int buflen=0;
written += t2p_write_pdf_stream_dict(0, t2p->pdf_xrefcount+1, output);
written += t2pWriteFile(output,
(tdata_t) "/Type /XObject \n/Subtype /Image \n/Name /Im",
42);
buflen=snprintf(buffer, sizeof(buffer), "%u", t2p->pdf_page+1);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
if(tile != 0){
written += t2pWriteFile(output, (tdata_t) "_", 1);
buflen=snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)tile);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
}
written += t2pWriteFile(output, (tdata_t) "\n/Width ", 8);
if(tile==0){
buflen=snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)t2p->tiff_width);
} else {
if(t2p_tile_is_right_edge(t2p->tiff_tiles[t2p->pdf_page], tile-1)!=0){
buflen=snprintf(buffer, sizeof(buffer), "%lu",
(unsigned long)t2p->tiff_tiles[t2p->pdf_page].tiles_edgetilewidth);
} else {
buflen=snprintf(buffer, sizeof(buffer), "%lu",
(unsigned long)t2p->tiff_tiles[t2p->pdf_page].tiles_tilewidth);
}
}
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) "\n/Height ", 9);
if(tile==0){
buflen=snprintf(buffer, sizeof(buffer), "%lu", (unsigned long)t2p->tiff_length);
} else {
if(t2p_tile_is_bottom_edge(t2p->tiff_tiles[t2p->pdf_page], tile-1)!=0){
buflen=snprintf(buffer, sizeof(buffer), "%lu",
(unsigned long)t2p->tiff_tiles[t2p->pdf_page].tiles_edgetilelength);
} else {
buflen=snprintf(buffer, sizeof(buffer), "%lu",
(unsigned long)t2p->tiff_tiles[t2p->pdf_page].tiles_tilelength);
}
}
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) "\n/BitsPerComponent ", 19);
buflen=snprintf(buffer, sizeof(buffer), "%u", t2p->tiff_bitspersample);
check_snprintf_ret(t2p, buflen, buffer);
written += t2pWriteFile(output, (tdata_t) buffer, buflen);
written += t2pWriteFile(output, (tdata_t) "\n/ColorSpace ", 13);
written += t2p_write_pdf_xobject_cs(t2p, output);
if (t2p->pdf_image_interpolate)
written += t2pWriteFile(output,
(tdata_t) "\n/Interpolate true", 18);
if( (t2p->pdf_switchdecode != 0)
#ifdef CCITT_SUPPORT
&& ! (t2p->pdf_colorspace & T2P_CS_BILEVEL
&& t2p->pdf_compression == T2P_COMPRESS_G4)
#endif
){
written += t2p_write_pdf_xobject_decode(t2p, output);
}
written += t2p_write_pdf_xobject_stream_filter(tile, t2p, output);
return(written);
} | 0 | [
"CWE-787"
]
| libtiff | 7be2e452ddcf6d7abca88f41d3761e6edab72b22 | 37,242,852,271,470,010,000,000,000,000,000,000,000 | 70 | tiff2pdf.c: properly calculate datasize when saving to JPEG YCbCr
fixes #220 |
bool do_notify_parent(struct task_struct *tsk, int sig)
{
struct siginfo info;
unsigned long flags;
struct sighand_struct *psig;
bool autoreap = false;
BUG_ON(sig == -1);
/* do_notify_parent_cldstop should have been called instead. */
BUG_ON(task_is_stopped_or_traced(tsk));
BUG_ON(!tsk->ptrace &&
(tsk->group_leader != tsk || !thread_group_empty(tsk)));
if (sig != SIGCHLD) {
/*
* This is only possible if parent == real_parent.
* Check if it has changed security domain.
*/
if (tsk->parent_exec_id != tsk->parent->self_exec_id)
sig = SIGCHLD;
}
info.si_signo = sig;
info.si_errno = 0;
/*
* We are under tasklist_lock here so our parent is tied to
* us and cannot change.
*
* task_active_pid_ns will always return the same pid namespace
* until a task passes through release_task.
*
* write_lock() currently calls preempt_disable() which is the
* same as rcu_read_lock(), but according to Oleg, this is not
* correct to rely on this
*/
rcu_read_lock();
info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
task_uid(tsk));
rcu_read_unlock();
info.si_utime = cputime_to_clock_t(tsk->utime + tsk->signal->utime);
info.si_stime = cputime_to_clock_t(tsk->stime + tsk->signal->stime);
info.si_status = tsk->exit_code & 0x7f;
if (tsk->exit_code & 0x80)
info.si_code = CLD_DUMPED;
else if (tsk->exit_code & 0x7f)
info.si_code = CLD_KILLED;
else {
info.si_code = CLD_EXITED;
info.si_status = tsk->exit_code >> 8;
}
psig = tsk->parent->sighand;
spin_lock_irqsave(&psig->siglock, flags);
if (!tsk->ptrace && sig == SIGCHLD &&
(psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
(psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
/*
* We are exiting and our parent doesn't care. POSIX.1
* defines special semantics for setting SIGCHLD to SIG_IGN
* or setting the SA_NOCLDWAIT flag: we should be reaped
* automatically and not left for our parent's wait4 call.
* Rather than having the parent do it as a magic kind of
* signal handler, we just set this to tell do_exit that we
* can be cleaned up without becoming a zombie. Note that
* we still call __wake_up_parent in this case, because a
* blocked sys_wait4 might now return -ECHILD.
*
* Whether we send SIGCHLD or not for SA_NOCLDWAIT
* is implementation-defined: we do (if you don't want
* it, just use SIG_IGN instead).
*/
autoreap = true;
if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
sig = 0;
}
if (valid_signal(sig) && sig)
__group_send_sig_info(sig, &info, tsk->parent);
__wake_up_parent(tsk, tsk->parent);
spin_unlock_irqrestore(&psig->siglock, flags);
return autoreap;
} | 0 | [
"CWE-20",
"CWE-362"
]
| linux | 9899d11f654474d2d54ea52ceaa2a1f4db3abd68 | 182,413,696,403,871,500,000,000,000,000,000,000,000 | 87 | ptrace: ensure arch_ptrace/ptrace_request can never race with SIGKILL
putreg() assumes that the tracee is not running and pt_regs_access() can
safely play with its stack. However a killed tracee can return from
ptrace_stop() to the low-level asm code and do RESTORE_REST, this means
that debugger can actually read/modify the kernel stack until the tracee
does SAVE_REST again.
set_task_blockstep() can race with SIGKILL too and in some sense this
race is even worse, the very fact the tracee can be woken up breaks the
logic.
As Linus suggested we can clear TASK_WAKEKILL around the arch_ptrace()
call, this ensures that nobody can ever wakeup the tracee while the
debugger looks at it. Not only this fixes the mentioned problems, we
can do some cleanups/simplifications in arch_ptrace() paths.
Probably ptrace_unfreeze_traced() needs more callers, for example it
makes sense to make the tracee killable for oom-killer before
access_process_vm().
While at it, add the comment into may_ptrace_stop() to explain why
ptrace_stop() still can't rely on SIGKILL and signal_pending_state().
Reported-by: Salman Qazi <[email protected]>
Reported-by: Suleiman Souhlal <[email protected]>
Suggested-by: Linus Torvalds <[email protected]>
Signed-off-by: Oleg Nesterov <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
compile_def_function(
ufunc_T *ufunc,
int check_return_type,
compiletype_T compile_type,
cctx_T *outer_cctx)
{
char_u *line = NULL;
garray_T lines_to_free;
char_u *p;
char *errormsg = NULL; // error message
cctx_T cctx;
garray_T *instr;
int did_emsg_before = did_emsg;
int did_emsg_silent_before = did_emsg_silent;
int ret = FAIL;
sctx_T save_current_sctx = current_sctx;
int save_estack_compiling = estack_compiling;
int save_cmod_flags = cmdmod.cmod_flags;
int do_estack_push;
int new_def_function = FALSE;
#ifdef FEAT_PROFILE
int prof_lnum = -1;
#endif
int debug_lnum = -1;
// allocated lines are freed at the end
ga_init2(&lines_to_free, sizeof(char_u *), 50);
// When using a function that was compiled before: Free old instructions.
// The index is reused. Otherwise add a new entry in "def_functions".
if (ufunc->uf_dfunc_idx > 0)
{
dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data)
+ ufunc->uf_dfunc_idx;
isn_T *instr_dest = NULL;
switch (compile_type)
{
case CT_PROFILE:
#ifdef FEAT_PROFILE
instr_dest = dfunc->df_instr_prof; break;
#endif
case CT_NONE: instr_dest = dfunc->df_instr; break;
case CT_DEBUG: instr_dest = dfunc->df_instr_debug; break;
}
if (instr_dest != NULL)
// Was compiled in this mode before: Free old instructions.
delete_def_function_contents(dfunc, FALSE);
ga_clear_strings(&dfunc->df_var_names);
}
else
{
if (add_def_function(ufunc) == FAIL)
return FAIL;
new_def_function = TRUE;
}
if ((ufunc->uf_flags & FC_CLOSURE) && outer_cctx == NULL)
{
semsg(_(e_compiling_closure_without_context_str),
printable_func_name(ufunc));
return FAIL;
}
ufunc->uf_def_status = UF_COMPILING;
CLEAR_FIELD(cctx);
cctx.ctx_compile_type = compile_type;
cctx.ctx_ufunc = ufunc;
cctx.ctx_lnum = -1;
cctx.ctx_outer = outer_cctx;
ga_init2(&cctx.ctx_locals, sizeof(lvar_T), 10);
// Each entry on the type stack consists of two type pointers.
ga_init2(&cctx.ctx_type_stack, sizeof(type2_T), 50);
cctx.ctx_type_list = &ufunc->uf_type_list;
ga_init2(&cctx.ctx_instr, sizeof(isn_T), 50);
instr = &cctx.ctx_instr;
// Set the context to the function, it may be compiled when called from
// another script. Set the script version to the most modern one.
// The line number will be set in next_line_from_context().
current_sctx = ufunc->uf_script_ctx;
current_sctx.sc_version = SCRIPT_VERSION_VIM9;
// Don't use the flag from ":legacy" here.
cmdmod.cmod_flags &= ~CMOD_LEGACY;
// Make sure error messages are OK.
do_estack_push = !estack_top_is_ufunc(ufunc, 1);
if (do_estack_push)
estack_push_ufunc(ufunc, 1);
estack_compiling = TRUE;
if (check_args_shadowing(ufunc, &cctx) == FAIL)
goto erret;
if (ufunc->uf_def_args.ga_len > 0)
{
int count = ufunc->uf_def_args.ga_len;
int first_def_arg = ufunc->uf_args.ga_len - count;
int i;
char_u *arg;
int off = STACK_FRAME_SIZE + (ufunc->uf_va_name != NULL ? 1 : 0);
int did_set_arg_type = FALSE;
// Produce instructions for the default values of optional arguments.
SOURCING_LNUM = 0; // line number unknown
for (i = 0; i < count; ++i)
{
type_T *val_type;
int arg_idx = first_def_arg + i;
where_T where = WHERE_INIT;
int r;
int jump_instr_idx = instr->ga_len;
isn_T *isn;
// Use a JUMP_IF_ARG_SET instruction to skip if the value was given.
if (generate_JUMP_IF_ARG_SET(&cctx, i - count - off) == FAIL)
goto erret;
// Make sure later arguments are not found.
ufunc->uf_args_visible = arg_idx;
arg = ((char_u **)(ufunc->uf_def_args.ga_data))[i];
r = compile_expr0(&arg, &cctx);
if (r == FAIL)
goto erret;
// If no type specified use the type of the default value.
// Otherwise check that the default value type matches the
// specified type.
val_type = get_type_on_stack(&cctx, 0);
where.wt_index = arg_idx + 1;
if (ufunc->uf_arg_types[arg_idx] == &t_unknown)
{
did_set_arg_type = TRUE;
ufunc->uf_arg_types[arg_idx] = val_type;
}
else if (need_type_where(val_type, ufunc->uf_arg_types[arg_idx],
-1, where, &cctx, FALSE, FALSE) == FAIL)
goto erret;
if (generate_STORE(&cctx, ISN_STORE, i - count - off, NULL) == FAIL)
goto erret;
// set instruction index in JUMP_IF_ARG_SET to here
isn = ((isn_T *)instr->ga_data) + jump_instr_idx;
isn->isn_arg.jumparg.jump_where = instr->ga_len;
}
if (did_set_arg_type)
set_function_type(ufunc);
}
ufunc->uf_args_visible = ufunc->uf_args.ga_len;
/*
* Loop over all the lines of the function and generate instructions.
*/
for (;;)
{
exarg_T ea;
int starts_with_colon = FALSE;
char_u *cmd;
cmdmod_T local_cmdmod;
// Bail out on the first error to avoid a flood of errors and report
// the right line number when inside try/catch.
if (did_emsg_before != did_emsg)
goto erret;
if (line != NULL && *line == '|')
// the line continues after a '|'
++line;
else if (line != NULL && *skipwhite(line) != NUL
&& !(*line == '#' && (line == cctx.ctx_line_start
|| VIM_ISWHITE(line[-1]))))
{
semsg(_(e_trailing_characters_str), line);
goto erret;
}
else if (line != NULL && vim9_bad_comment(skipwhite(line)))
goto erret;
else
{
line = next_line_from_context(&cctx, FALSE);
if (cctx.ctx_lnum >= ufunc->uf_lines.ga_len)
{
// beyond the last line
#ifdef FEAT_PROFILE
if (cctx.ctx_skip != SKIP_YES)
may_generate_prof_end(&cctx, prof_lnum);
#endif
break;
}
// Make a copy, splitting off nextcmd and removing trailing spaces
// may change it.
if (line != NULL)
{
line = vim_strsave(line);
if (ga_add_string(&lines_to_free, line) == FAIL)
goto erret;
}
}
CLEAR_FIELD(ea);
ea.cmdlinep = &line;
ea.cmd = skipwhite(line);
ea.skip = cctx.ctx_skip == SKIP_YES;
if (*ea.cmd == '#')
{
// "#" starts a comment, but "#{" is an error
if (vim9_bad_comment(ea.cmd))
goto erret;
line = (char_u *)"";
continue;
}
#ifdef FEAT_PROFILE
if (cctx.ctx_compile_type == CT_PROFILE && cctx.ctx_lnum != prof_lnum
&& cctx.ctx_skip != SKIP_YES)
{
may_generate_prof_end(&cctx, prof_lnum);
prof_lnum = cctx.ctx_lnum;
generate_instr(&cctx, ISN_PROF_START);
}
#endif
if (cctx.ctx_compile_type == CT_DEBUG && cctx.ctx_lnum != debug_lnum
&& cctx.ctx_skip != SKIP_YES)
{
debug_lnum = cctx.ctx_lnum;
generate_instr_debug(&cctx);
}
cctx.ctx_prev_lnum = cctx.ctx_lnum + 1;
// Some things can be recognized by the first character.
switch (*ea.cmd)
{
case '}':
{
// "}" ends a block scope
scopetype_T stype = cctx.ctx_scope == NULL
? NO_SCOPE : cctx.ctx_scope->se_type;
if (stype == BLOCK_SCOPE)
{
compile_endblock(&cctx);
line = ea.cmd;
}
else
{
emsg(_(e_using_rcurly_outside_if_block_scope));
goto erret;
}
if (line != NULL)
line = skipwhite(ea.cmd + 1);
continue;
}
case '{':
// "{" starts a block scope
// "{'a': 1}->func() is something else
if (ends_excmd(*skipwhite(ea.cmd + 1)))
{
line = compile_block(ea.cmd, &cctx);
continue;
}
break;
}
/*
* COMMAND MODIFIERS
*/
cctx.ctx_has_cmdmod = FALSE;
if (parse_command_modifiers(&ea, &errormsg, &local_cmdmod, FALSE)
== FAIL)
goto erret;
generate_cmdmods(&cctx, &local_cmdmod);
undo_cmdmod(&local_cmdmod);
// Check if there was a colon after the last command modifier or before
// the current position.
for (p = ea.cmd; p >= line; --p)
{
if (*p == ':')
starts_with_colon = TRUE;
if (p < ea.cmd && !VIM_ISWHITE(*p))
break;
}
// Skip ":call" to get to the function name, unless using :legacy
p = ea.cmd;
if (!(local_cmdmod.cmod_flags & CMOD_LEGACY))
{
if (checkforcmd(&ea.cmd, "call", 3))
{
if (*ea.cmd == '(')
// not for "call()"
ea.cmd = p;
else
ea.cmd = skipwhite(ea.cmd);
}
if (!starts_with_colon)
{
int assign;
// Check for assignment after command modifiers.
assign = may_compile_assignment(&ea, &line, &cctx);
if (assign == OK)
goto nextline;
if (assign == FAIL)
goto erret;
}
}
/*
* COMMAND after range
* 'text'->func() should not be confused with 'a mark
* 0z1234->func() should not be confused with a zero line number
* "++nr" and "--nr" are eval commands
* in "$ENV->func()" the "$" is not a range
* "123->func()" is a method call
*/
cmd = ea.cmd;
if ((*cmd != '$' || starts_with_colon)
&& (starts_with_colon
|| !(*cmd == '\''
|| (cmd[0] == '0' && cmd[1] == 'z')
|| (cmd[0] != NUL && cmd[0] == cmd[1]
&& (*cmd == '+' || *cmd == '-'))
|| number_method(cmd))))
{
ea.cmd = skip_range(ea.cmd, TRUE, NULL);
if (ea.cmd > cmd)
{
if (!starts_with_colon
&& !(local_cmdmod.cmod_flags & CMOD_LEGACY))
{
semsg(_(e_colon_required_before_range_str), cmd);
goto erret;
}
ea.addr_count = 1;
if (ends_excmd2(line, ea.cmd))
{
// A range without a command: jump to the line.
generate_EXEC(&cctx, ISN_EXECRANGE,
vim_strnsave(cmd, ea.cmd - cmd));
line = ea.cmd;
goto nextline;
}
}
}
p = find_ex_command(&ea, NULL,
starts_with_colon || (local_cmdmod.cmod_flags & CMOD_LEGACY)
? NULL : item_exists, &cctx);
if (p == NULL)
{
if (cctx.ctx_skip != SKIP_YES)
semsg(_(e_ambiguous_use_of_user_defined_command_str), ea.cmd);
goto erret;
}
// When using ":legacy cmd" always use compile_exec().
if (local_cmdmod.cmod_flags & CMOD_LEGACY)
{
char_u *start = ea.cmd;
switch (ea.cmdidx)
{
case CMD_if:
case CMD_elseif:
case CMD_else:
case CMD_endif:
case CMD_for:
case CMD_endfor:
case CMD_continue:
case CMD_break:
case CMD_while:
case CMD_endwhile:
case CMD_try:
case CMD_catch:
case CMD_finally:
case CMD_endtry:
semsg(_(e_cannot_use_legacy_with_command_str), ea.cmd);
goto erret;
default: break;
}
// ":legacy return expr" needs to be handled differently.
if (checkforcmd(&start, "return", 4))
ea.cmdidx = CMD_return;
else
ea.cmdidx = CMD_legacy;
}
if (p == ea.cmd && ea.cmdidx != CMD_SIZE)
{
// "eval" is used for "val->func()" and "var" for "var = val", then
// "p" is equal to "ea.cmd" for a valid command.
if (ea.cmdidx == CMD_eval || ea.cmdidx == CMD_var)
;
else if (cctx.ctx_skip == SKIP_YES)
{
line += STRLEN(line);
goto nextline;
}
else
{
semsg(_(e_command_not_recognized_str), ea.cmd);
goto erret;
}
}
if (cctx.ctx_had_return
&& ea.cmdidx != CMD_elseif
&& ea.cmdidx != CMD_else
&& ea.cmdidx != CMD_endif
&& ea.cmdidx != CMD_endfor
&& ea.cmdidx != CMD_endwhile
&& ea.cmdidx != CMD_catch
&& ea.cmdidx != CMD_finally
&& ea.cmdidx != CMD_endtry)
{
emsg(_(e_unreachable_code_after_return));
goto erret;
}
p = skipwhite(p);
if (ea.cmdidx != CMD_SIZE
&& ea.cmdidx != CMD_write && ea.cmdidx != CMD_read)
{
if (ea.cmdidx >= 0)
ea.argt = excmd_get_argt(ea.cmdidx);
if ((ea.argt & EX_BANG) && *p == '!')
{
ea.forceit = TRUE;
p = skipwhite(p + 1);
}
if ((ea.argt & EX_RANGE) == 0 && ea.addr_count > 0)
{
emsg(_(e_no_range_allowed));
goto erret;
}
}
switch (ea.cmdidx)
{
case CMD_def:
case CMD_function:
ea.arg = p;
line = compile_nested_function(&ea, &cctx, &lines_to_free);
break;
case CMD_return:
line = compile_return(p, check_return_type,
local_cmdmod.cmod_flags & CMOD_LEGACY, &cctx);
cctx.ctx_had_return = TRUE;
break;
case CMD_let:
emsg(_(e_cannot_use_let_in_vim9_script));
break;
case CMD_var:
case CMD_final:
case CMD_const:
case CMD_increment:
case CMD_decrement:
line = compile_assignment(p, &ea, ea.cmdidx, &cctx);
if (line == p)
{
emsg(_(e_invalid_assignment));
line = NULL;
}
break;
case CMD_unlet:
case CMD_unlockvar:
case CMD_lockvar:
line = compile_unletlock(p, &ea, &cctx);
break;
case CMD_import:
emsg(_(e_import_can_only_be_used_in_script));
line = NULL;
break;
case CMD_if:
line = compile_if(p, &cctx);
break;
case CMD_elseif:
line = compile_elseif(p, &cctx);
cctx.ctx_had_return = FALSE;
break;
case CMD_else:
line = compile_else(p, &cctx);
cctx.ctx_had_return = FALSE;
break;
case CMD_endif:
line = compile_endif(p, &cctx);
break;
case CMD_while:
line = compile_while(p, &cctx);
break;
case CMD_endwhile:
line = compile_endwhile(p, &cctx);
cctx.ctx_had_return = FALSE;
break;
case CMD_for:
line = compile_for(p, &cctx);
break;
case CMD_endfor:
line = compile_endfor(p, &cctx);
cctx.ctx_had_return = FALSE;
break;
case CMD_continue:
line = compile_continue(p, &cctx);
break;
case CMD_break:
line = compile_break(p, &cctx);
break;
case CMD_try:
line = compile_try(p, &cctx);
break;
case CMD_catch:
line = compile_catch(p, &cctx);
cctx.ctx_had_return = FALSE;
break;
case CMD_finally:
line = compile_finally(p, &cctx);
cctx.ctx_had_return = FALSE;
break;
case CMD_endtry:
line = compile_endtry(p, &cctx);
break;
case CMD_throw:
line = compile_throw(p, &cctx);
break;
case CMD_eval:
line = compile_eval(p, &cctx);
break;
case CMD_echo:
case CMD_echon:
case CMD_execute:
case CMD_echomsg:
case CMD_echoerr:
case CMD_echoconsole:
line = compile_mult_expr(p, ea.cmdidx, &cctx);
break;
case CMD_put:
ea.cmd = cmd;
line = compile_put(p, &ea, &cctx);
break;
case CMD_substitute:
if (check_global_and_subst(ea.cmd, p) == FAIL)
goto erret;
if (cctx.ctx_skip == SKIP_YES)
line = (char_u *)"";
else
{
ea.arg = p;
line = compile_substitute(line, &ea, &cctx);
}
break;
case CMD_redir:
ea.arg = p;
line = compile_redir(line, &ea, &cctx);
break;
case CMD_cexpr:
case CMD_lexpr:
case CMD_caddexpr:
case CMD_laddexpr:
case CMD_cgetexpr:
case CMD_lgetexpr:
#ifdef FEAT_QUICKFIX
ea.arg = p;
line = compile_cexpr(line, &ea, &cctx);
#else
ex_ni(&ea);
line = NULL;
#endif
break;
case CMD_append:
case CMD_change:
case CMD_insert:
case CMD_k:
case CMD_t:
case CMD_xit:
not_in_vim9(&ea);
goto erret;
case CMD_SIZE:
if (cctx.ctx_skip != SKIP_YES)
{
semsg(_(e_invalid_command_str), ea.cmd);
goto erret;
}
// We don't check for a next command here.
line = (char_u *)"";
break;
case CMD_lua:
case CMD_mzscheme:
case CMD_perl:
case CMD_py3:
case CMD_python3:
case CMD_python:
case CMD_pythonx:
case CMD_ruby:
case CMD_tcl:
ea.arg = p;
if (vim_strchr(line, '\n') == NULL)
line = compile_exec(line, &ea, &cctx);
else
// heredoc lines have been concatenated with NL
// characters in get_function_body()
line = compile_script(line, &cctx);
break;
case CMD_vim9script:
if (cctx.ctx_skip != SKIP_YES)
{
emsg(_(e_vim9script_can_only_be_used_in_script));
goto erret;
}
line = (char_u *)"";
break;
case CMD_global:
if (check_global_and_subst(ea.cmd, p) == FAIL)
goto erret;
// FALLTHROUGH
default:
// Not recognized, execute with do_cmdline_cmd().
ea.arg = p;
line = compile_exec(line, &ea, &cctx);
break;
}
nextline:
if (line == NULL)
goto erret;
line = skipwhite(line);
// Undo any command modifiers.
generate_undo_cmdmods(&cctx);
if (cctx.ctx_type_stack.ga_len < 0)
{
iemsg("Type stack underflow");
goto erret;
}
}
if (cctx.ctx_scope != NULL)
{
if (cctx.ctx_scope->se_type == IF_SCOPE)
emsg(_(e_missing_endif));
else if (cctx.ctx_scope->se_type == WHILE_SCOPE)
emsg(_(e_missing_endwhile));
else if (cctx.ctx_scope->se_type == FOR_SCOPE)
emsg(_(e_missing_endfor));
else
emsg(_(e_missing_rcurly));
goto erret;
}
if (!cctx.ctx_had_return)
{
if (ufunc->uf_ret_type->tt_type == VAR_UNKNOWN)
ufunc->uf_ret_type = &t_void;
else if (ufunc->uf_ret_type->tt_type != VAR_VOID)
{
emsg(_(e_missing_return_statement));
goto erret;
}
// Return void if there is no return at the end.
generate_instr(&cctx, ISN_RETURN_VOID);
}
// When compiled with ":silent!" and there was an error don't consider the
// function compiled.
if (emsg_silent == 0 || did_emsg_silent == did_emsg_silent_before)
{
dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data)
+ ufunc->uf_dfunc_idx;
dfunc->df_deleted = FALSE;
dfunc->df_script_seq = current_sctx.sc_seq;
#ifdef FEAT_PROFILE
if (cctx.ctx_compile_type == CT_PROFILE)
{
dfunc->df_instr_prof = instr->ga_data;
dfunc->df_instr_prof_count = instr->ga_len;
}
else
#endif
if (cctx.ctx_compile_type == CT_DEBUG)
{
dfunc->df_instr_debug = instr->ga_data;
dfunc->df_instr_debug_count = instr->ga_len;
}
else
{
dfunc->df_instr = instr->ga_data;
dfunc->df_instr_count = instr->ga_len;
}
dfunc->df_varcount = dfunc->df_var_names.ga_len;
dfunc->df_has_closure = cctx.ctx_has_closure;
if (cctx.ctx_outer_used)
ufunc->uf_flags |= FC_CLOSURE;
ufunc->uf_def_status = UF_COMPILED;
}
ret = OK;
erret:
if (ufunc->uf_def_status == UF_COMPILING)
{
dfunc_T *dfunc = ((dfunc_T *)def_functions.ga_data)
+ ufunc->uf_dfunc_idx;
// Compiling aborted, free the generated instructions.
clear_instr_ga(instr);
VIM_CLEAR(dfunc->df_name);
ga_clear_strings(&dfunc->df_var_names);
// If using the last entry in the table and it was added above, we
// might as well remove it.
if (!dfunc->df_deleted && new_def_function
&& ufunc->uf_dfunc_idx == def_functions.ga_len - 1)
{
--def_functions.ga_len;
ufunc->uf_dfunc_idx = 0;
}
ufunc->uf_def_status = UF_COMPILE_ERROR;
while (cctx.ctx_scope != NULL)
drop_scope(&cctx);
if (errormsg != NULL)
emsg(errormsg);
else if (did_emsg == did_emsg_before)
emsg(_(e_compiling_def_function_failed));
}
if (cctx.ctx_redir_lhs.lhs_name != NULL)
{
if (ret == OK)
{
emsg(_(e_missing_redir_end));
ret = FAIL;
}
vim_free(cctx.ctx_redir_lhs.lhs_name);
vim_free(cctx.ctx_redir_lhs.lhs_whole);
}
current_sctx = save_current_sctx;
estack_compiling = save_estack_compiling;
cmdmod.cmod_flags = save_cmod_flags;
if (do_estack_push)
estack_pop();
ga_clear_strings(&lines_to_free);
free_locals(&cctx);
ga_clear(&cctx.ctx_type_stack);
return ret;
} | 0 | [
"CWE-416"
]
| vim | 1889f499a4f248cd84e0e0bf6d0d820016774494 | 119,727,654,294,199,710,000,000,000,000,000,000,000 | 781 | patch 9.0.0221: accessing freed memory if compiling nested function fails
Problem: Accessing freed memory if compiling nested function fails.
Solution: Mess up the variable name so that it won't be found. |
static FontInstance *GTextFieldGetFont(GGadget *g) {
GTextField *gt = (GTextField *) g;
return( gt->font );
} | 0 | [
"CWE-119",
"CWE-787"
]
| fontforge | 626f751752875a0ddd74b9e217b6f4828713573c | 181,434,071,683,870,470,000,000,000,000,000,000,000 | 4 | Warn users before discarding their unsaved scripts (#3852)
* Warn users before discarding their unsaved scripts
This closes #3846. |
gxps_archive_input_stream_next_piece (GXPSArchiveInputStream *stream)
{
gchar *dirname;
gchar *prefix;
if (!stream->is_interleaved)
return;
dirname = g_path_get_dirname (archive_entry_pathname (stream->entry));
if (!dirname)
return;
stream->piece++;
prefix = g_strdup_printf ("%s/[%u]", dirname, stream->piece);
g_free (dirname);
while (gxps_zip_archive_iter_next (stream->zip, &stream->entry)) {
if (g_str_has_prefix (archive_entry_pathname (stream->entry), prefix)) {
const gchar *suffix = archive_entry_pathname (stream->entry) + strlen (prefix);
if (g_ascii_strcasecmp (suffix, ".piece") == 0 ||
g_ascii_strcasecmp (suffix, ".last.piece") == 0)
break;
}
archive_read_data_skip (stream->zip->archive);
}
g_free (prefix);
} | 0 | [
"CWE-125"
]
| libgxps | b458226e162fe1ffe7acb4230c114a52ada5131b | 144,486,066,952,425,270,000,000,000,000,000,000,000 | 29 | gxps-archive: Ensure gxps_archive_read_entry() fills the GError in case of failure
And fix the callers to not overwrite the GError. |
static void announce(int mod)
{
struct ifsock *ifs;
logit(LOG_INFO, "Sending SSDP NOTIFY new:%d ...", mod);
LIST_FOREACH(ifs, &il, link) {
size_t i;
if (mod && !ifs->mod)
continue;
ifs->mod = 0;
// send_search(ifs, "upnp:rootdevice");
for (i = 0; supported_types[i]; i++) {
/* UUID sent in SSDP_ST_ALL, first announce */
if (!strcmp(supported_types[i], uuid))
continue;
send_message(ifs, supported_types[i], NULL);
}
}
} | 0 | [
"CWE-119",
"CWE-787"
]
| ssdp-responder | ce04b1f29a137198182f60bbb628d5ceb8171765 | 41,587,177,510,743,863,000,000,000,000,000,000,000 | 23 | Fix #1: Ensure recv buf is always NUL terminated
Signed-off-by: Joachim Nilsson <[email protected]> |
xsltNumberFormatGetValue(xmlXPathContextPtr context,
xmlNodePtr node,
const xmlChar *value,
double *number)
{
int amount = 0;
xmlBufferPtr pattern;
xmlXPathObjectPtr obj;
pattern = xmlBufferCreate();
if (pattern != NULL) {
xmlBufferCCat(pattern, "number(");
xmlBufferCat(pattern, value);
xmlBufferCCat(pattern, ")");
context->node = node;
obj = xmlXPathEvalExpression(xmlBufferContent(pattern),
context);
if (obj != NULL) {
*number = obj->floatval;
amount++;
xmlXPathFreeObject(obj);
}
xmlBufferFree(pattern);
}
return amount;
} | 0 | [
"CWE-119"
]
| libxslt | d182d8f6ba3071503d96ce17395c9d55871f0242 | 210,969,800,723,235,500,000,000,000,000,000,000,000 | 26 | Fix xsltNumberFormatGetMultipleLevel
Namespace nodes are actually an xmlNs, not an xmlNode. They must be
special-cased in xsltNumberFormatGetMultipleLevel to avoid an
out-of-bounds heap access.
Move the test whether a node matches the "count" pattern to a separate
function to make the code more readable. As a side effect, we also
compare expanded names when walking up the ancestor axis, fixing an
insignificant bug. |
DLLIMPORT cfg_t *cfg_addtsec(cfg_t *cfg, const char *name, const char *title)
{
cfg_opt_t *opt;
cfg_value_t *val;
if (cfg_gettsec(cfg, name, title))
return NULL;
opt = cfg_getopt(cfg, name);
if (!opt) {
cfg_error(cfg, _("no such option '%s'"), name);
return NULL;
}
val = cfg_setopt(cfg, opt, title);
if (!val)
return NULL;
val->section->path = cfg->path; /* Remember global search path. */
val->section->line = 1;
val->section->errfunc = cfg->errfunc;
return val->section;
} | 0 | []
| libconfuse | d73777c2c3566fb2647727bb56d9a2295b81669b | 62,662,134,783,259,260,000,000,000,000,000,000,000 | 23 | Fix #163: unterminated username used with getpwnam()
Signed-off-by: Joachim Wiberg <[email protected]> |
static inline void net_tx_pkt_sendv(struct NetTxPkt *pkt,
NetClientState *nc, const struct iovec *iov, int iov_cnt)
{
if (pkt->is_loopback) {
qemu_receive_packet_iov(nc, iov, iov_cnt);
} else {
qemu_sendv_packet(nc, iov, iov_cnt);
}
} | 0 | [
"CWE-835"
]
| qemu | 8c552542b81e56ff532dd27ec6e5328954bdda73 | 241,617,963,706,423,720,000,000,000,000,000,000,000 | 9 | tx_pkt: switch to use qemu_receive_packet_iov() for loopback
This patch switches to use qemu_receive_receive_iov() which can detect
reentrancy and return early.
This is intended to address CVE-2021-3416.
Cc: Prasad J Pandit <[email protected]>
Cc: [email protected]
Reviewed-by: Philippe Mathieu-Daudé <[email protected]>
Signed-off-by: Jason Wang <[email protected]> |
void RemoteFsDevice::setAudioFolder() const
{
audioFolder=Utils::fixPath(mountPoint(details, true));
} | 0 | [
"CWE-20",
"CWE-22"
]
| cantata | afc4f8315d3e96574925fb530a7004cc9e6ce3d3 | 33,159,988,533,244,830,000,000,000,000,000,000,000 | 4 | Remove internal Samba shre mounting code, this had some privilege escalation issues, and is not well tested |
nautilus_file_class_init (NautilusFileClass *class)
{
GtkIconTheme *icon_theme;
attribute_name_q = g_quark_from_static_string ("name");
attribute_size_q = g_quark_from_static_string ("size");
attribute_type_q = g_quark_from_static_string ("type");
attribute_modification_date_q = g_quark_from_static_string ("modification_date");
attribute_date_modified_q = g_quark_from_static_string ("date_modified");
attribute_accessed_date_q = g_quark_from_static_string ("accessed_date");
attribute_date_accessed_q = g_quark_from_static_string ("date_accessed");
attribute_emblems_q = g_quark_from_static_string ("emblems");
attribute_mime_type_q = g_quark_from_static_string ("mime_type");
attribute_size_detail_q = g_quark_from_static_string ("size_detail");
attribute_deep_size_q = g_quark_from_static_string ("deep_size");
attribute_deep_file_count_q = g_quark_from_static_string ("deep_file_count");
attribute_deep_directory_count_q = g_quark_from_static_string ("deep_directory_count");
attribute_deep_total_count_q = g_quark_from_static_string ("deep_total_count");
attribute_date_changed_q = g_quark_from_static_string ("date_changed");
attribute_date_permissions_q = g_quark_from_static_string ("date_permissions");
attribute_permissions_q = g_quark_from_static_string ("permissions");
attribute_selinux_context_q = g_quark_from_static_string ("selinux_context");
attribute_octal_permissions_q = g_quark_from_static_string ("octal_permissions");
attribute_owner_q = g_quark_from_static_string ("owner");
attribute_group_q = g_quark_from_static_string ("group");
attribute_uri_q = g_quark_from_static_string ("uri");
attribute_where_q = g_quark_from_static_string ("where");
attribute_link_target_q = g_quark_from_static_string ("link_target");
attribute_volume_q = g_quark_from_static_string ("volume");
attribute_free_space_q = g_quark_from_static_string ("free_space");
G_OBJECT_CLASS (class)->finalize = finalize;
G_OBJECT_CLASS (class)->constructor = nautilus_file_constructor;
signals[CHANGED] =
g_signal_new ("changed",
G_TYPE_FROM_CLASS (class),
G_SIGNAL_RUN_LAST,
G_STRUCT_OFFSET (NautilusFileClass, changed),
NULL, NULL,
g_cclosure_marshal_VOID__VOID,
G_TYPE_NONE, 0);
signals[UPDATED_DEEP_COUNT_IN_PROGRESS] =
g_signal_new ("updated_deep_count_in_progress",
G_TYPE_FROM_CLASS (class),
G_SIGNAL_RUN_LAST,
G_STRUCT_OFFSET (NautilusFileClass, updated_deep_count_in_progress),
NULL, NULL,
g_cclosure_marshal_VOID__VOID,
G_TYPE_NONE, 0);
g_type_class_add_private (class, sizeof (NautilusFileDetails));
eel_preferences_add_auto_enum (NAUTILUS_PREFERENCES_DATE_FORMAT,
&date_format_pref);
thumbnail_limit_changed_callback (NULL);
eel_preferences_add_callback (NAUTILUS_PREFERENCES_IMAGE_FILE_THUMBNAIL_LIMIT,
thumbnail_limit_changed_callback,
NULL);
thumbnail_size_changed_callback (NULL);
eel_preferences_add_callback (NAUTILUS_PREFERENCES_ICON_VIEW_THUMBNAIL_SIZE,
thumbnail_size_changed_callback,
NULL);
show_thumbnails_changed_callback (NULL);
eel_preferences_add_callback (NAUTILUS_PREFERENCES_SHOW_IMAGE_FILE_THUMBNAILS,
show_thumbnails_changed_callback,
NULL);
icon_theme = gtk_icon_theme_get_default ();
g_signal_connect_object (icon_theme,
"changed",
G_CALLBACK (icon_theme_changed_callback),
NULL, 0);
g_signal_connect (nautilus_signaller_get_current (),
"mime_data_changed",
G_CALLBACK (mime_type_data_changed_callback),
NULL);
} | 0 | []
| nautilus | 7632a3e13874a2c5e8988428ca913620a25df983 | 239,963,143,569,179,740,000,000,000,000,000,000,000 | 82 | Check for trusted desktop file launchers.
2009-02-24 Alexander Larsson <[email protected]>
* libnautilus-private/nautilus-directory-async.c:
Check for trusted desktop file launchers.
* libnautilus-private/nautilus-file-private.h:
* libnautilus-private/nautilus-file.c:
* libnautilus-private/nautilus-file.h:
Add nautilus_file_is_trusted_link.
Allow unsetting of custom display name.
* libnautilus-private/nautilus-mime-actions.c:
Display dialog when trying to launch a non-trusted desktop file.
svn path=/trunk/; revision=15003 |
CImg<T>& columns(const int x0, const int x1) {
return get_columns(x0,x1).move_to(*this);
} | 0 | [
"CWE-770"
]
| cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 111,277,627,077,924,790,000,000,000,000,000,000,000 | 3 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size. |
check_for_string_or_list_or_dict_arg(typval_T *args, int idx)
{
if (args[idx].v_type != VAR_STRING
&& args[idx].v_type != VAR_LIST
&& args[idx].v_type != VAR_DICT)
{
semsg(_(e_string_list_or_dict_required_for_argument_nr), idx + 1);
return FAIL;
}
return OK;
} | 0 | [
"CWE-125",
"CWE-122"
]
| vim | 1e56bda9048a9625bce6e660938c834c5c15b07d | 181,878,178,122,353,940,000,000,000,000,000,000,000 | 11 | patch 9.0.0104: going beyond allocated memory when evaluating string constant
Problem: Going beyond allocated memory when evaluating string constant.
Solution: Properly skip over <Key> form. |
user_func_error(int error, char_u *name, funcexe_T *funcexe)
{
switch (error)
{
case FCERR_UNKNOWN:
if (funcexe->fe_found_var)
semsg(_(e_not_callable_type_str), name);
else
emsg_funcname(e_unknown_function_str, name);
break;
case FCERR_NOTMETHOD:
emsg_funcname(
N_(e_cannot_use_function_as_method_str), name);
break;
case FCERR_DELETED:
emsg_funcname(e_function_was_deleted_str, name);
break;
case FCERR_TOOMANY:
emsg_funcname(e_too_many_arguments_for_function_str, name);
break;
case FCERR_TOOFEW:
emsg_funcname(e_not_enough_arguments_for_function_str, name);
break;
case FCERR_SCRIPT:
emsg_funcname(
e_using_sid_not_in_script_context_str, name);
break;
case FCERR_DICT:
emsg_funcname(e_calling_dict_function_without_dictionary_str,
name);
break;
}
} | 0 | [
"CWE-416"
]
| vim | 9f1a39a5d1cd7989ada2d1cb32f97d84360e050f | 265,534,366,733,353,200,000,000,000,000,000,000,000 | 33 | patch 8.2.4040: keeping track of allocated lines is too complicated
Problem: Keeping track of allocated lines in user functions is too
complicated.
Solution: Instead of freeing individual lines keep them all until the end. |
static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct net *tgt_net = net;
struct ifinfomsg *ifm;
char ifname[IFNAMSIZ];
struct nlattr *tb[IFLA_MAX+1];
struct net_device *dev = NULL;
struct sk_buff *nskb;
int netnsid = -1;
int err;
u32 ext_filter_mask = 0;
err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
if (err < 0)
return err;
if (tb[IFLA_IF_NETNSID]) {
netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
tgt_net = get_target_net(NETLINK_CB(skb).sk, netnsid);
if (IS_ERR(tgt_net))
return PTR_ERR(tgt_net);
}
if (tb[IFLA_IFNAME])
nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
if (tb[IFLA_EXT_MASK])
ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
err = -EINVAL;
ifm = nlmsg_data(nlh);
if (ifm->ifi_index > 0)
dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
else if (tb[IFLA_IFNAME])
dev = __dev_get_by_name(tgt_net, ifname);
else
goto out;
err = -ENODEV;
if (dev == NULL)
goto out;
err = -ENOBUFS;
nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
if (nskb == NULL)
goto out;
err = rtnl_fill_ifinfo(nskb, dev, net,
RTM_NEWLINK, NETLINK_CB(skb).portid,
nlh->nlmsg_seq, 0, 0, ext_filter_mask,
0, NULL, netnsid);
if (err < 0) {
/* -EMSGSIZE implies BUG in if_nlmsg_size */
WARN_ON(err == -EMSGSIZE);
kfree_skb(nskb);
} else
err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
out:
if (netnsid >= 0)
put_net(tgt_net);
return err;
} | 0 | [
"CWE-476"
]
| linux | f428fe4a04cc339166c8bbd489789760de3a0cee | 155,405,117,827,231,860,000,000,000,000,000,000,000 | 65 | rtnetlink: give a user socket to get_target_net()
This function is used from two places: rtnl_dump_ifinfo and
rtnl_getlink. In rtnl_getlink(), we give a request skb into
get_target_net(), but in rtnl_dump_ifinfo, we give a response skb
into get_target_net().
The problem here is that NETLINK_CB() isn't initialized for the response
skb. In both cases we can get a user socket and give it instead of skb
into get_target_net().
This bug was found by syzkaller with this call-trace:
kasan: GPF could be caused by NULL-ptr deref or user memory access
general protection fault: 0000 [#1] SMP KASAN
Modules linked in:
CPU: 1 PID: 3149 Comm: syzkaller140561 Not tainted 4.15.0-rc4-mm1+ #47
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS
Google 01/01/2011
RIP: 0010:__netlink_ns_capable+0x8b/0x120 net/netlink/af_netlink.c:868
RSP: 0018:ffff8801c880f348 EFLAGS: 00010206
RAX: dffffc0000000000 RBX: 0000000000000000 RCX: ffffffff8443f900
RDX: 000000000000007b RSI: ffffffff86510f40 RDI: 00000000000003d8
RBP: ffff8801c880f360 R08: 0000000000000000 R09: 1ffff10039101e4f
R10: 0000000000000000 R11: 0000000000000001 R12: ffffffff86510f40
R13: 000000000000000c R14: 0000000000000004 R15: 0000000000000011
FS: 0000000001a1a880(0000) GS:ffff8801db300000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 0000000020151000 CR3: 00000001c9511005 CR4: 00000000001606e0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
Call Trace:
netlink_ns_capable+0x26/0x30 net/netlink/af_netlink.c:886
get_target_net+0x9d/0x120 net/core/rtnetlink.c:1765
rtnl_dump_ifinfo+0x2e5/0xee0 net/core/rtnetlink.c:1806
netlink_dump+0x48c/0xce0 net/netlink/af_netlink.c:2222
__netlink_dump_start+0x4f0/0x6d0 net/netlink/af_netlink.c:2319
netlink_dump_start include/linux/netlink.h:214 [inline]
rtnetlink_rcv_msg+0x7f0/0xb10 net/core/rtnetlink.c:4485
netlink_rcv_skb+0x21e/0x460 net/netlink/af_netlink.c:2441
rtnetlink_rcv+0x1c/0x20 net/core/rtnetlink.c:4540
netlink_unicast_kernel net/netlink/af_netlink.c:1308 [inline]
netlink_unicast+0x4be/0x6a0 net/netlink/af_netlink.c:1334
netlink_sendmsg+0xa4a/0xe60 net/netlink/af_netlink.c:1897
Cc: Jiri Benc <[email protected]>
Fixes: 79e1ad148c84 ("rtnetlink: use netnsid to query interface")
Signed-off-by: Andrei Vagin <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
int main( int argc, char *argv[] )
{
FILE *f;
int ret;
size_t i;
rsa_context rsa;
unsigned char hash[20];
unsigned char buf[POLARSSL_MPI_MAX_SIZE];
char filename[512];
ret = 1;
if( argc != 3 )
{
printf( "usage: rsa_verify_pss <key_file> <filename>\n" );
#if defined(_WIN32)
printf( "\n" );
#endif
goto exit;
}
printf( "\n . Reading public key from '%s'", argv[1] );
fflush( stdout );
rsa_init( &rsa, RSA_PKCS_V21, POLARSSL_MD_SHA1 );
if( ( ret = x509parse_public_keyfile( &rsa, argv[1] ) ) != 0 )
{
printf( " failed\n ! x509parse_public_key returned %d\n\n", ret );
goto exit;
}
/*
* Extract the RSA signature from the text file
*/
ret = 1;
snprintf( filename, 512, "%s.sig", argv[2] );
if( ( f = fopen( filename, "rb" ) ) == NULL )
{
printf( "\n ! Could not open %s\n\n", filename );
goto exit;
}
i = fread( buf, 1, rsa.len, f );
fclose( f );
if( i != rsa.len )
{
printf( "\n ! Invalid RSA signature format\n\n" );
goto exit;
}
/*
* Compute the SHA-1 hash of the input file and compare
* it with the hash decrypted from the RSA signature.
*/
printf( "\n . Verifying the RSA/SHA-1 signature" );
fflush( stdout );
if( ( ret = sha1_file( argv[2], hash ) ) != 0 )
{
printf( " failed\n ! Could not open or read %s\n\n", argv[2] );
goto exit;
}
if( ( ret = rsa_pkcs1_verify( &rsa, RSA_PUBLIC, SIG_RSA_SHA1,
20, hash, buf ) ) != 0 )
{
printf( " failed\n ! rsa_pkcs1_verify returned %d\n\n", ret );
goto exit;
}
printf( "\n . OK (the decrypted SHA-1 hash matches)\n\n" );
ret = 0;
exit:
#if defined(_WIN32)
printf( " + Press Enter to exit this program.\n" );
fflush( stdout ); getchar();
#endif
return( ret );
} | 1 | [
"CWE-310"
]
| polarssl | 43f9799ce61c6392a014d0a2ea136b4b3a9ee194 | 101,114,238,879,942,060,000,000,000,000,000,000,000 | 88 | RSA blinding on CRT operations to counter timing attacks |
GF_Err btrt_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_BitRateBox *ptr = (GF_BitRateBox *) s;
if (!s) return GF_BAD_PARAM;
e = gf_isom_box_write_header(s, bs);
if (e) return e;
gf_bs_write_u32(bs, ptr->bufferSizeDB);
gf_bs_write_u32(bs, ptr->maxBitrate);
gf_bs_write_u32(bs, ptr->avgBitrate);
return GF_OK;
} | 0 | [
"CWE-119",
"CWE-787"
]
| gpac | 90dc7f853d31b0a4e9441cba97feccf36d8b69a4 | 165,053,812,467,201,330,000,000,000,000,000,000,000 | 12 | fix some exploitable overflows (#994, #997) |
static int check_cond_jmp_op(struct bpf_verifier_env *env,
struct bpf_insn *insn, int *insn_idx)
{
struct bpf_verifier_state *this_branch = env->cur_state;
struct bpf_verifier_state *other_branch;
struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
struct bpf_reg_state *dst_reg, *other_branch_regs;
u8 opcode = BPF_OP(insn->code);
int err;
if (opcode > BPF_JSLE) {
verbose(env, "invalid BPF_JMP opcode %x\n", opcode);
return -EINVAL;
}
if (BPF_SRC(insn->code) == BPF_X) {
if (insn->imm != 0) {
verbose(env, "BPF_JMP uses reserved fields\n");
return -EINVAL;
}
/* check src1 operand */
err = check_reg_arg(env, insn->src_reg, SRC_OP);
if (err)
return err;
if (is_pointer_value(env, insn->src_reg)) {
verbose(env, "R%d pointer comparison prohibited\n",
insn->src_reg);
return -EACCES;
}
} else {
if (insn->src_reg != BPF_REG_0) {
verbose(env, "BPF_JMP uses reserved fields\n");
return -EINVAL;
}
}
/* check src2 operand */
err = check_reg_arg(env, insn->dst_reg, SRC_OP);
if (err)
return err;
dst_reg = ®s[insn->dst_reg];
if (BPF_SRC(insn->code) == BPF_K) {
int pred = is_branch_taken(dst_reg, insn->imm, opcode);
if (pred == 1) {
/* only follow the goto, ignore fall-through */
*insn_idx += insn->off;
return 0;
} else if (pred == 0) {
/* only follow fall-through branch, since
* that's where the program will go
*/
return 0;
}
}
other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
if (!other_branch)
return -EFAULT;
other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
/* detect if we are comparing against a constant value so we can adjust
* our min/max values for our dst register.
* this is only legit if both are scalars (or pointers to the same
* object, I suppose, but we don't support that right now), because
* otherwise the different base pointers mean the offsets aren't
* comparable.
*/
if (BPF_SRC(insn->code) == BPF_X) {
if (dst_reg->type == SCALAR_VALUE &&
regs[insn->src_reg].type == SCALAR_VALUE) {
if (tnum_is_const(regs[insn->src_reg].var_off))
reg_set_min_max(&other_branch_regs[insn->dst_reg],
dst_reg, regs[insn->src_reg].var_off.value,
opcode);
else if (tnum_is_const(dst_reg->var_off))
reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
®s[insn->src_reg],
dst_reg->var_off.value, opcode);
else if (opcode == BPF_JEQ || opcode == BPF_JNE)
/* Comparing for equality, we can combine knowledge */
reg_combine_min_max(&other_branch_regs[insn->src_reg],
&other_branch_regs[insn->dst_reg],
®s[insn->src_reg],
®s[insn->dst_reg], opcode);
}
} else if (dst_reg->type == SCALAR_VALUE) {
reg_set_min_max(&other_branch_regs[insn->dst_reg],
dst_reg, insn->imm, opcode);
}
/* detect if R == 0 where R is returned from bpf_map_lookup_elem() */
if (BPF_SRC(insn->code) == BPF_K &&
insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
reg_type_may_be_null(dst_reg->type)) {
/* Mark all identical registers in each branch as either
* safe or unknown depending R == 0 or R != 0 conditional.
*/
mark_ptr_or_null_regs(this_branch, insn->dst_reg,
opcode == BPF_JNE);
mark_ptr_or_null_regs(other_branch, insn->dst_reg,
opcode == BPF_JEQ);
} else if (!try_match_pkt_pointers(insn, dst_reg, ®s[insn->src_reg],
this_branch, other_branch) &&
is_pointer_value(env, insn->dst_reg)) {
verbose(env, "R%d pointer comparison prohibited\n",
insn->dst_reg);
return -EACCES;
}
if (env->log.level)
print_verifier_state(env, this_branch->frame[this_branch->curframe]);
return 0;
} | 1 | [
"CWE-703",
"CWE-189"
]
| linux | 979d63d50c0c0f7bc537bf821e056cc9fe5abd38 | 26,746,519,164,483,106,000,000,000,000,000,000,000 | 117 | bpf: prevent out of bounds speculation on pointer arithmetic
Jann reported that the original commit back in b2157399cc98
("bpf: prevent out-of-bounds speculation") was not sufficient
to stop CPU from speculating out of bounds memory access:
While b2157399cc98 only focussed on masking array map access
for unprivileged users for tail calls and data access such
that the user provided index gets sanitized from BPF program
and syscall side, there is still a more generic form affected
from BPF programs that applies to most maps that hold user
data in relation to dynamic map access when dealing with
unknown scalars or "slow" known scalars as access offset, for
example:
- Load a map value pointer into R6
- Load an index into R7
- Do a slow computation (e.g. with a memory dependency) that
loads a limit into R8 (e.g. load the limit from a map for
high latency, then mask it to make the verifier happy)
- Exit if R7 >= R8 (mispredicted branch)
- Load R0 = R6[R7]
- Load R0 = R6[R0]
For unknown scalars there are two options in the BPF verifier
where we could derive knowledge from in order to guarantee
safe access to the memory: i) While </>/<=/>= variants won't
allow to derive any lower or upper bounds from the unknown
scalar where it would be safe to add it to the map value
pointer, it is possible through ==/!= test however. ii) another
option is to transform the unknown scalar into a known scalar,
for example, through ALU ops combination such as R &= <imm>
followed by R |= <imm> or any similar combination where the
original information from the unknown scalar would be destroyed
entirely leaving R with a constant. The initial slow load still
precedes the latter ALU ops on that register, so the CPU
executes speculatively from that point. Once we have the known
scalar, any compare operation would work then. A third option
only involving registers with known scalars could be crafted
as described in [0] where a CPU port (e.g. Slow Int unit)
would be filled with many dependent computations such that
the subsequent condition depending on its outcome has to wait
for evaluation on its execution port and thereby executing
speculatively if the speculated code can be scheduled on a
different execution port, or any other form of mistraining
as described in [1], for example. Given this is not limited
to only unknown scalars, not only map but also stack access
is affected since both is accessible for unprivileged users
and could potentially be used for out of bounds access under
speculation.
In order to prevent any of these cases, the verifier is now
sanitizing pointer arithmetic on the offset such that any
out of bounds speculation would be masked in a way where the
pointer arithmetic result in the destination register will
stay unchanged, meaning offset masked into zero similar as
in array_index_nospec() case. With regards to implementation,
there are three options that were considered: i) new insn
for sanitation, ii) push/pop insn and sanitation as inlined
BPF, iii) reuse of ax register and sanitation as inlined BPF.
Option i) has the downside that we end up using from reserved
bits in the opcode space, but also that we would require
each JIT to emit masking as native arch opcodes meaning
mitigation would have slow adoption till everyone implements
it eventually which is counter-productive. Option ii) and iii)
have both in common that a temporary register is needed in
order to implement the sanitation as inlined BPF since we
are not allowed to modify the source register. While a push /
pop insn in ii) would be useful to have in any case, it
requires once again that every JIT needs to implement it
first. While possible, amount of changes needed would also
be unsuitable for a -stable patch. Therefore, the path which
has fewer changes, less BPF instructions for the mitigation
and does not require anything to be changed in the JITs is
option iii) which this work is pursuing. The ax register is
already mapped to a register in all JITs (modulo arm32 where
it's mapped to stack as various other BPF registers there)
and used in constant blinding for JITs-only so far. It can
be reused for verifier rewrites under certain constraints.
The interpreter's tmp "register" has therefore been remapped
into extending the register set with hidden ax register and
reusing that for a number of instructions that needed the
prior temporary variable internally (e.g. div, mod). This
allows for zero increase in stack space usage in the interpreter,
and enables (restricted) generic use in rewrites otherwise as
long as such a patchlet does not make use of these instructions.
The sanitation mask is dynamic and relative to the offset the
map value or stack pointer currently holds.
There are various cases that need to be taken under consideration
for the masking, e.g. such operation could look as follows:
ptr += val or val += ptr or ptr -= val. Thus, the value to be
sanitized could reside either in source or in destination
register, and the limit is different depending on whether
the ALU op is addition or subtraction and depending on the
current known and bounded offset. The limit is derived as
follows: limit := max_value_size - (smin_value + off). For
subtraction: limit := umax_value + off. This holds because
we do not allow any pointer arithmetic that would
temporarily go out of bounds or would have an unknown
value with mixed signed bounds where it is unclear at
verification time whether the actual runtime value would
be either negative or positive. For example, we have a
derived map pointer value with constant offset and bounded
one, so limit based on smin_value works because the verifier
requires that statically analyzed arithmetic on the pointer
must be in bounds, and thus it checks if resulting
smin_value + off and umax_value + off is still within map
value bounds at time of arithmetic in addition to time of
access. Similarly, for the case of stack access we derive
the limit as follows: MAX_BPF_STACK + off for subtraction
and -off for the case of addition where off := ptr_reg->off +
ptr_reg->var_off.value. Subtraction is a special case for
the masking which can be in form of ptr += -val, ptr -= -val,
or ptr -= val. In the first two cases where we know that
the value is negative, we need to temporarily negate the
value in order to do the sanitation on a positive value
where we later swap the ALU op, and restore original source
register if the value was in source.
The sanitation of pointer arithmetic alone is still not fully
sufficient as is, since a scenario like the following could
happen ...
PTR += 0x1000 (e.g. K-based imm)
PTR -= BIG_NUMBER_WITH_SLOW_COMPARISON
PTR += 0x1000
PTR -= BIG_NUMBER_WITH_SLOW_COMPARISON
[...]
... which under speculation could end up as ...
PTR += 0x1000
PTR -= 0 [ truncated by mitigation ]
PTR += 0x1000
PTR -= 0 [ truncated by mitigation ]
[...]
... and therefore still access out of bounds. To prevent such
case, the verifier is also analyzing safety for potential out
of bounds access under speculative execution. Meaning, it is
also simulating pointer access under truncation. We therefore
"branch off" and push the current verification state after the
ALU operation with known 0 to the verification stack for later
analysis. Given the current path analysis succeeded it is
likely that the one under speculation can be pruned. In any
case, it is also subject to existing complexity limits and
therefore anything beyond this point will be rejected. In
terms of pruning, it needs to be ensured that the verification
state from speculative execution simulation must never prune
a non-speculative execution path, therefore, we mark verifier
state accordingly at the time of push_stack(). If verifier
detects out of bounds access under speculative execution from
one of the possible paths that includes a truncation, it will
reject such program.
Given we mask every reg-based pointer arithmetic for
unprivileged programs, we've been looking into how it could
affect real-world programs in terms of size increase. As the
majority of programs are targeted for privileged-only use
case, we've unconditionally enabled masking (with its alu
restrictions on top of it) for privileged programs for the
sake of testing in order to check i) whether they get rejected
in its current form, and ii) by how much the number of
instructions and size will increase. We've tested this by
using Katran, Cilium and test_l4lb from the kernel selftests.
For Katran we've evaluated balancer_kern.o, Cilium bpf_lxc.o
and an older test object bpf_lxc_opt_-DUNKNOWN.o and l4lb
we've used test_l4lb.o as well as test_l4lb_noinline.o. We
found that none of the programs got rejected by the verifier
with this change, and that impact is rather minimal to none.
balancer_kern.o had 13,904 bytes (1,738 insns) xlated and
7,797 bytes JITed before and after the change. Most complex
program in bpf_lxc.o had 30,544 bytes (3,817 insns) xlated
and 18,538 bytes JITed before and after and none of the other
tail call programs in bpf_lxc.o had any changes either. For
the older bpf_lxc_opt_-DUNKNOWN.o object we found a small
increase from 20,616 bytes (2,576 insns) and 12,536 bytes JITed
before to 20,664 bytes (2,582 insns) and 12,558 bytes JITed
after the change. Other programs from that object file had
similar small increase. Both test_l4lb.o had no change and
remained at 6,544 bytes (817 insns) xlated and 3,401 bytes
JITed and for test_l4lb_noinline.o constant at 5,080 bytes
(634 insns) xlated and 3,313 bytes JITed. This can be explained
in that LLVM typically optimizes stack based pointer arithmetic
by using K-based operations and that use of dynamic map access
is not overly frequent. However, in future we may decide to
optimize the algorithm further under known guarantees from
branch and value speculation. Latter seems also unclear in
terms of prediction heuristics that today's CPUs apply as well
as whether there could be collisions in e.g. the predictor's
Value History/Pattern Table for triggering out of bounds access,
thus masking is performed unconditionally at this point but could
be subject to relaxation later on. We were generally also
brainstorming various other approaches for mitigation, but the
blocker was always lack of available registers at runtime and/or
overhead for runtime tracking of limits belonging to a specific
pointer. Thus, we found this to be minimally intrusive under
given constraints.
With that in place, a simple example with sanitized access on
unprivileged load at post-verification time looks as follows:
# bpftool prog dump xlated id 282
[...]
28: (79) r1 = *(u64 *)(r7 +0)
29: (79) r2 = *(u64 *)(r7 +8)
30: (57) r1 &= 15
31: (79) r3 = *(u64 *)(r0 +4608)
32: (57) r3 &= 1
33: (47) r3 |= 1
34: (2d) if r2 > r3 goto pc+19
35: (b4) (u32) r11 = (u32) 20479 |
36: (1f) r11 -= r2 | Dynamic sanitation for pointer
37: (4f) r11 |= r2 | arithmetic with registers
38: (87) r11 = -r11 | containing bounded or known
39: (c7) r11 s>>= 63 | scalars in order to prevent
40: (5f) r11 &= r2 | out of bounds speculation.
41: (0f) r4 += r11 |
42: (71) r4 = *(u8 *)(r4 +0)
43: (6f) r4 <<= r1
[...]
For the case where the scalar sits in the destination register
as opposed to the source register, the following code is emitted
for the above example:
[...]
16: (b4) (u32) r11 = (u32) 20479
17: (1f) r11 -= r2
18: (4f) r11 |= r2
19: (87) r11 = -r11
20: (c7) r11 s>>= 63
21: (5f) r2 &= r11
22: (0f) r2 += r0
23: (61) r0 = *(u32 *)(r2 +0)
[...]
JIT blinding example with non-conflicting use of r10:
[...]
d5: je 0x0000000000000106 _
d7: mov 0x0(%rax),%edi |
da: mov $0xf153246,%r10d | Index load from map value and
e0: xor $0xf153259,%r10 | (const blinded) mask with 0x1f.
e7: and %r10,%rdi |_
ea: mov $0x2f,%r10d |
f0: sub %rdi,%r10 | Sanitized addition. Both use r10
f3: or %rdi,%r10 | but do not interfere with each
f6: neg %r10 | other. (Neither do these instructions
f9: sar $0x3f,%r10 | interfere with the use of ax as temp
fd: and %r10,%rdi | in interpreter.)
100: add %rax,%rdi |_
103: mov 0x0(%rdi),%eax
[...]
Tested that it fixes Jann's reproducer, and also checked that test_verifier
and test_progs suite with interpreter, JIT and JIT with hardening enabled
on x86-64 and arm64 runs successfully.
[0] Speculose: Analyzing the Security Implications of Speculative
Execution in CPUs, Giorgi Maisuradze and Christian Rossow,
https://arxiv.org/pdf/1801.04084.pdf
[1] A Systematic Evaluation of Transient Execution Attacks and
Defenses, Claudio Canella, Jo Van Bulck, Michael Schwarz,
Moritz Lipp, Benjamin von Berg, Philipp Ortner, Frank Piessens,
Dmitry Evtyushkin, Daniel Gruss,
https://arxiv.org/pdf/1811.05441.pdf
Fixes: b2157399cc98 ("bpf: prevent out-of-bounds speculation")
Reported-by: Jann Horn <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Acked-by: Alexei Starovoitov <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]> |
int jas_stream_flushbuf(jas_stream_t *stream, int c)
{
int len;
int n;
/* The stream should not be in an error or EOF state. */
if ((stream->flags_ & (JAS_STREAM_ERRMASK)) != 0) {
return EOF;
}
/* The stream must be open for writing. */
if ((stream->openmode_ & (JAS_STREAM_WRITE | JAS_STREAM_APPEND)) == 0) {
return EOF;
}
/* The buffer should not currently be in use for reading. */
assert(!(stream->bufmode_ & JAS_STREAM_RDBUF));
/* Note: Do not use the quantity stream->cnt to determine the number
of characters in the buffer! Depending on how this function was
called, the stream->cnt value may be "off-by-one". */
len = stream->ptr_ - stream->bufstart_;
if (len > 0) {
n = (*stream->ops_->write_)(stream->obj_, (char *)
stream->bufstart_, len);
if (n != len) {
stream->flags_ |= JAS_STREAM_ERR;
return EOF;
}
}
stream->cnt_ = stream->bufsize_;
stream->ptr_ = stream->bufstart_;
stream->bufmode_ |= JAS_STREAM_WRBUF;
if (c != EOF) {
assert(stream->cnt_ > 0);
return jas_stream_putc2(stream, c);
}
return 0;
} | 0 | [
"CWE-189"
]
| jasper | 3c55b399c36ef46befcb21e4ebc4799367f89684 | 267,696,890,211,774,700,000,000,000,000,000,000,000 | 42 | At many places in the code, jas_malloc or jas_recalloc was being
invoked with the size argument being computed in a manner that would not
allow integer overflow to be detected. Now, these places in the code
have been modified to use special-purpose memory allocation functions
(e.g., jas_alloc2, jas_alloc3, jas_realloc2) that check for overflow.
This should fix many security problems. |
static SDL_INLINE int hasNEON()
{
static int val = -1;
if (val != -1) {
return val;
}
val = SDL_HasNEON();
return val;
} | 0 | [
"CWE-190",
"CWE-787"
]
| SDL_ttf | db1b41ab8bde6723c24b866e466cad78c2fa0448 | 339,410,186,121,958,260,000,000,000,000,000,000,000 | 9 | More integer overflow (see bug #187)
Make sure that 'width + alignment' doesn't overflow, otherwise
it could create a SDL_Surface of 'width' but with wrong 'pitch' |
ConsumerReg(qqueue_t *pThis, wti_t *pWti)
{
int iCancelStateSave;
DEFiRet;
ISOBJ_TYPE_assert(pThis, qqueue);
ISOBJ_TYPE_assert(pWti, wti);
CHKiRet(DequeueForConsumer(pThis, pWti));
/* we now have a non-idle batch of work, so we can release the queue mutex and process it */
d_pthread_mutex_unlock(pThis->mut);
/* at this spot, we may be cancelled */
pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &iCancelStateSave);
CHKiRet(pThis->pConsumer(pThis->pUsr, &pWti->batch, &pThis->bShutdownImmediate));
/* we now need to check if we should deliberately delay processing a bit
* and, if so, do that. -- rgerhards, 2008-01-30
*/
//TODO: MULTIQUEUE: the following setting is no longer correct - need to think about how to do that...
if(pThis->iDeqSlowdown) {
DBGOPRINT((obj_t*) pThis, "sleeping %d microseconds as requested by config params\n",
pThis->iDeqSlowdown);
srSleep(pThis->iDeqSlowdown / 1000000, pThis->iDeqSlowdown % 1000000);
}
/* but now cancellation is no longer permitted */
pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &iCancelStateSave);
/* now we are done, but need to re-aquire the mutex */
d_pthread_mutex_lock(pThis->mut);
finalize_it:
dbgprintf("regular consumer finished, iret=%d, szlog %d sz phys %d\n", iRet,
getLogicalQueueSize(pThis), getPhysicalQueueSize(pThis));
RETiRet;
} | 0 | [
"CWE-772"
]
| rsyslog | dfa88369d4ca4290db56b843f9eabdae1bfe0fd5 | 56,621,731,657,061,930,000,000,000,000,000,000,000 | 39 | bugfix: memory leak when $RepeatedMsgReduction on was used
bug tracker: http://bugzilla.adiscon.com/show_bug.cgi?id=225 |
static bool verify_and_prune(const char *cgroup_use)
{
const char *p;
char *e;
int i, j;
for (p = cgroup_use; p && *p; p = e + 1) {
e = strchr(p, ',');
if (e)
*e = '\0';
if (!in_subsystem_list(p)) {
ERROR("Controller %s required by lxc.cgroup.use but not available\n", p);
return false;
}
if (e)
*e = ',';
if (!e)
break;
}
for (i = 0; i < nr_subsystems;) {
if (in_comma_list(subsystems[i], cgroup_use)) {
i++;
continue;
}
free(subsystems[i]);
for (j = i; j < nr_subsystems-1; j++)
subsystems[j] = subsystems[j+1];
subsystems[nr_subsystems-1] = NULL;
nr_subsystems--;
}
return true;
} | 0 | [
"CWE-59",
"CWE-61"
]
| lxc | 592fd47a6245508b79fe6ac819fe6d3b2c1289be | 197,744,606,998,768,060,000,000,000,000,000,000,000 | 36 | CVE-2015-1335: Protect container mounts against symlinks
When a container starts up, lxc sets up the container's inital fstree
by doing a bunch of mounting, guided by the container configuration
file. The container config is owned by the admin or user on the host,
so we do not try to guard against bad entries. However, since the
mount target is in the container, it's possible that the container admin
could divert the mount with symbolic links. This could bypass proper
container startup (i.e. confinement of a root-owned container by the
restrictive apparmor policy, by diverting the required write to
/proc/self/attr/current), or bypass the (path-based) apparmor policy
by diverting, say, /proc to /mnt in the container.
To prevent this,
1. do not allow mounts to paths containing symbolic links
2. do not allow bind mounts from relative paths containing symbolic
links.
Details:
Define safe_mount which ensures that the container has not inserted any
symbolic links into any mount targets for mounts to be done during
container setup.
The host's mount path may contain symbolic links. As it is under the
control of the administrator, that's ok. So safe_mount begins the check
for symbolic links after the rootfs->mount, by opening that directory.
It opens each directory along the path using openat() relative to the
parent directory using O_NOFOLLOW. When the target is reached, it
mounts onto /proc/self/fd/<targetfd>.
Use safe_mount() in mount_entry(), when mounting container proc,
and when needed. In particular, safe_mount() need not be used in
any case where:
1. the mount is done in the container's namespace
2. the mount is for the container's rootfs
3. the mount is relative to a tmpfs or proc/sysfs which we have
just safe_mount()ed ourselves
Since we were using proc/net as a temporary placeholder for /proc/sys/net
during container startup, and proc/net is a symbolic link, use proc/tty
instead.
Update the lxc.container.conf manpage with details about the new
restrictions.
Finally, add a testcase to test some symbolic link possibilities.
Reported-by: Roman Fiedler
Signed-off-by: Serge Hallyn <[email protected]>
Acked-by: Stéphane Graber <[email protected]> |
alert_match_word (char *word, char *masks)
{
char *p = masks;
char endchar;
int res;
if (masks[0] == 0)
return FALSE;
while (1)
{
/* if it's a 0, space or comma, the word has ended. */
if (*p == 0 || *p == ' ' || *p == ',')
{
endchar = *p;
*p = 0;
res = match (masks, word);
*p = endchar;
if (res)
return TRUE; /* yes, matched! */
masks = p + 1;
if (*p == 0)
return FALSE;
}
p++;
}
} | 0 | [
"CWE-22"
]
| hexchat | 4e061a43b3453a9856d34250c3913175c45afe9d | 142,619,710,863,796,490,000,000,000,000,000,000,000 | 29 | Clean up handling CAP LS |
MagickExport ChannelPerceptualHash *GetImagePerceptualHash(const Image *image,
ExceptionInfo *exception)
{
ChannelPerceptualHash
*perceptual_hash;
char
*colorspaces,
*p,
*q;
const char
*artifact;
MagickBooleanType
status;
ssize_t
i;
perceptual_hash=(ChannelPerceptualHash *) AcquireQuantumMemory(
MaxPixelChannels+1UL,sizeof(*perceptual_hash));
if (perceptual_hash == (ChannelPerceptualHash *) NULL)
return((ChannelPerceptualHash *) NULL);
artifact=GetImageArtifact(image,"phash:colorspaces");
if (artifact != NULL)
colorspaces=AcquireString(artifact);
else
colorspaces=AcquireString("sRGB,HCLp");
perceptual_hash[0].number_colorspaces=0;
perceptual_hash[0].number_channels=0;
q=colorspaces;
for (i=0; (p=StringToken(",",&q)) != (char *) NULL; i++)
{
ChannelMoments
*moments;
Image
*hash_image;
size_t
j;
ssize_t
channel,
colorspace;
if (i >= MaximumNumberOfPerceptualColorspaces)
break;
colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse,p);
if (colorspace < 0)
break;
perceptual_hash[0].colorspace[i]=(ColorspaceType) colorspace;
hash_image=BlurImage(image,0.0,1.0,exception);
if (hash_image == (Image *) NULL)
break;
hash_image->depth=8;
status=TransformImageColorspace(hash_image,(ColorspaceType) colorspace,
exception);
if (status == MagickFalse)
break;
moments=GetImageMoments(hash_image,exception);
perceptual_hash[0].number_colorspaces++;
perceptual_hash[0].number_channels+=GetImageChannels(hash_image);
hash_image=DestroyImage(hash_image);
if (moments == (ChannelMoments *) NULL)
break;
for (channel=0; channel <= MaxPixelChannels; channel++)
for (j=0; j < MaximumNumberOfImageMoments; j++)
perceptual_hash[channel].phash[i][j]=
(-MagickLog10(moments[channel].invariant[j]));
moments=(ChannelMoments *) RelinquishMagickMemory(moments);
}
colorspaces=DestroyString(colorspaces);
return(perceptual_hash);
} | 0 | []
| ImageMagick | 4717744e4bb27de8ea978e51c6d5bcddf62ffe49 | 236,022,336,421,504,100,000,000,000,000,000,000,000 | 76 | https://github.com/ImageMagick/ImageMagick/issues/3332 |
TEST_F(RouterTest, AppendUpstreamHost11) {
testAppendUpstreamHost(
absl::make_optional(Http::LowerCaseString("x-custom-upstream-hostname")),
absl::make_optional(Http::LowerCaseString("x-custom-upstream-host-address")));
} | 0 | [
"CWE-703"
]
| envoy | 18871dbfb168d3512a10c78dd267ff7c03f564c6 | 74,873,757,588,616,140,000,000,000,000,000,000,000 | 5 | [1.18] CVE-2022-21655
Crash with direct_response
Signed-off-by: Otto van der Schaaf <[email protected]> |
p11_rpc_buffer_free (p11_buffer *buf)
{
if (buf == NULL)
return;
p11_buffer_uninit (buf);
free (buf);
} | 0 | [
"CWE-787"
]
| p11-kit | 2617f3ef888e103324a28811886b99ed0a56346d | 48,722,575,878,319,560,000,000,000,000,000,000,000 | 8 | Check attribute length against buffer size
If an attribute's length does not match the length of the byte array
inside it, one length was used for allocation, and the other was used
for memcpy. This additional check will instead return an error on
malformed messages. |
QPDF::resolveObjectsInStream(int obj_stream_number)
{
// Force resolution of object stream
QPDFObjectHandle obj_stream = getObjectByID(obj_stream_number, 0);
if (! obj_stream.isStream())
{
throw QPDFExc(qpdf_e_damaged_pdf, this->m->file->getName(),
this->m->last_object_description,
this->m->file->getLastOffset(),
"supposed object stream " +
QUtil::int_to_string(obj_stream_number) +
" is not a stream");
}
// For linearization data in the object, use the data from the
// object stream for the objects in the stream.
QPDFObjGen stream_og(obj_stream_number, 0);
qpdf_offset_t end_before_space =
this->m->obj_cache[stream_og].end_before_space;
qpdf_offset_t end_after_space =
this->m->obj_cache[stream_og].end_after_space;
QPDFObjectHandle dict = obj_stream.getDict();
if (! (dict.getKey("/Type").isName() &&
dict.getKey("/Type").getName() == "/ObjStm"))
{
QTC::TC("qpdf", "QPDF ERR object stream with wrong type");
throw QPDFExc(qpdf_e_damaged_pdf, this->m->file->getName(),
this->m->last_object_description,
this->m->file->getLastOffset(),
"supposed object stream " +
QUtil::int_to_string(obj_stream_number) +
" has wrong type");
}
if (! (dict.getKey("/N").isInteger() &&
dict.getKey("/First").isInteger()))
{
throw QPDFExc(qpdf_e_damaged_pdf, this->m->file->getName(),
this->m->last_object_description,
this->m->file->getLastOffset(),
"object stream " +
QUtil::int_to_string(obj_stream_number) +
" has incorrect keys");
}
int n = dict.getKey("/N").getIntValue();
int first = dict.getKey("/First").getIntValue();
std::map<int, int> offsets;
PointerHolder<Buffer> bp = obj_stream.getStreamData();
PointerHolder<InputSource> input = new BufferInputSource(
"object stream " + QUtil::int_to_string(obj_stream_number),
bp.getPointer());
for (int i = 0; i < n; ++i)
{
QPDFTokenizer::Token tnum = readToken(input);
QPDFTokenizer::Token toffset = readToken(input);
if (! ((tnum.getType() == QPDFTokenizer::tt_integer) &&
(toffset.getType() == QPDFTokenizer::tt_integer)))
{
throw QPDFExc(qpdf_e_damaged_pdf, input->getName(),
this->m->last_object_description,
input->getLastOffset(),
"expected integer in object stream header");
}
int num = atoi(tnum.getValue().c_str());
int offset = QUtil::string_to_ll(toffset.getValue().c_str());
offsets[num] = offset + first;
}
// To avoid having to read the object stream multiple times, store
// all objects that would be found here in the cache. Remember
// that some objects stored here might have been overridden by new
// objects appended to the file, so it is necessary to recheck the
// xref table and only cache what would actually be resolved here.
for (std::map<int, int>::iterator iter = offsets.begin();
iter != offsets.end(); ++iter)
{
int obj = (*iter).first;
QPDFObjGen og(obj, 0);
QPDFXRefEntry const& entry = this->m->xref_table[og];
if ((entry.getType() == 2) &&
(entry.getObjStreamNumber() == obj_stream_number))
{
int offset = (*iter).second;
input->seek(offset, SEEK_SET);
QPDFObjectHandle oh = readObject(input, "", obj, 0, true);
this->m->obj_cache[og] =
ObjCache(QPDFObjectHandle::ObjAccessor::getObject(oh),
end_before_space, end_after_space);
}
else
{
QTC::TC("qpdf", "QPDF not caching overridden objstm object");
}
}
} | 1 | [
"CWE-125"
]
| qpdf | 1868a10f8b06631362618bfc85ca8646da4b4b71 | 153,430,860,824,356,650,000,000,000,000,000,000,000 | 101 | Replace all atoi calls with QUtil::string_to_int
The latter catches underflow/overflow. |
/*
 * Synthesize a relative call instruction at @from whose target is @to.
 * Thin wrapper that supplies the call opcode (RELATIVECALL_OPCODE) to
 * __synthesize_relative_insn(), which encodes and stores the insn.
 */
void synthesize_relcall(void *from, void *to)
{
	__synthesize_relative_insn(from, to, RELATIVECALL_OPCODE);
}
"CWE-264"
]
| linux | 548acf19234dbda5a52d5a8e7e205af46e9da840 | 201,277,276,651,982,340,000,000,000,000,000,000,000 | 4 | x86/mm: Expand the exception table logic to allow new handling options
Huge amounts of help from Andy Lutomirski and Borislav Petkov to
produce this. Andy provided the inspiration to add classes to the
exception table with a clever bit-squeezing trick, Boris pointed
out how much cleaner it would all be if we just had a new field.
Linus Torvalds blessed the expansion with:
' I'd rather not be clever in order to save just a tiny amount of space
in the exception table, which isn't really criticial for anybody. '
The third field is another relative function pointer, this one to a
handler that executes the actions.
We start out with three handlers:
1: Legacy - just jumps the to fixup IP
2: Fault - provide the trap number in %ax to the fixup code
3: Cleaned up legacy for the uaccess error hack
Signed-off-by: Tony Luck <[email protected]>
Reviewed-by: Borislav Petkov <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Link: http://lkml.kernel.org/r/f6af78fcbd348cf4939875cfda9c19689b5e50b8.1455732970.git.tony.luck@intel.com
Signed-off-by: Ingo Molnar <[email protected]> |
**/
const CImg<T>& save_rgb(const char *const filename) const {
return _save_rgb(0,filename); | 0 | [
"CWE-125"
]
| CImg | 10af1e8c1ad2a58a0a3342a856bae63e8f257abb | 194,798,718,426,806,770,000,000,000,000,000,000,000 | 3 | Fix other issues in 'CImg<T>::load_bmp()'. |
ebb_ews_items_to_contacts (EBookBackendEws *bbews,
                           const GSList *new_items,
                           GSList **contacts,
                           GCancellable *cancellable,
                           GError **error)
{
	const GSList *iter;

	/* Convert every fetched EWS item into an EContact and prepend it to
	 * the output list; each contact is tagged with an X-EWS-KIND
	 * attribute.  Items whose type is E_EWS_ITEM_TYPE_ERROR are skipped. */
	for (iter = new_items; iter != NULL; iter = iter->next) {
		EEwsItem *item = iter->data;
		EContact *contact;

		if (e_ews_item_get_item_type (item) == E_EWS_ITEM_TYPE_ERROR)
			continue;

		contact = ebb_ews_item_to_contact (bbews, item, FALSE, cancellable, error);

		e_vcard_add_attribute_with_value (E_VCARD (contact),
			e_vcard_attribute_new (NULL, "X-EWS-KIND"),
			"DT_MAILUSER");

		*contacts = g_slist_prepend (*contacts, contact);
	}
}
"CWE-295"
]
| evolution-ews | 915226eca9454b8b3e5adb6f2fff9698451778de | 158,761,162,821,599,700,000,000,000,000,000,000,000 | 24 | I#27 - SSL Certificates are not validated
This depends on https://gitlab.gnome.org/GNOME/evolution-data-server/commit/6672b8236139bd6ef41ecb915f4c72e2a052dba5 too.
Closes https://gitlab.gnome.org/GNOME/evolution-ews/issues/27 |
/*
 * Publish a freshly grown slab page on its node's slab lists and account
 * for the new objects.  check_irq_off() documents the irqs-off calling
 * requirement; a NULL @page means the grow attempt produced nothing and
 * there is no work to do.
 */
static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
{
	struct kmem_cache_node *n;
	void *list = NULL;

	check_irq_off();

	if (!page)
		return;

	INIT_LIST_HEAD(&page->lru);
	n = get_node(cachep, page_to_nid(page));

	/* n->list_lock protects the per-node slab lists and counters. */
	spin_lock(&n->list_lock);
	n->total_slabs++;
	if (!page->active) {
		/* No objects handed out yet: whole slab goes on slabs_free. */
		list_add_tail(&page->lru, &(n->slabs_free));
		n->free_slabs++;
	} else
		/*
		 * page->active != 0: some objects of this slab are already
		 * in use -- presumably handed out during the grow; let
		 * fixup_slab_list() file the page.  TODO(review): confirm
		 * the path by which a fresh page arrives here with active
		 * objects.
		 */
		fixup_slab_list(cachep, n, page, &list);

	STATS_INC_GROWN(cachep);
	/* Only the not-yet-allocated objects count as free. */
	n->free_objects += cachep->num - page->active;
	spin_unlock(&n->list_lock);

	/* Freelist debug fixup, deliberately done after dropping the lock. */
	fixup_objfreelist_debug(cachep, &list);
}
"CWE-703"
]
| linux | c4e490cf148e85ead0d1b1c2caaba833f1d5b29f | 167,591,562,200,029,800,000,000,000,000,000,000,000 | 27 | mm/slab.c: fix SLAB freelist randomization duplicate entries
This patch fixes a bug in the freelist randomization code. When a high
random number is used, the freelist will contain duplicate entries. It
will result in different allocations sharing the same chunk.
It will result in odd behaviours and crashes. It should be uncommon but
it depends on the machines. We saw it happening more often on some
machines (every few hours of running tests).
Fixes: c7ce4f60ac19 ("mm: SLAB freelist randomization")
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: John Sperbeck <[email protected]>
Signed-off-by: Thomas Garnier <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: Pekka Enberg <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
/* {{{ php_mktime - (gm)mktime helper */
PHPAPI void php_mktime(INTERNAL_FUNCTION_PARAMETERS, int gmt)
{
long hou = 0, min = 0, sec = 0, mon = 0, day = 0, yea = 0, dst = -1;
timelib_time *now;
timelib_tzinfo *tzi = NULL;
long ts, adjust_seconds = 0;
int error;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|lllllll", &hou, &min, &sec, &mon, &day, &yea, &dst) == FAILURE) {
RETURN_FALSE;
}
/* Initialize structure with current time */
now = timelib_time_ctor();
if (gmt) {
timelib_unixtime2gmt(now, (timelib_sll) time(NULL));
} else {
tzi = get_timezone_info(TSRMLS_C);
now->tz_info = tzi;
now->zone_type = TIMELIB_ZONETYPE_ID;
timelib_unixtime2local(now, (timelib_sll) time(NULL));
}
/* Fill in the new data */
switch (ZEND_NUM_ARGS()) {
case 7:
/* break intentionally missing */
case 6:
if (yea >= 0 && yea < 70) {
yea += 2000;
} else if (yea >= 70 && yea <= 100) {
yea += 1900;
}
now->y = yea;
/* break intentionally missing again */
case 5:
now->d = day;
/* break missing intentionally here too */
case 4:
now->m = mon;
/* and here */
case 3:
now->s = sec;
/* yup, this break isn't here on purpose too */
case 2:
now->i = min;
/* last intentionally missing break */
case 1:
now->h = hou;
break;
default:
php_error_docref(NULL TSRMLS_CC, E_STRICT, "You should be using the time() function instead");
}
/* Update the timestamp */
if (gmt) {
timelib_update_ts(now, NULL);
} else {
timelib_update_ts(now, tzi);
}
/* Support for the deprecated is_dst parameter */
if (dst != -1) {
php_error_docref(NULL TSRMLS_CC, E_DEPRECATED, "The is_dst parameter is deprecated");
if (gmt) {
/* GMT never uses DST */
if (dst == 1) {
adjust_seconds = -3600;
}
} else {
/* Figure out is_dst for current TS */
timelib_time_offset *tmp_offset;
tmp_offset = timelib_get_time_zone_info(now->sse, tzi);
if (dst == 1 && tmp_offset->is_dst == 0) {
adjust_seconds = -3600;
}
if (dst == 0 && tmp_offset->is_dst == 1) {
adjust_seconds = +3600;
}
timelib_time_offset_dtor(tmp_offset);
}
}
/* Clean up and return */
ts = timelib_date_to_int(now, &error);
ts += adjust_seconds;
timelib_time_dtor(now);
if (error) {
RETURN_FALSE;
} else {
RETURN_LONG(ts); | 0 | []
| php-src | bb057498f7457e8b2eba98332a3bad434de4cf12 | 32,128,432,420,572,236,000,000,000,000,000,000,000 | 89 | Fix #70277: new DateTimeZone($foo) is ignoring text after null byte
The DateTimeZone constructors are not binary safe. They're parsing the timezone
as string, but discard the length when calling timezone_initialize(). This
patch adds a tz_len parameter and a respective check to timezone_initialize(). |
static int check_purpose_crl_sign(const X509_PURPOSE *xp, const X509 *x, int ca)
	{
	/* CA certificates: delegate to check_ca().  A result of 2 means the
	 * certificate is a usable CA, which this purpose reports as 0 (not
	 * a plain CRL signer); any other result is returned unchanged. */
	if (ca)
		{
		int ca_ret = check_ca(x);
		return (ca_ret == 2) ? 0 : ca_ret;
		}
	/* End-entity certificate: acceptable unless keyUsage forbids
	 * cRLSign. */
	return ku_reject(x, KU_CRL_SIGN) ? 0 : 1;
	}
| openssl | c7235be6e36c4bef84594aa3b2f0561db84b63d8 | 213,676,989,977,893,340,000,000,000,000,000,000,000 | 10 | RFC 3161 compliant time stamp request creation, response generation
and response verification.
Submitted by: Zoltan Glozik <[email protected]>
Reviewed by: Ulf Moeller |
/*
 * Tear down a generic-netlink family's multicast groups: remove every
 * per-namespace subscriber from the groups, return the group ids to the
 * mc_groups bitmap, and emit a CTRL_CMD_DELMCAST_GRP event per group.
 */
static void genl_unregister_mc_groups(struct genl_family *family)
{
	struct net *net;
	int i;

	/*
	 * netlink_table_grab() serializes against the netlink socket
	 * tables while the RCU read lock protects the namespace walk.
	 */
	netlink_table_grab();
	rcu_read_lock();
	for_each_net_rcu(net) {
		for (i = 0; i < family->n_mcgrps; i++)
			__netlink_clear_multicast_users(
				net->genl_sock, family->mcgrp_offset + i);
	}
	rcu_read_unlock();
	netlink_table_ungrab();

	for (i = 0; i < family->n_mcgrps; i++) {
		int grp_id = family->mcgrp_offset + i;

		/*
		 * Group id 1 is never returned to the allocator --
		 * presumably a statically reserved group (controller
		 * notify?); TODO(review): confirm against GENL_ID_CTRL
		 * group handling.
		 */
		if (grp_id != 1)
			clear_bit(grp_id, mc_groups);
		genl_ctrl_event(CTRL_CMD_DELMCAST_GRP, family,
				&family->mcgrps[i], grp_id);
	}
}
"CWE-264"
]
| net | 90f62cf30a78721641e08737bda787552428061e | 172,737,261,470,982,280,000,000,000,000,000,000,000 | 24 | net: Use netlink_ns_capable to verify the permisions of netlink messages
It is possible by passing a netlink socket to a more privileged
executable and then to fool that executable into writing to the socket
data that happens to be valid netlink message to do something that
privileged executable did not intend to do.
To keep this from happening replace bare capable and ns_capable calls
with netlink_capable, netlink_net_calls and netlink_ns_capable calls.
Which act the same as the previous calls except they verify that the
opener of the socket had the desired permissions as well.
Reported-by: Andy Lutomirski <[email protected]>
Signed-off-by: "Eric W. Biederman" <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
/*
  WritePALMImage() writes an image, or an image sequence, in the Palm OS
  bitmap format.  For each frame it emits a Palm bitmap header, an
  optional colormap (8-bit palette frames) or direct-color descriptor
  (16-bit frames), then the packed pixel rows, optionally RLE- or
  scanline ("fax") compressed.  Frames are chained through the 16-bit
  nextDepthOffset header word.  Returns MagickTrue on success.
*/
static MagickBooleanType WritePALMImage(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  MagickOffsetType
    currentOffset,
    offset,
    scene;

  MagickSizeType
    cc;

  PixelInfo
    transpix;

  QuantizeInfo
    *quantize_info;

  register ssize_t
    x;

  register const Quantum
    *p;

  register Quantum
    *q;

  ssize_t
    y;

  size_t
    count,
    bits_per_pixel,
    bytes_per_row,
    imageListLength,
    nextDepthOffset,
    one;

  unsigned char
    bit,
    byte,
    color,
    *last_row,
    *one_row,
    *ptr,
    version;

  unsigned int
    transparentIndex;

  unsigned short
    color16,
    flags;

  /*
    Open output image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
  if (status == MagickFalse)
    return(status);
  quantize_info=AcquireQuantizeInfo(image_info);
  flags=0;
  currentOffset=0;
  transparentIndex=0;
  transpix.red=0.0;
  transpix.green=0.0;
  transpix.blue=0.0;
  transpix.alpha=0.0;
  one=1;
  version=0;
  scene=0;
  imageListLength=GetImageListLength(image);
  do
  {
    /*
      Pick the smallest power-of-two bit depth (1..16) that covers the
      frame's color count; shallow frames are reduced to a sorted gray
      palette, deep frames become 16-bit direct color.
    */
    (void) TransformImageColorspace(image,sRGBColorspace,exception);
    count=GetNumberColors(image,NULL,exception);
    for (bits_per_pixel=1; (one << bits_per_pixel) < count; bits_per_pixel*=2) ;
    if (bits_per_pixel > 16)
      bits_per_pixel=16;
    else
      if (bits_per_pixel < 16)
        (void) TransformImageColorspace(image,image->colorspace,exception);
    if (bits_per_pixel < 8)
      {
        (void) TransformImageColorspace(image,GRAYColorspace,exception);
        (void) SetImageType(image,PaletteType,exception);
        (void) SortColormapByIntensity(image,exception);
      }
    if ((image->storage_class == PseudoClass) && (image->colors > 256))
      (void) SetImageStorageClass(image,DirectClass,exception);
    if (image->storage_class == PseudoClass)
      flags|=PALM_HAS_COLORMAP_FLAG;
    else
      flags|=PALM_IS_DIRECT_COLOR;
    /*
      Emit the fixed 16-byte Palm bitmap header; some fields (flags,
      transparent index, compressed size, nextDepthOffset) are patched
      in later via SeekBlob().
    */
    (void) WriteBlobMSBShort(image,(unsigned short) image->columns); /* width */
    (void) WriteBlobMSBShort(image,(unsigned short) image->rows);  /* height */
    bytes_per_row=((image->columns+(16/bits_per_pixel-1))/(16/
      bits_per_pixel))*2;
    (void) WriteBlobMSBShort(image,(unsigned short) bytes_per_row);
    if ((image_info->compression == RLECompression) ||
        (image_info->compression == FaxCompression))
      flags|=PALM_IS_COMPRESSED_FLAG;
    (void) WriteBlobMSBShort(image, flags);
    (void) WriteBlobByte(image,(unsigned char) bits_per_pixel);
    if (bits_per_pixel > 1)
      version=1;
    if ((image_info->compression == RLECompression) ||
        (image_info->compression == FaxCompression))
      version=2;
    (void) WriteBlobByte(image,version);
    (void) WriteBlobMSBShort(image,0); /* nextDepthOffset */
    (void) WriteBlobByte(image,(unsigned char) transparentIndex);
    if (image_info->compression == RLECompression)
      (void) WriteBlobByte(image,PALM_COMPRESSION_RLE);
    else
      if (image_info->compression == FaxCompression)
        (void) WriteBlobByte(image,PALM_COMPRESSION_SCANLINE);
      else
        (void) WriteBlobByte(image,PALM_COMPRESSION_NONE);
    (void) WriteBlobMSBShort(image,0); /* reserved */
    offset=16;
    if (bits_per_pixel == 16)
      {
        (void) WriteBlobByte(image,5); /* # of bits of red */
        (void) WriteBlobByte(image,6); /* # of bits of green */
        (void) WriteBlobByte(image,5); /* # of bits of blue */
        (void) WriteBlobByte(image,0); /* reserved by Palm */
        (void) WriteBlobMSBLong(image,0); /* no transparent color, YET */
        offset+=8;
      }
    if (bits_per_pixel == 8)
      {
        if (flags & PALM_HAS_COLORMAP_FLAG) /* Write out colormap */
          {
            quantize_info->dither_method=IdentifyPaletteImage(image,exception)
              == MagickFalse ? RiemersmaDitherMethod : NoDitherMethod;
            quantize_info->number_colors=image->colors;
            (void) QuantizeImage(quantize_info,image,exception);
            (void) WriteBlobMSBShort(image,(unsigned short) image->colors);
            for (count = 0; count < image->colors; count++)
            {
              (void) WriteBlobByte(image,(unsigned char) count);
              (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
                image->colormap[count].red)));
              (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
                image->colormap[count].green)));
              (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
                image->colormap[count].blue)));
            }
            offset+=2+count*4;
          }
        else /* Map colors to Palm standard colormap */
          {
            Image
              *affinity_image;

            affinity_image=ConstituteImage(256,1,"RGB",CharPixel,&PalmPalette,
              exception);
            (void) TransformImageColorspace(affinity_image,
              affinity_image->colorspace,exception);
            (void) RemapImage(quantize_info,image,affinity_image,exception);
            for (y=0; y < (ssize_t) image->rows; y++)
            {
              q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
              if (q == (Quantum *) NULL)
                break;
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                SetPixelIndex(image,(Quantum) FindColor(&image->colormap[(ssize_t)
                  GetPixelIndex(image,q)]),q);
                q+=GetPixelChannels(image);
              }
            }
            affinity_image=DestroyImage(affinity_image);
          }
      }
    if (flags & PALM_IS_COMPRESSED_FLAG)
      (void) WriteBlobMSBShort(image,0); /* fill in size later */
    last_row=(unsigned char *) NULL;
    /* Scanline ("fax") compression diffs each row against last_row. */
    if (image_info->compression == FaxCompression)
      {
        last_row=(unsigned char *) AcquireQuantumMemory(bytes_per_row+256,
          sizeof(*last_row));
        if (last_row == (unsigned char *) NULL)
          {
            quantize_info=DestroyQuantizeInfo(quantize_info);
            ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
          }
      }
    one_row=(unsigned char *) AcquireQuantumMemory(bytes_per_row+256,
      sizeof(*one_row));
    if (one_row == (unsigned char *) NULL)
      {
        if (last_row != (unsigned char *) NULL)
          last_row=(unsigned char *) RelinquishMagickMemory(last_row);
        quantize_info=DestroyQuantizeInfo(quantize_info);
        ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
      }
    /*
      Pack each row into one_row, then emit it raw or compressed.
    */
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      ptr=one_row;
      (void) memset(ptr,0,bytes_per_row);
      p=GetVirtualPixels(image,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        break;
      if (bits_per_pixel == 16)
        {
          /* RGB565 packing; a fully transparent pixel records the
             transparent color for the header fixup below. */
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            color16=(unsigned short) ((((31*(size_t) GetPixelRed(image,p))/
              (size_t) QuantumRange) << 11) | (((63*(size_t)
              GetPixelGreen(image,p))/(size_t) QuantumRange) << 5) |
              ((31*(size_t) GetPixelBlue(image,p))/(size_t) QuantumRange));
            if (GetPixelAlpha(image,p) == (Quantum) TransparentAlpha)
              {
                transpix.red=(MagickRealType) GetPixelRed(image,p);
                transpix.green=(MagickRealType) GetPixelGreen(image,p);
                transpix.blue=(MagickRealType) GetPixelBlue(image,p);
                transpix.alpha=(MagickRealType) GetPixelAlpha(image,p);
                flags|=PALM_HAS_TRANSPARENCY_FLAG;
              }
            *ptr++=(unsigned char) ((color16 >> 8) & 0xff);
            *ptr++=(unsigned char) (color16 & 0xff);
            p+=GetPixelChannels(image);
          }
        }
      else
        {
          /* Pack colormap indexes MSB-first, bits_per_pixel at a time. */
          byte=0x00;
          bit=(unsigned char) (8-bits_per_pixel);
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            if (bits_per_pixel >= 8)
              color=(unsigned char) GetPixelIndex(image,p);
            else
              color=(unsigned char) (GetPixelIndex(image,p)*
                ((one << bits_per_pixel)-1)/MagickMax(1*image->colors-1,1));
            byte|=color << bit;
            if (bit != 0)
              bit-=(unsigned char) bits_per_pixel;
            else
              {
                *ptr++=byte;
                byte=0x00;
                bit=(unsigned char) (8-bits_per_pixel);
              }
            p+=GetPixelChannels(image);
          }
          if ((image->columns % (8/bits_per_pixel)) != 0)
            *ptr++=byte;
        }
      if (image_info->compression == RLECompression)
        {
          /*
            NOTE(review): one_row[++x] is evaluated before the bounds
            test, so the byte one past bytes_per_row is read; the +256
            allocation slack absorbs it, but the condition ordering
            looks fragile -- confirm against the upstream fix for
            issue #1726.
          */
          x=0;
          while (x < (ssize_t) bytes_per_row)
          {
            byte=one_row[x];
            count=1;
            while ((one_row[++x] == byte) && (count < 255) &&
                   (x < (ssize_t) bytes_per_row))
              count++;
            (void) WriteBlobByte(image,(unsigned char) count);
            (void) WriteBlobByte(image,(unsigned char) byte);
          }
        }
      else
        if (image_info->compression == FaxCompression)
          {
            char
              tmpbuf[8],
              *tptr;

            /* Per 8-byte group: a bitmask of changed bytes (vs. the
               previous row) followed by only the changed bytes. */
            for (x = 0; x < (ssize_t) bytes_per_row; x += 8)
            {
              tptr = tmpbuf;
              for (bit=0, byte=0; bit < (unsigned char) MagickMin(8,(ssize_t) bytes_per_row-x); bit++)
              {
                if ((y == 0) || (last_row[x + bit] != one_row[x + bit]))
                  {
                    byte |= (1 << (7 - bit));
                    *tptr++ = (char) one_row[x + bit];
                  }
              }
              (void) WriteBlobByte(image, byte);
              (void) WriteBlob(image,tptr-tmpbuf,(unsigned char *) tmpbuf);
            }
            (void) memcpy(last_row,one_row,bytes_per_row);
          }
        else
          (void) WriteBlob(image,bytes_per_row,one_row);
    }
    /*
      Patch header fields whose values are only known after the pixel
      pass: flags/transparent index, 16-bit transparent color, and the
      compressed data size.
    */
    if (flags & PALM_HAS_TRANSPARENCY_FLAG)
      {
        offset=SeekBlob(image,currentOffset+6,SEEK_SET);
        (void) WriteBlobMSBShort(image,flags);
        offset=SeekBlob(image,currentOffset+12,SEEK_SET);
        (void) WriteBlobByte(image,(unsigned char) transparentIndex);  /* trans index */
      }
    if (bits_per_pixel == 16)
      {
        offset=SeekBlob(image,currentOffset+20,SEEK_SET);
        (void) WriteBlobByte(image,0);  /* reserved by Palm */
        (void) WriteBlobByte(image,(unsigned char) ((31*transpix.red)/
          QuantumRange));
        (void) WriteBlobByte(image,(unsigned char) ((63*transpix.green)/
          QuantumRange));
        (void) WriteBlobByte(image,(unsigned char) ((31*transpix.blue)/
          QuantumRange));
      }
    if (flags & PALM_IS_COMPRESSED_FLAG)  /* fill in size now */
      {
        offset=SeekBlob(image,currentOffset+offset,SEEK_SET);
        (void) WriteBlobMSBShort(image,(unsigned short) (GetBlobSize(image)-
          currentOffset-offset));
      }
    if (one_row != (unsigned char *) NULL)
      one_row=(unsigned char *) RelinquishMagickMemory(one_row);
    if (last_row != (unsigned char *) NULL)
      last_row=(unsigned char *) RelinquishMagickMemory(last_row);
    if (GetNextImageInList(image) == (Image *) NULL)
      break;
    /* padding to 4 byte word */
    for (cc=(GetBlobSize(image)) % 4; cc > 0; cc--)
      (void) WriteBlobByte(image,0);
    /* write nextDepthOffset and return to end of image */
    offset=SeekBlob(image,currentOffset+10,SEEK_SET);
    /*
      NOTE(review): nextDepthOffset (and the compressed-size field
      above) are truncated to unsigned short; frames larger than
      64K 4-byte words overflow the cast (cf. CWE-190) -- confirm
      against the upstream fix for issue #1726.
    */
    nextDepthOffset=(size_t) ((GetBlobSize(image)-currentOffset)/4);
    (void) WriteBlobMSBShort(image,(unsigned short) nextDepthOffset);
    currentOffset=(MagickOffsetType) GetBlobSize(image);
    offset=SeekBlob(image,currentOffset,SEEK_SET);
    image=SyncNextImageInList(image);
    status=SetImageProgress(image,SaveImagesTag,scene++,imageListLength);
    if (status == MagickFalse)
      break;
  } while (image_info->adjoin != MagickFalse);
  quantize_info=DestroyQuantizeInfo(quantize_info);
  (void) CloseBlob(image);
  return(MagickTrue);
}
"CWE-190"
]
| ImageMagick | db5e12e24f1378ce8c93a5c35991dcdd23a67bb0 | 303,958,891,479,009,840,000,000,000,000,000,000,000 | 349 | https://github.com/ImageMagick/ImageMagick/issues/1726 |
static inline void SetPixelOpacity(const Image *magick_restrict image,
  const Quantum alpha,Quantum *magick_restrict pixel)
{
  /*
    Store the opacity (the complement of alpha, QuantumRange-alpha) in
    the pixel's alpha channel slot; images without an alpha channel
    (UndefinedPixelTrait) are left untouched.
  */
  if (image->channel_map[AlphaPixelChannel].traits == UndefinedPixelTrait)
    return;
  pixel[image->channel_map[AlphaPixelChannel].offset]=QuantumRange-alpha;
}
"CWE-20",
"CWE-125"
]
| ImageMagick | 8187d2d8fd010d2d6b1a3a8edd935beec404dddc | 324,542,824,124,160,500,000,000,000,000,000,000,000 | 6 | https://github.com/ImageMagick/ImageMagick/issues/1610 |
Subsets and Splits