| func (string, 0–484k chars) | target (int64, 0–1) | cwe (list, 0–4 items) | project (string, 799 classes) | commit_id (string, 40 chars) | hash (float64) | size (int64, 1–24k) | message (string, 0–13.3k chars) |
|---|---|---|---|---|---|---|---|
TEST(IndexBoundsBuilderTest, TranslateGtCode) {
    auto testIndex = buildSimpleIndexEntry();
    BSONObj obj = BSON("a" << BSON("$gt" << BSONCode("function(){ return 0; }")));
    auto expr = parseMatchExpression(obj);
    BSONElement elt = obj.firstElement();
    OrderedIntervalList oil;
    IndexBoundsBuilder::BoundsTightness tightness;
    IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
    ASSERT_EQUALS(oil.name, "a");
    ASSERT_EQUALS(oil.intervals.size(), 1U);
    ASSERT_EQUALS(oil.intervals[0].toString(), "(function(){ return 0; }, CodeWScope( , {}))");
    ASSERT_FALSE(oil.intervals[0].startInclusive);
    ASSERT_FALSE(oil.intervals[0].endInclusive);
    ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
} | 0 | [
"CWE-754"
]
| mongo | f8f55e1825ee5c7bdb3208fc7c5b54321d172732 | 295,678,627,276,081,300,000,000,000,000,000,000,000 | 15 | SERVER-44377 generate correct plan for indexed inequalities to null |
query_info_compare(void* m1, void* m2)
{
	struct query_info* msg1 = (struct query_info*)m1;
	struct query_info* msg2 = (struct query_info*)m2;
	int mc;
	/* from most different to least different for speed */
	COMPARE_IT(msg1->qtype, msg2->qtype);
	if((mc = query_dname_compare(msg1->qname, msg2->qname)) != 0)
		return mc;
	log_assert(msg1->qname_len == msg2->qname_len);
	COMPARE_IT(msg1->qclass, msg2->qclass);
	return 0;
#undef COMPARE_IT
} | 0 | [
"CWE-787"
]
| unbound | 6c3a0b54ed8ace93d5b5ca7b8078dc87e75cd640 | 82,026,300,058,976,840,000,000,000,000,000,000,000 | 14 | - Fix Out of Bound Write Compressed Names in rdata_copy(),
reported by X41 D-Sec. |
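The function above uses COMPARE_IT and then `#undef`s it; the macro's definition sits just outside this extracted body in unbound's source. A plausible reconstruction, inferred only from how the macro is used here (return the sign of the first differing field), is:

```c
/* Hypothetical reconstruction of COMPARE_IT; the exact definition lives
 * outside the extracted function body in unbound's source. */
#define COMPARE_IT(x, y) \
	if((x) < (y)) \
		return -1; \
	else if((x) > (y)) \
		return +1;
	/* else: fields are equal, fall through to the next comparison */
```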
gx_default_cmyk_decode_color(
    gx_device * dev,
    gx_color_index color,
    gx_color_value cv[4] )
{
    /* The device may have been determined to be 'separable'. */
    if (colors_are_separable_and_linear(&dev->color_info))
        return gx_default_decode_color(dev, color, cv);
    else {
        int i, code = dev_proc(dev, map_color_rgb)(dev, color, cv);
        gx_color_value min_val = gx_max_color_value;
        for (i = 0; i < 3; i++) {
            if ((cv[i] = gx_max_color_value - cv[i]) < min_val)
                min_val = cv[i];
        }
        for (i = 0; i < 3; i++)
            cv[i] -= min_val;
        cv[3] = min_val;
        return code;
    }
} | 0 | []
| ghostpdl | c9b362ba908ca4b1d7c72663a33229588012d7d9 | 176,695,869,986,942,970,000,000,000,000,000,000,000 | 23 | Bug 699670: disallow copying of the epo device
The erasepage optimisation (epo) subclass device shouldn't be allowed to be
copied because the subclass private data, child and parent pointers end up
being shared between the original device and the copy.
Add an epo_finish_copydevice which NULLs the three offending pointers, and
then communicates to the caller that copying is not allowed.
This also exposed a separate issue with the stype for subclassed devices.
Devices are, I think, unique in having two stype objects associated with them:
the usual one in the memory manager header, and the other stored in the device
structure directly. In order for the stype to be correct, we have to use the
stype for the incoming device, with the ssize of the original device (ssize
should reflect the size of the memory allocation). We correctly did so with the
stype in the device structure, but then used the prototype device's stype to
patch the memory manager stype - meaning the ssize potentially no longer
matched the allocated memory. This caused problems in the garbager where there
is an implicit assumption that the size of a single object clump (c_alone == 1)
is also the size (+ memory manager overheads) of the single object it contains.
The solution is to use the same stype instance to patch the memory manager
data as we do in the device structure (with the correct ssize). |
static inline void v4l2l_get_timestamp(struct v4l2_buffer *b) {
	/* ktime_get_ts is considered deprecated, so use ktime_get_ts64 if possible */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
	struct timespec ts;
	ktime_get_ts(&ts);
#else
	struct timespec64 ts;
	ktime_get_ts64(&ts);
#endif
	b->timestamp.tv_sec = ts.tv_sec;
	b->timestamp.tv_usec = (ts.tv_nsec / NSEC_PER_USEC);
} | 0 | [
"CWE-787"
]
| v4l2loopback | 64a216af4c09c9ba9326057d7e78994271827eff | 116,415,507,067,574,460,000,000,000,000,000,000,000 | 13 | add explicit format specifier to printf() invocations
CWE-134 |
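This entry's commit message names CWE-134 (uncontrolled format string), which the timestamp helper shown does not itself exhibit; the fix targets printf()-style calls elsewhere in the driver. A minimal sketch of that fix pattern, with illustrative names rather than the driver's own, assuming only standard C:

```c
#include <stdio.h>

/* CWE-134 fix pattern: never pass externally influenced text as the format
 * argument itself. `msg` is a hypothetical variable, not from v4l2loopback. */
static void log_bad(const char *msg)  { printf(msg); }        /* vulnerable: %n/%s in msg are interpreted */
static void log_good(const char *msg) { printf("%s", msg); }  /* fixed: explicit format specifier */

int main(void)
{
    log_bad("safe only because this is a literal\n");
    log_good("user text with %n stays inert\n");
    return 0;
}
```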
process_next_cpt_value(
ins_compl_next_state_T *st,
int *compl_type_arg,
pos_T *start_match_pos)
{
int compl_type = -1;
int status = INS_COMPL_CPT_OK;
st->found_all = FALSE;
while (*st->e_cpt == ',' || *st->e_cpt == ' ')
st->e_cpt++;
if (*st->e_cpt == '.' && !curbuf->b_scanned)
{
st->ins_buf = curbuf;
st->first_match_pos = *start_match_pos;
// Move the cursor back one character so that ^N can match the
// word immediately after the cursor.
if (ctrl_x_mode_normal() && dec(&st->first_match_pos) < 0)
{
// Move the cursor to after the last character in the
// buffer, so that word at start of buffer is found
// correctly.
st->first_match_pos.lnum = st->ins_buf->b_ml.ml_line_count;
st->first_match_pos.col =
(colnr_T)STRLEN(ml_get(st->first_match_pos.lnum));
}
st->last_match_pos = st->first_match_pos;
compl_type = 0;
// Remember the first match so that the loop stops when we
// wrap and come back there a second time.
st->set_match_pos = TRUE;
}
else if (vim_strchr((char_u *)"buwU", *st->e_cpt) != NULL
&& (st->ins_buf = ins_compl_next_buf(
st->ins_buf, *st->e_cpt)) != curbuf)
{
// Scan a buffer, but not the current one.
if (st->ins_buf->b_ml.ml_mfp != NULL) // loaded buffer
{
compl_started = TRUE;
st->first_match_pos.col = st->last_match_pos.col = 0;
st->first_match_pos.lnum = st->ins_buf->b_ml.ml_line_count + 1;
st->last_match_pos.lnum = 0;
compl_type = 0;
}
else // unloaded buffer, scan like dictionary
{
st->found_all = TRUE;
if (st->ins_buf->b_fname == NULL)
{
status = INS_COMPL_CPT_CONT;
goto done;
}
compl_type = CTRL_X_DICTIONARY;
st->dict = st->ins_buf->b_fname;
st->dict_f = DICT_EXACT;
}
msg_hist_off = TRUE; // reset in msg_trunc_attr()
vim_snprintf((char *)IObuff, IOSIZE, _("Scanning: %s"),
st->ins_buf->b_fname == NULL
? buf_spname(st->ins_buf)
: st->ins_buf->b_sfname == NULL
? st->ins_buf->b_fname
: st->ins_buf->b_sfname);
(void)msg_trunc_attr((char *)IObuff, TRUE, HL_ATTR(HLF_R));
}
else if (*st->e_cpt == NUL)
status = INS_COMPL_CPT_END;
else
{
if (ctrl_x_mode_line_or_eval())
compl_type = -1;
else if (*st->e_cpt == 'k' || *st->e_cpt == 's')
{
if (*st->e_cpt == 'k')
compl_type = CTRL_X_DICTIONARY;
else
compl_type = CTRL_X_THESAURUS;
if (*++st->e_cpt != ',' && *st->e_cpt != NUL)
{
st->dict = st->e_cpt;
st->dict_f = DICT_FIRST;
}
}
#ifdef FEAT_FIND_ID
else if (*st->e_cpt == 'i')
compl_type = CTRL_X_PATH_PATTERNS;
else if (*st->e_cpt == 'd')
compl_type = CTRL_X_PATH_DEFINES;
#endif
else if (*st->e_cpt == ']' || *st->e_cpt == 't')
{
msg_hist_off = TRUE; // reset in msg_trunc_attr()
compl_type = CTRL_X_TAGS;
vim_snprintf((char *)IObuff, IOSIZE, _("Scanning tags."));
(void)msg_trunc_attr((char *)IObuff, TRUE, HL_ATTR(HLF_R));
}
else
compl_type = -1;
// in any case e_cpt is advanced to the next entry
(void)copy_option_part(&st->e_cpt, IObuff, IOSIZE, ",");
st->found_all = TRUE;
if (compl_type == -1)
status = INS_COMPL_CPT_CONT;
}
done:
*compl_type_arg = compl_type;
return status;
} | 0 | [
"CWE-416"
]
| vim | 0ff01835a40f549c5c4a550502f62a2ac9ac447c | 208,734,963,631,586,250,000,000,000,000,000,000,000 | 115 | patch 9.0.0579: using freed memory when 'tagfunc' wipes out buffer
Problem: Using freed memory when 'tagfunc' wipes out buffer that holds
'complete'.
Solution: Make a copy of the option. Make sure cursor position is valid. |
next_packet (unsigned char const **bufptr, size_t *buflen,
unsigned char const **r_data, size_t *r_datalen, int *r_pkttype,
size_t *r_ntotal)
{
const unsigned char *buf = *bufptr;
size_t len = *buflen;
int c, ctb, pkttype;
unsigned long pktlen;
if (!len)
return gpg_error (GPG_ERR_NO_DATA);
ctb = *buf++; len--;
if ( !(ctb & 0x80) )
return gpg_error (GPG_ERR_INV_PACKET); /* Invalid CTB. */
pktlen = 0;
if ((ctb & 0x40)) /* New style (OpenPGP) CTB. */
{
pkttype = (ctb & 0x3f);
if (!len)
return gpg_error (GPG_ERR_INV_PACKET); /* No 1st length byte. */
c = *buf++; len--;
if (pkttype == PKT_COMPRESSED)
return gpg_error (GPG_ERR_UNEXPECTED); /* ... packet in a keyblock. */
if ( c < 192 )
pktlen = c;
else if ( c < 224 )
{
pktlen = (c - 192) * 256;
if (!len)
return gpg_error (GPG_ERR_INV_PACKET); /* No 2nd length byte. */
c = *buf++; len--;
pktlen += c + 192;
}
else if (c == 255)
{
if (len <4 )
return gpg_error (GPG_ERR_INV_PACKET); /* No length bytes. */
pktlen = buf32_to_ulong (buf);
buf += 4;
len -= 4;
}
else /* Partial length encoding is not allowed for key packets. */
return gpg_error (GPG_ERR_UNEXPECTED);
}
else /* Old style CTB. */
{
int lenbytes;
pktlen = 0;
pkttype = (ctb>>2)&0xf;
lenbytes = ((ctb&3)==3)? 0 : (1<<(ctb & 3));
if (!lenbytes) /* Not allowed in key packets. */
return gpg_error (GPG_ERR_UNEXPECTED);
if (len < lenbytes)
return gpg_error (GPG_ERR_INV_PACKET); /* Not enough length bytes. */
for (; lenbytes; lenbytes--)
{
pktlen <<= 8;
pktlen |= *buf++; len--;
}
}
/* Do some basic sanity check. */
switch (pkttype)
{
case PKT_SIGNATURE:
case PKT_SECRET_KEY:
case PKT_PUBLIC_KEY:
case PKT_SECRET_SUBKEY:
case PKT_MARKER:
case PKT_RING_TRUST:
case PKT_USER_ID:
case PKT_PUBLIC_SUBKEY:
case PKT_OLD_COMMENT:
case PKT_ATTRIBUTE:
case PKT_COMMENT:
case PKT_GPG_CONTROL:
break; /* Okay these are allowed packets. */
default:
return gpg_error (GPG_ERR_UNEXPECTED);
}
if (pktlen == (unsigned long)(-1))
return gpg_error (GPG_ERR_INV_PACKET);
if (pktlen > len)
return gpg_error (GPG_ERR_INV_PACKET); /* Packet length header too long. */
*r_data = buf;
*r_datalen = pktlen;
*r_pkttype = pkttype;
*r_ntotal = (buf - *bufptr) + pktlen;
*bufptr = buf + pktlen;
*buflen = len - pktlen;
if (!*buflen)
*bufptr = NULL;
return 0;
} | 0 | [
"CWE-20"
]
| gnupg | 2183683bd633818dd031b090b5530951de76f392 | 71,831,822,553,729,520,000,000,000,000,000,000,000 | 102 | Use inline functions to convert buffer data to scalars.
* common/host2net.h (buf16_to_ulong, buf16_to_uint): New.
(buf16_to_ushort, buf16_to_u16): New.
(buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New.
--
Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to
avoid all sign extension on shift problems. Hanno Böck found a case
with an invalid read due to this problem. To fix that once and for
all almost all uses of "<< 24" and "<< 8" are changed by this patch to
use an inline function from host2net.h.
Signed-off-by: Werner Koch <[email protected]> |
static i64 seekcur_fdout(rzip_control *control)
{
	if (!TMP_OUTBUF)
		return lseek(control->fd_out, 0, SEEK_CUR);
	return (control->out_relofs + control->out_ofs);
} | 0 | [
"CWE-703"
]
| lrzip | 4b3942103b57c639c8e0f31d6d5fd7bac53bbdf4 | 146,321,677,920,341,900,000,000,000,000,000,000,000 | 6 | Fix possible race condition between zpaq_decompress_buf() and clear_rulist() function as reported by wcventure. |
rdp_out_order_caps(STREAM s)
{
	uint8 order_caps[32];
	memset(order_caps, 0, 32);
	order_caps[0] = 1; /* dest blt */
	order_caps[1] = 1; /* pat blt */
	order_caps[2] = 1; /* screen blt */
	order_caps[3] = (g_bitmap_cache ? 1 : 0); /* memblt */
	order_caps[4] = 0; /* triblt */
	order_caps[8] = 1; /* line */
	order_caps[9] = 1; /* line */
	order_caps[10] = 1; /* rect */
	order_caps[11] = (g_desktop_save ? 1 : 0); /* desksave */
	order_caps[13] = 1; /* memblt */
	order_caps[14] = 1; /* triblt */
	order_caps[20] = (g_polygon_ellipse_orders ? 1 : 0); /* polygon */
	order_caps[21] = (g_polygon_ellipse_orders ? 1 : 0); /* polygon2 */
	order_caps[22] = 1; /* polyline */
	order_caps[25] = (g_polygon_ellipse_orders ? 1 : 0); /* ellipse */
	order_caps[26] = (g_polygon_ellipse_orders ? 1 : 0); /* ellipse2 */
	order_caps[27] = 1; /* text2 */
	out_uint16_le(s, RDP_CAPSET_ORDER);
	out_uint16_le(s, RDP_CAPLEN_ORDER);
	out_uint8s(s, 20); /* Terminal desc, pad */
	out_uint16_le(s, 1); /* Cache X granularity */
	out_uint16_le(s, 20); /* Cache Y granularity */
	out_uint16(s, 0); /* Pad */
	out_uint16_le(s, 1); /* Max order level */
	out_uint16_le(s, 0x147); /* Number of fonts */
	out_uint16_le(s, 0x2a); /* Capability flags */
	out_uint8p(s, order_caps, 32); /* Orders supported */
	out_uint16_le(s, 0x6a1); /* Text capability flags */
	out_uint8s(s, 6); /* Pad */
	out_uint32_le(s, g_desktop_save == False ? 0 : 0x38400); /* Desktop cache size */
	out_uint32(s, 0); /* Unknown */
	out_uint32_le(s, 0x4e4); /* Unknown */
} | 0 | [
"CWE-787"
]
| rdesktop | 766ebcf6f23ccfe8323ac10242ae6e127d4505d2 | 115,767,369,746,318,700,000,000,000,000,000,000,000 | 39 | Malicious RDP server security fixes
This commit includes fixes for a set of 21 vulnerabilities in
rdesktop when a malicious RDP server is used.
All vulnerabilities was identified and reported by Eyal Itkin.
* Add rdp_protocol_error function that is used in several fixes
* Refactor of process_bitmap_updates
* Fix possible integer overflow in s_check_rem() on 32bit arch
* Fix memory corruption in process_bitmap_data - CVE-2018-8794
* Fix remote code execution in process_bitmap_data - CVE-2018-8795
* Fix remote code execution in process_plane - CVE-2018-8797
* Fix Denial of Service in mcs_recv_connect_response - CVE-2018-20175
* Fix Denial of Service in mcs_parse_domain_params - CVE-2018-20175
* Fix Denial of Service in sec_parse_crypt_info - CVE-2018-20176
* Fix Denial of Service in sec_recv - CVE-2018-20176
* Fix minor information leak in rdpdr_process - CVE-2018-8791
* Fix Denial of Service in cssp_read_tsrequest - CVE-2018-8792
* Fix remote code execution in cssp_read_tsrequest - CVE-2018-8793
* Fix Denial of Service in process_bitmap_data - CVE-2018-8796
* Fix minor information leak in rdpsnd_process_ping - CVE-2018-8798
* Fix Denial of Service in process_secondary_order - CVE-2018-8799
* Fix remote code execution in in ui_clip_handle_data - CVE-2018-8800
* Fix major information leak in ui_clip_handle_data - CVE-2018-20174
* Fix memory corruption in rdp_in_unistr - CVE-2018-20177
* Fix Denial of Service in process_demand_active - CVE-2018-20178
* Fix remote code execution in lspci_process - CVE-2018-20179
* Fix remote code execution in rdpsnddbg_process - CVE-2018-20180
* Fix remote code execution in seamless_process - CVE-2018-20181
* Fix remote code execution in seamless_process_line - CVE-2018-20182 |
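One item in this list is "Fix possible integer overflow in s_check_rem() on 32bit arch". A hedged sketch of that bug class, using a hypothetical stream type rather than rdesktop's actual STREAM: a bounds check written as pointer addition can wrap on 32-bit and wrongly pass.

```c
#include <stddef.h>
#include <stdbool.h>

/* Hypothetical stream; rdesktop's real STREAM struct differs.
 * Invariant assumed throughout: p <= end. */
struct stream { unsigned char *p, *end; };

/* Overflow-prone form: s->p + n can wrap on 32-bit (and is undefined
 * behaviour once it leaves the buffer), so it may compare "in bounds". */
static bool check_rem_unsafe(struct stream *s, size_t n)
{
    return s->p + n <= s->end;
}

/* Safe form: compare the request against the space actually remaining. */
static bool check_rem_safe(struct stream *s, size_t n)
{
    return n <= (size_t)(s->end - s->p);
}
```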
get_zmbuf(struct vhost_virtqueue *vq)
{
	uint16_t i;
	uint16_t last;
	int tries = 0;
	/* search [last_zmbuf_idx, zmbuf_size) */
	i = vq->last_zmbuf_idx;
	last = vq->zmbuf_size;
again:
	for (; i < last; i++) {
		if (vq->zmbufs[i].in_use == 0) {
			vq->last_zmbuf_idx = i + 1;
			vq->zmbufs[i].in_use = 1;
			return &vq->zmbufs[i];
		}
	}
	tries++;
	if (tries == 1) {
		/* search [0, last_zmbuf_idx) */
		i = 0;
		last = vq->last_zmbuf_idx;
		goto again;
	}
	return NULL;
} | 0 | [
"CWE-665"
]
| dpdk | 97ecc1c85c95c13bc66a87435758e93406c35c48 | 139,384,680,541,948,250,000,000,000,000,000,000,000 | 29 | vhost: fix translated address not checked
A malicious guest can construct a descriptor with an invalid address and a zero
buffer length. Vhost therefore has to check both the translated address and the
translated data length. This patch adds the missing address check.
CVE-2020-10725
Fixes: 75ed51697820 ("vhost: add packed ring batch dequeue")
Fixes: ef861692c398 ("vhost: add packed ring batch enqueue")
Cc: [email protected]
Signed-off-by: Marvin Liu <[email protected]>
Reviewed-by: Maxime Coquelin <[email protected]> |
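A hedged sketch of the descriptor validation this commit message describes, with hypothetical types standing in for the real vhost translation machinery (which goes through per-region IOVA-to-VVA lookup):

```c
#include <stdint.h>
#include <stddef.h>

/* Hypothetical translation result; not DPDK's actual structures. */
struct xlat { void *vva; uint64_t len; };

/* Reject a guest descriptor unless translation yielded a usable address
 * that covers the full requested length, and the length is non-zero. */
static int desc_is_usable(const struct xlat *x, uint64_t want_len)
{
    if (want_len == 0)
        return 0;           /* zero-length buffer: invalid */
    if (x->vva == NULL)
        return 0;           /* translation failed: bad guest address */
    if (x->len < want_len)
        return 0;           /* mapping covers less than the request */
    return 1;
}
```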
cmsBool AddMLUBlock(cmsMLU* mlu, cmsUInt32Number size, const wchar_t *Block,
                    cmsUInt16Number LanguageCode, cmsUInt16Number CountryCode)
{
    cmsUInt32Number Offset;
    cmsUInt8Number* Ptr;
    // Sanity check
    if (mlu == NULL) return FALSE;
    // Is there any room available?
    if (mlu ->UsedEntries >= mlu ->AllocatedEntries) {
        if (!GrowMLUtable(mlu)) return FALSE;
    }
    // Only one ASCII string
    if (SearchMLUEntry(mlu, LanguageCode, CountryCode) >= 0) return FALSE;  // Only one is allowed!
    // Check for size
    while ((mlu ->PoolSize - mlu ->PoolUsed) < size) {
        if (!GrowMLUpool(mlu)) return FALSE;
    }
    Offset = mlu ->PoolUsed;
    Ptr = (cmsUInt8Number*) mlu ->MemPool;
    if (Ptr == NULL) return FALSE;
    // Set the entry
    memmove(Ptr + Offset, Block, size);
    mlu ->PoolUsed += size;
    mlu ->Entries[mlu ->UsedEntries].StrW = Offset;
    mlu ->Entries[mlu ->UsedEntries].Len = size;
    mlu ->Entries[mlu ->UsedEntries].Country = CountryCode;
    mlu ->Entries[mlu ->UsedEntries].Language = LanguageCode;
    mlu ->UsedEntries++;
    return TRUE;
} | 0 | []
| Little-CMS | 886e2f524268efe8a1c3aa838c28e446fda24486 | 85,810,097,163,881,940,000,000,000,000,000,000,000 | 40 | Fixes from coverity check |
NOEXPORT void win_log(LPCTSTR txt) {
    struct LIST *curr;
    size_t txt_len;
    static size_t log_len=0;
    txt_len=_tcslen(txt);
    curr=str_alloc_detached(sizeof(struct LIST)+txt_len*sizeof(TCHAR));
    curr->len=txt_len;
    _tcscpy(curr->txt, txt);
    curr->next=NULL;
    /* this critical section is performance critical */
    CRYPTO_THREAD_write_lock(stunnel_locks[LOCK_WIN_LOG]);
    if(tail)
        tail->next=curr;
    tail=curr;
    if(!head)
        head=tail;
    log_len++;
    new_logs=TRUE;
    if(log_len>LOG_LINES) {
        curr=head;
        head=head->next;
        log_len--;
    } else {
        curr=NULL;
    }
    CRYPTO_THREAD_unlock(stunnel_locks[LOCK_WIN_LOG]);
    str_free(curr);
} | 0 | [
"CWE-295"
]
| stunnel | ebad9ddc4efb2635f37174c9d800d06206f1edf9 | 75,047,986,078,828,640,000,000,000,000,000,000,000 | 31 | stunnel-5.57 |
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_conn_complete *ev = (void *) skb->data;
struct inquiry_entry *ie;
struct hci_conn *conn;
BT_DBG("%s", hdev->name);
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
if (!conn) {
/* Connection may not exist if auto-connected. Check the inquiry
* cache to see if we've already discovered this bdaddr before.
* If found and link is an ACL type, create a connection class
* automatically.
*/
ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
if (ie && ev->link_type == ACL_LINK) {
conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
HCI_ROLE_SLAVE);
if (!conn) {
bt_dev_err(hdev, "no memory for new conn");
goto unlock;
}
} else {
if (ev->link_type != SCO_LINK)
goto unlock;
conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
&ev->bdaddr);
if (!conn)
goto unlock;
conn->type = SCO_LINK;
}
}
if (!ev->status) {
conn->handle = __le16_to_cpu(ev->handle);
if (conn->type == ACL_LINK) {
conn->state = BT_CONFIG;
hci_conn_hold(conn);
if (!conn->out && !hci_conn_ssp_enabled(conn) &&
!hci_find_link_key(hdev, &ev->bdaddr))
conn->disc_timeout = HCI_PAIRING_TIMEOUT;
else
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
} else
conn->state = BT_CONNECTED;
hci_debugfs_create_conn(conn);
hci_conn_add_sysfs(conn);
if (test_bit(HCI_AUTH, &hdev->flags))
set_bit(HCI_CONN_AUTH, &conn->flags);
if (test_bit(HCI_ENCRYPT, &hdev->flags))
set_bit(HCI_CONN_ENCRYPT, &conn->flags);
/* Get remote features */
if (conn->type == ACL_LINK) {
struct hci_cp_read_remote_features cp;
cp.handle = ev->handle;
hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
sizeof(cp), &cp);
hci_req_update_scan(hdev);
}
/* Set packet type for incoming connection */
if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
struct hci_cp_change_conn_ptype cp;
cp.handle = ev->handle;
cp.pkt_type = cpu_to_le16(conn->pkt_type);
hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
&cp);
}
} else {
conn->state = BT_CLOSED;
if (conn->type == ACL_LINK)
mgmt_connect_failed(hdev, &conn->dst, conn->type,
conn->dst_type, ev->status);
}
if (conn->type == ACL_LINK)
hci_sco_setup(conn, ev->status);
if (ev->status) {
hci_connect_cfm(conn, ev->status);
hci_conn_del(conn);
} else if (ev->link_type == SCO_LINK) {
switch (conn->setting & SCO_AIRMODE_MASK) {
case SCO_AIRMODE_CVSD:
if (hdev->notify)
hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
break;
}
hci_connect_cfm(conn, ev->status);
}
unlock:
hci_dev_unlock(hdev);
hci_conn_check_pending(hdev);
} | 0 | [
"CWE-290"
]
| linux | 3ca44c16b0dcc764b641ee4ac226909f5c421aa3 | 131,765,125,059,125,150,000,000,000,000,000,000,000 | 109 | Bluetooth: Consolidate encryption handling in hci_encrypt_cfm
This makes hci_encrypt_cfm call hci_connect_cfm in case the connection
state is BT_CONFIG so callers don't have to check the state.
Signed-off-by: Luiz Augusto von Dentz <[email protected]>
Signed-off-by: Marcel Holtmann <[email protected]> |
void sdma_ahg_free(struct sdma_engine *sde, int ahg_index)
{
	if (!sde)
		return;
	trace_hfi1_ahg_deallocate(sde, ahg_index);
	if (ahg_index < 0 || ahg_index > 31)
		return;
	clear_bit(ahg_index, &sde->ahg_bits);
} | 0 | [
"CWE-400",
"CWE-401"
]
| linux | 34b3be18a04ecdc610aae4c48e5d1b799d8689f6 | 198,830,293,072,986,330,000,000,000,000,000,000,000 | 9 | RDMA/hfi1: Prevent memory leak in sdma_init
In sdma_init if rhashtable_init fails the allocated memory for
tmp_sdma_rht should be released.
Fixes: 5a52a7acf7e2 ("IB/hfi1: NULL pointer dereference when freeing rhashtable")
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Navid Emamdoost <[email protected]>
Acked-by: Dennis Dalessandro <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]> |
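A userspace-flavoured sketch of the error-path fix the commit message describes (free the temporary allocation when the second initialization step fails); names are stand-ins, not the driver's:

```c
#include <stdlib.h>

struct rht { int placeholder; };                            /* stands in for the rhashtable */
static int rht_init(struct rht *t) { (void)t; return -1; }  /* pretend init fails */

static int sdma_like_init(struct rht **out)
{
    struct rht *tmp = malloc(sizeof(*tmp));
    if (!tmp)
        return -1;
    if (rht_init(tmp) != 0) {
        free(tmp);           /* the fix: release tmp on the error path */
        return -1;
    }
    *out = tmp;
    return 0;
}
```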
Bool gf_opus_parse_header(GF_OpusParser *opus, u8 *data, u32 data_len)
{
	char tag[9];
	GF_BitStream *bs = gf_bs_new(data, data_len, GF_BITSTREAM_READ);
	gf_bs_read_data(bs, tag, 8);
	tag[8]=0;
	if (memcmp(data, "OpusHead", sizeof(char)*8)) {
		gf_bs_del(bs);
		return GF_FALSE;
	}
	/*Identification Header*/
	opus->version = gf_bs_read_u8(bs); /*version*/
	if (opus->version != 1) {
		gf_bs_del(bs);
		GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[Opus] Unsupported version %d\n", opus->version));
		return GF_FALSE;
	}
	opus->OutputChannelCount = gf_bs_read_u8(bs);
	opus->PreSkip = gf_bs_read_u16_le(bs);
	opus->InputSampleRate = gf_bs_read_u32_le(bs);
	opus->OutputGain = gf_bs_read_u16_le(bs);
	opus->ChannelMappingFamily = gf_bs_read_u8(bs);
	if (opus->ChannelMappingFamily != 0) {
		opus->StreamCount = gf_bs_read_u8(bs);
		opus->CoupledCount = gf_bs_read_u8(bs);
		gf_bs_read_data(bs, (char *) opus->ChannelMapping, opus->OutputChannelCount);
	}
	gf_bs_del(bs);
	return GF_TRUE;
} | 0 | [
"CWE-190",
"CWE-787"
]
| gpac | 51cdb67ff7c5f1242ac58c5aa603ceaf1793b788 | 31,895,025,489,748,200,000,000,000,000,000,000,000 | 31 | add safety in avc/hevc/vvc sps/pps/vps ID check - cf #1720 #1721 #1722 |
static int imap_tags_commit(struct Context *ctx, struct Header *hdr, char *buf)
{
struct Buffer *cmd = NULL;
char uid[11];
struct ImapData *idata = ctx->data;
if (*buf == '\0')
buf = NULL;
if (!mutt_bit_isset(idata->ctx->rights, MUTT_ACL_WRITE))
return 0;
snprintf(uid, sizeof(uid), "%u", HEADER_DATA(hdr)->uid);
/* Remove old custom flags */
if (HEADER_DATA(hdr)->flags_remote)
{
cmd = mutt_buffer_new();
if (!cmd)
{
mutt_debug(1, "unable to allocate buffer\n");
return -1;
}
cmd->dptr = cmd->data;
mutt_buffer_addstr(cmd, "UID STORE ");
mutt_buffer_addstr(cmd, uid);
mutt_buffer_addstr(cmd, " -FLAGS.SILENT (");
mutt_buffer_addstr(cmd, HEADER_DATA(hdr)->flags_remote);
mutt_buffer_addstr(cmd, ")");
/* Should we return here, or we are fine and we could
* continue to add new flags *
*/
if (imap_exec(idata, cmd->data, 0) != 0)
{
mutt_buffer_free(&cmd);
return -1;
}
mutt_buffer_free(&cmd);
}
/* Add new custom flags */
if (buf)
{
cmd = mutt_buffer_new();
if (!cmd)
{
mutt_debug(1, "fail to remove old flags\n");
return -1;
}
cmd->dptr = cmd->data;
mutt_buffer_addstr(cmd, "UID STORE ");
mutt_buffer_addstr(cmd, uid);
mutt_buffer_addstr(cmd, " +FLAGS.SILENT (");
mutt_buffer_addstr(cmd, buf);
mutt_buffer_addstr(cmd, ")");
if (imap_exec(idata, cmd->data, 0) != 0)
{
mutt_debug(1, "fail to add new flags\n");
mutt_buffer_free(&cmd);
return -1;
}
mutt_buffer_free(&cmd);
}
/* We are good sync them */
mutt_debug(1, "NEW TAGS: %d\n", buf);
driver_tags_replace(&hdr->tags, buf);
FREE(&HEADER_DATA(hdr)->flags_remote);
HEADER_DATA(hdr)->flags_remote = driver_tags_get_with_hidden(&hdr->tags);
return 0;
} | 0 | [
"CWE-78",
"CWE-77"
]
| neomutt | 95e80bf9ff10f68cb6443f760b85df4117cb15eb | 327,187,860,895,288,060,000,000,000,000,000,000,000 | 76 | Quote path in imap_subscribe |
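The fix title is "Quote path in imap_subscribe" (CWE-77/78): a mailbox path spliced verbatim into an IMAP command lets metacharacters alter the command. A simplified sketch of IMAP quoted-string escaping; the real fix presumably routes through NeoMutt's own quoting helper, and this buffer handling is deliberately minimal:

```c
#include <stdio.h>
#include <string.h>

/* Minimal IMAP quoted-string escaping: backslash-escape '"' and '\\',
 * wrap the result in double quotes. Illustrative, not NeoMutt's code. */
static void imap_quote(char *dst, size_t dstlen, const char *src)
{
    size_t j = 0;
    if (dstlen < 3) { if (dstlen) dst[0] = '\0'; return; }
    dst[j++] = '"';
    for (; *src && j + 2 < dstlen - 1; src++) {
        if (*src == '"' || *src == '\\')
            dst[j++] = '\\';
        dst[j++] = *src;
    }
    dst[j++] = '"';
    dst[j] = '\0';
}

int main(void)
{
    char quoted[128];
    imap_quote(quoted, sizeof(quoted), "INBOX\"; EVIL COMMAND");
    printf("SUBSCRIBE %s\n", quoted);   /* metacharacters are now inert data */
    return 0;
}
```

With the quoting applied, a hostile path travels as literal mailbox text instead of extra protocol.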
static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
void *frame, struct net_device *dev, void *data, int tp_len,
__be16 proto, unsigned char *addr, int hlen, int copylen,
const struct sockcm_cookie *sockc)
{
union tpacket_uhdr ph;
int to_write, offset, len, nr_frags, len_max;
struct socket *sock = po->sk.sk_socket;
struct page *page;
int err;
ph.raw = frame;
skb->protocol = proto;
skb->dev = dev;
skb->priority = po->sk.sk_priority;
skb->mark = po->sk.sk_mark;
sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags);
skb_shinfo(skb)->destructor_arg = ph.raw;
skb_reserve(skb, hlen);
skb_reset_network_header(skb);
to_write = tp_len;
if (sock->type == SOCK_DGRAM) {
err = dev_hard_header(skb, dev, ntohs(proto), addr,
NULL, tp_len);
if (unlikely(err < 0))
return -EINVAL;
} else if (copylen) {
int hdrlen = min_t(int, copylen, tp_len);
skb_push(skb, dev->hard_header_len);
skb_put(skb, copylen - dev->hard_header_len);
err = skb_store_bits(skb, 0, data, hdrlen);
if (unlikely(err))
return err;
if (!dev_validate_header(dev, skb->data, hdrlen))
return -EINVAL;
if (!skb->protocol)
tpacket_set_protocol(dev, skb);
data += hdrlen;
to_write -= hdrlen;
}
offset = offset_in_page(data);
len_max = PAGE_SIZE - offset;
len = ((to_write > len_max) ? len_max : to_write);
skb->data_len = to_write;
skb->len += to_write;
skb->truesize += to_write;
refcount_add(to_write, &po->sk.sk_wmem_alloc);
while (likely(to_write)) {
nr_frags = skb_shinfo(skb)->nr_frags;
if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
pr_err("Packet exceed the number of skb frags(%lu)\n",
MAX_SKB_FRAGS);
return -EFAULT;
}
page = pgv_to_page(data);
data += len;
flush_dcache_page(page);
get_page(page);
skb_fill_page_desc(skb, nr_frags, page, offset, len);
to_write -= len;
offset = 0;
len_max = PAGE_SIZE;
len = ((to_write > len_max) ? len_max : to_write);
}
skb_probe_transport_header(skb, 0);
return tp_len;
} | 0 | [
"CWE-119",
"CWE-787"
]
| linux | edbd58be15a957f6a760c4a514cd475217eb97fd | 117,956,059,503,708,500,000,000,000,000,000,000,000 | 80 | packet: Don't write vnet header beyond end of buffer
... which may happen with certain values of tp_reserve and maclen.
Fixes: 58d19b19cd99 ("packet: vnet_hdr support for tpacket_rcv")
Signed-off-by: Benjamin Poirier <[email protected]>
Cc: Willem de Bruijn <[email protected]>
Acked-by: Willem de Bruijn <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
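A hedged, userspace analogue of the check this commit adds: with certain tp_reserve/maclen combinations the virtio-net header landed past the frame buffer, so the destination range has to be validated before the write. Types and sizes here are illustrative:

```c
#include <stddef.h>
#include <string.h>

struct vnet_hdr { unsigned char bytes[12]; };   /* size stands in for the real header */

static int write_vnet_hdr(unsigned char *frame, size_t frame_len,
                          size_t off, const struct vnet_hdr *h)
{
    if (off > frame_len || frame_len - off < sizeof(*h))
        return -1;                              /* header would overrun the frame */
    memcpy(frame + off, h, sizeof(*h));
    return 0;
}
```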
static php_iconv_err_t _php_iconv_mime_decode(smart_str *pretval, const char *str, size_t str_nbytes, const char *enc, const char **next_pos, int mode)
{
php_iconv_err_t err = PHP_ICONV_ERR_SUCCESS;
iconv_t cd = (iconv_t)(-1), cd_pl = (iconv_t)(-1);
const char *p1;
size_t str_left;
unsigned int scan_stat = 0;
const char *csname = NULL;
size_t csname_len;
const char *encoded_text = NULL;
size_t encoded_text_len = 0;
const char *encoded_word = NULL;
const char *spaces = NULL;
php_iconv_enc_scheme_t enc_scheme = PHP_ICONV_ENC_SCHEME_BASE64;
if (next_pos != NULL) {
*next_pos = NULL;
}
cd_pl = iconv_open(enc, ICONV_ASCII_ENCODING);
if (cd_pl == (iconv_t)(-1)) {
#if ICONV_SUPPORTS_ERRNO
if (errno == EINVAL) {
err = PHP_ICONV_ERR_WRONG_CHARSET;
} else {
err = PHP_ICONV_ERR_CONVERTER;
}
#else
err = PHP_ICONV_ERR_UNKNOWN;
#endif
goto out;
}
p1 = str;
for (str_left = str_nbytes; str_left > 0; str_left--, p1++) {
int eos = 0;
switch (scan_stat) {
case 0: /* expecting any character */
switch (*p1) {
case '\r': /* part of an EOL sequence? */
scan_stat = 7;
break;
case '\n':
scan_stat = 8;
break;
case '=': /* first letter of an encoded chunk */
encoded_word = p1;
scan_stat = 1;
break;
case ' ': case '\t': /* a chunk of whitespaces */
spaces = p1;
scan_stat = 11;
break;
default: /* first letter of a non-encoded word */
err = _php_iconv_appendc(pretval, *p1, cd_pl);
if (err != PHP_ICONV_ERR_SUCCESS) {
if (mode & PHP_ICONV_MIME_DECODE_CONTINUE_ON_ERROR) {
err = PHP_ICONV_ERR_SUCCESS;
} else {
goto out;
}
}
encoded_word = NULL;
if ((mode & PHP_ICONV_MIME_DECODE_STRICT)) {
scan_stat = 12;
}
break;
}
break;
case 1: /* expecting a delimiter */
if (*p1 != '?') {
if (*p1 == '\r' || *p1 == '\n') {
--p1;
}
err = _php_iconv_appendl(pretval, encoded_word, (size_t)((p1 + 1) - encoded_word), cd_pl);
if (err != PHP_ICONV_ERR_SUCCESS) {
goto out;
}
encoded_word = NULL;
if ((mode & PHP_ICONV_MIME_DECODE_STRICT)) {
scan_stat = 12;
} else {
scan_stat = 0;
}
break;
}
csname = p1 + 1;
scan_stat = 2;
break;
case 2: /* expecting a charset name */
switch (*p1) {
case '?': /* normal delimiter: encoding scheme follows */
scan_stat = 3;
break;
case '*': /* new style delimiter: locale id follows */
scan_stat = 10;
break;
case '\r': case '\n': /* not an encoded-word */
--p1;
_php_iconv_appendc(pretval, '=', cd_pl);
_php_iconv_appendc(pretval, '?', cd_pl);
err = _php_iconv_appendl(pretval, csname, (size_t)((p1 + 1) - csname), cd_pl);
if (err != PHP_ICONV_ERR_SUCCESS) {
goto out;
}
csname = NULL;
if ((mode & PHP_ICONV_MIME_DECODE_STRICT)) {
scan_stat = 12;
}
else {
scan_stat = 0;
}
continue;
}
if (scan_stat != 2) {
char tmpbuf[80];
if (csname == NULL) {
err = PHP_ICONV_ERR_MALFORMED;
goto out;
}
csname_len = (size_t)(p1 - csname);
if (csname_len > sizeof(tmpbuf) - 1) {
if ((mode & PHP_ICONV_MIME_DECODE_CONTINUE_ON_ERROR)) {
err = _php_iconv_appendl(pretval, encoded_word, (size_t)((p1 + 1) - encoded_word), cd_pl);
if (err != PHP_ICONV_ERR_SUCCESS) {
goto out;
}
encoded_word = NULL;
if ((mode & PHP_ICONV_MIME_DECODE_STRICT)) {
scan_stat = 12;
} else {
scan_stat = 0;
}
break;
} else {
err = PHP_ICONV_ERR_MALFORMED;
goto out;
}
}
memcpy(tmpbuf, csname, csname_len);
tmpbuf[csname_len] = '\0';
if (cd != (iconv_t)(-1)) {
iconv_close(cd);
}
cd = iconv_open(enc, tmpbuf);
if (cd == (iconv_t)(-1)) {
if ((mode & PHP_ICONV_MIME_DECODE_CONTINUE_ON_ERROR)) {
/* Bad character set, but the user wants us to
* press on. In this case, we'll just insert the
* undecoded encoded word, since there isn't really
* a more sensible behaviour available; the only
* other options are to swallow the encoded word
* entirely or decode it with an arbitrarily chosen
* single byte encoding, both of which seem to have
* a higher WTF factor than leaving it undecoded.
*
* Given this approach, we need to skip ahead to
* the end of the encoded word. */
int qmarks = 2;
while (qmarks > 0 && str_left > 1) {
if (*(++p1) == '?') {
--qmarks;
}
--str_left;
}
/* Look ahead to check for the terminating = that
* should be there as well; if it's there, we'll
* also include that. If it's not, there isn't much
* we can do at this point. */
if (*(p1 + 1) == '=') {
++p1;
--str_left;
}
err = _php_iconv_appendl(pretval, encoded_word, (size_t)((p1 + 1) - encoded_word), cd_pl);
if (err != PHP_ICONV_ERR_SUCCESS) {
goto out;
}
/* Let's go back and see if there are further
* encoded words or bare content, and hope they
* might actually have a valid character set. */
scan_stat = 12;
break;
} else {
#if ICONV_SUPPORTS_ERRNO
if (errno == EINVAL) {
err = PHP_ICONV_ERR_WRONG_CHARSET;
} else {
err = PHP_ICONV_ERR_CONVERTER;
}
#else
err = PHP_ICONV_ERR_UNKNOWN;
#endif
goto out;
}
}
}
break;
case 3: /* expecting a encoding scheme specifier */
switch (*p1) {
case 'b':
case 'B':
enc_scheme = PHP_ICONV_ENC_SCHEME_BASE64;
scan_stat = 4;
break;
case 'q':
case 'Q':
enc_scheme = PHP_ICONV_ENC_SCHEME_QPRINT;
scan_stat = 4;
break;
default:
if ((mode & PHP_ICONV_MIME_DECODE_CONTINUE_ON_ERROR)) {
err = _php_iconv_appendl(pretval, encoded_word, (size_t)((p1 + 1) - encoded_word), cd_pl);
if (err != PHP_ICONV_ERR_SUCCESS) {
goto out;
}
encoded_word = NULL;
if ((mode & PHP_ICONV_MIME_DECODE_STRICT)) {
scan_stat = 12;
} else {
scan_stat = 0;
}
break;
} else {
err = PHP_ICONV_ERR_MALFORMED;
goto out;
}
}
break;
case 4: /* expecting a delimiter */
if (*p1 != '?') {
if ((mode & PHP_ICONV_MIME_DECODE_CONTINUE_ON_ERROR)) {
/* pass the entire chunk through the converter */
err = _php_iconv_appendl(pretval, encoded_word, (size_t)((p1 + 1) - encoded_word), cd_pl);
if (err != PHP_ICONV_ERR_SUCCESS) {
goto out;
}
encoded_word = NULL;
if ((mode & PHP_ICONV_MIME_DECODE_STRICT)) {
scan_stat = 12;
} else {
scan_stat = 0;
}
break;
} else {
err = PHP_ICONV_ERR_MALFORMED;
goto out;
}
}
encoded_text = p1 + 1;
scan_stat = 5;
break;
case 5: /* expecting an encoded portion */
if (*p1 == '?') {
encoded_text_len = (size_t)(p1 - encoded_text);
scan_stat = 6;
}
break;
case 7: /* expecting a "\n" character */
if (*p1 == '\n') {
scan_stat = 8;
} else {
/* bare CR */
_php_iconv_appendc(pretval, '\r', cd_pl);
_php_iconv_appendc(pretval, *p1, cd_pl);
scan_stat = 0;
}
break;
case 8: /* checking whether the following line is part of a
folded header */
if (*p1 != ' ' && *p1 != '\t') {
--p1;
str_left = 1; /* quit_loop */
break;
}
if (encoded_word == NULL) {
_php_iconv_appendc(pretval, ' ', cd_pl);
}
spaces = NULL;
scan_stat = 11;
break;
case 6: /* expecting a End-Of-Chunk character "=" */
if (*p1 != '=') {
if ((mode & PHP_ICONV_MIME_DECODE_CONTINUE_ON_ERROR)) {
/* pass the entire chunk through the converter */
err = _php_iconv_appendl(pretval, encoded_word, (size_t)((p1 + 1) - encoded_word), cd_pl);
if (err != PHP_ICONV_ERR_SUCCESS) {
goto out;
}
encoded_word = NULL;
if ((mode & PHP_ICONV_MIME_DECODE_STRICT)) {
scan_stat = 12;
} else {
scan_stat = 0;
}
break;
} else {
err = PHP_ICONV_ERR_MALFORMED;
goto out;
}
}
scan_stat = 9;
if (str_left == 1) {
eos = 1;
} else {
break;
}
case 9: /* choice point, seeing what to do next.*/
switch (*p1) {
default:
/* Handle non-RFC-compliant formats
*
* RFC2047 requires the character that comes right
* after an encoded word (chunk) to be a whitespace,
* while there are lots of broken implementations that
* generate such malformed headers that don't fulfill
* that requirement.
*/
if (!eos) {
if ((mode & PHP_ICONV_MIME_DECODE_STRICT)) {
/* pass the entire chunk through the converter */
err = _php_iconv_appendl(pretval, encoded_word, (size_t)((p1 + 1) - encoded_word), cd_pl);
if (err != PHP_ICONV_ERR_SUCCESS) {
goto out;
}
scan_stat = 12;
break;
}
}
/* break is omitted intentionally */
case '\r': case '\n': case ' ': case '\t': {
zend_string *decoded_text;
switch (enc_scheme) {
case PHP_ICONV_ENC_SCHEME_BASE64:
decoded_text = php_base64_decode((unsigned char*)encoded_text, encoded_text_len);
break;
case PHP_ICONV_ENC_SCHEME_QPRINT:
decoded_text = php_quot_print_decode((unsigned char*)encoded_text, encoded_text_len, 1);
break;
default:
decoded_text = NULL;
break;
}
if (decoded_text == NULL) {
if ((mode & PHP_ICONV_MIME_DECODE_CONTINUE_ON_ERROR)) {
/* pass the entire chunk through the converter */
err = _php_iconv_appendl(pretval, encoded_word, (size_t)((p1 + 1) - encoded_word), cd_pl);
if (err != PHP_ICONV_ERR_SUCCESS) {
goto out;
}
encoded_word = NULL;
if ((mode & PHP_ICONV_MIME_DECODE_STRICT)) {
scan_stat = 12;
} else {
scan_stat = 0;
}
break;
} else {
err = PHP_ICONV_ERR_UNKNOWN;
goto out;
}
}
err = _php_iconv_appendl(pretval, ZSTR_VAL(decoded_text), ZSTR_LEN(decoded_text), cd);
zend_string_release_ex(decoded_text, 0);
if (err != PHP_ICONV_ERR_SUCCESS) {
if ((mode & PHP_ICONV_MIME_DECODE_CONTINUE_ON_ERROR)) {
/* pass the entire chunk through the converter */
err = _php_iconv_appendl(pretval, encoded_word, (size_t)(p1 - encoded_word), cd_pl);
encoded_word = NULL;
if (err != PHP_ICONV_ERR_SUCCESS) {
break;
}
} else {
goto out;
}
}
if (eos) { /* reached end-of-string. done. */
scan_stat = 0;
break;
}
switch (*p1) {
case '\r': /* part of an EOL sequence? */
scan_stat = 7;
break;
case '\n':
scan_stat = 8;
break;
case '=': /* first letter of an encoded chunk */
scan_stat = 1;
break;
case ' ': case '\t': /* medial whitespaces */
spaces = p1;
scan_stat = 11;
break;
default: /* first letter of a non-encoded word */
_php_iconv_appendc(pretval, *p1, cd_pl);
scan_stat = 12;
break;
}
} break;
}
break;
case 10: /* expects a language specifier. dismiss it for now */
if (*p1 == '?') {
scan_stat = 3;
}
break;
case 11: /* expecting a chunk of whitespaces */
switch (*p1) {
case '\r': /* part of an EOL sequence? */
scan_stat = 7;
break;
case '\n':
scan_stat = 8;
break;
case '=': /* first letter of an encoded chunk */
if (spaces != NULL && encoded_word == NULL) {
_php_iconv_appendl(pretval, spaces, (size_t)(p1 - spaces), cd_pl);
spaces = NULL;
}
encoded_word = p1;
scan_stat = 1;
break;
case ' ': case '\t':
break;
default: /* first letter of a non-encoded word */
if (spaces != NULL) {
_php_iconv_appendl(pretval, spaces, (size_t)(p1 - spaces), cd_pl);
spaces = NULL;
}
_php_iconv_appendc(pretval, *p1, cd_pl);
encoded_word = NULL;
if ((mode & PHP_ICONV_MIME_DECODE_STRICT)) {
scan_stat = 12;
} else {
scan_stat = 0;
}
break;
}
break;
case 12: /* expecting a non-encoded word */
switch (*p1) {
case '\r': /* part of an EOL sequence? */
scan_stat = 7;
break;
case '\n':
scan_stat = 8;
break;
case ' ': case '\t':
spaces = p1;
scan_stat = 11;
break;
case '=': /* first letter of an encoded chunk */
if (!(mode & PHP_ICONV_MIME_DECODE_STRICT)) {
encoded_word = p1;
scan_stat = 1;
break;
}
/* break is omitted intentionally */
default:
_php_iconv_appendc(pretval, *p1, cd_pl);
break;
}
break;
}
}
switch (scan_stat) {
case 0: case 8: case 11: case 12:
break;
default:
if ((mode & PHP_ICONV_MIME_DECODE_CONTINUE_ON_ERROR)) {
if (scan_stat == 1) {
_php_iconv_appendc(pretval, '=', cd_pl);
}
err = PHP_ICONV_ERR_SUCCESS;
} else {
err = PHP_ICONV_ERR_MALFORMED;
goto out;
}
}
if (next_pos != NULL) {
*next_pos = p1;
}
smart_str_0(pretval);
out:
if (cd != (iconv_t)(-1)) {
iconv_close(cd);
}
if (cd_pl != (iconv_t)(-1)) {
iconv_close(cd_pl);
}
return err;
} | 1 | [
"CWE-125"
]
| php-src | 70523ce41ff400ea00343a03f297332cb1f1b77b | 180,898,520,077,366,680,000,000,000,000,000,000,000 | 549 | Fix bug #78069 - Out-of-bounds read in iconv.c:_php_iconv_mime_decode() due to integer overflow
(cherry picked from commit 7cf7148a8f8f4f55fb04de2a517d740bb6253eac) |
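This row is labeled target = 1, i.e. the pre-fix version. The hazard class (bug #78069) is a one-byte lookahead such as `*(p1 + 1)` performed when only one byte may remain in the buffer. A minimal sketch of the guard, not the exact PHP patch:

```c
#include <stddef.h>

/* Safe one-byte lookahead: p[1] may only be read when at least two
 * bytes remain. Illustrative names, not PHP's variables. */
static int peek_is_equals(const char *p, size_t left)
{
    if (left < 2)
        return 0;        /* p[1] would read past the end of the buffer */
    return p[1] == '=';
}
```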
static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
			   int nsops)
{
	int locknum;
again:
	if (nsops == 1 && !sma->complex_count) {
		struct sem *sem = sma->sem_base + sops->sem_num;
		/* Lock just the semaphore we are interested in. */
		spin_lock(&sem->lock);
		/*
		 * If sma->complex_count was set while we were spinning,
		 * we may need to look at things we did not lock here.
		 */
		if (unlikely(sma->complex_count)) {
			spin_unlock(&sem->lock);
			goto lock_array;
		}
		/*
		 * Another process is holding the global lock on the
		 * sem_array; we cannot enter our critical section,
		 * but have to wait for the global lock to be released.
		 */
		if (unlikely(spin_is_locked(&sma->sem_perm.lock))) {
			spin_unlock(&sem->lock);
			spin_unlock_wait(&sma->sem_perm.lock);
			goto again;
		}
		locknum = sops->sem_num;
	} else {
		int i;
		/*
		 * Lock the semaphore array, and wait for all of the
		 * individual semaphore locks to go away. The code
		 * above ensures no new single-lock holders will enter
		 * their critical section while the array lock is held.
		 */
lock_array:
		spin_lock(&sma->sem_perm.lock);
		for (i = 0; i < sma->sem_nsems; i++) {
			struct sem *sem = sma->sem_base + i;
			spin_unlock_wait(&sem->lock);
		}
		locknum = -1;
	}
	return locknum;
} | 0 | [
"CWE-703",
"CWE-189"
]
| linux | 6062a8dc0517bce23e3c2f7d2fea5e22411269a3 | 92,437,351,060,152,160,000,000,000,000,000,000,000 | 50 | ipc,sem: fine grained locking for semtimedop
Introduce finer grained locking for semtimedop, to handle the common case
of a program wanting to manipulate one semaphore from an array with
multiple semaphores.
If the call is a semop manipulating just one semaphore in an array with
multiple semaphores, only take the lock for that semaphore itself.
If the call needs to manipulate multiple semaphores, or another caller is
in a transaction that manipulates multiple semaphores, the sem_array lock
is taken, as well as all the locks for the individual semaphores.
On a 24 CPU system, performance numbers with the semop-multi
test with N threads and N semaphores, look like this:
vanilla Davidlohr's Davidlohr's + Davidlohr's +
threads patches rwlock patches v3 patches
10 610652 726325 1783589 2142206
20 341570 365699 1520453 1977878
30 288102 307037 1498167 2037995
40 290714 305955 1612665 2256484
50 288620 312890 1733453 2650292
60 289987 306043 1649360 2388008
70 291298 306347 1723167 2717486
80 290948 305662 1729545 2763582
90 290996 306680 1736021 2757524
100 292243 306700 1773700 3059159
[[email protected]: do not call sem_lock when bogus sma]
[[email protected]: make refcounter atomic]
Signed-off-by: Rik van Riel <[email protected]>
Suggested-by: Linus Torvalds <[email protected]>
Acked-by: Davidlohr Bueso <[email protected]>
Cc: Chegu Vinod <[email protected]>
Cc: Jason Low <[email protected]>
Reviewed-by: Michel Lespinasse <[email protected]>
Cc: Peter Hurley <[email protected]>
Cc: Stanislav Kinsbursky <[email protected]>
Tested-by: Emmanuel Benisty <[email protected]>
Tested-by: Sedat Dilek <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
void PngImage::writeMetadata()
{
    if (io_->open() != 0) {
        throw Error(kerDataSourceOpenFailed, io_->path(), strError());
    }
    IoCloser closer(*io_);
    BasicIo::UniquePtr tempIo(new MemIo);
    assert(tempIo.get() != 0);
    doWriteMetadata(*tempIo); // may throw
    io_->close();
    io_->transfer(*tempIo); // may throw
} // PngImage::writeMetadata | 0 | [
"CWE-190"
]
| exiv2 | 491c3ebe3b3faa6d8f75fb28146186792c2439da | 183,356,677,243,106,430,000,000,000,000,000,000,000 | 14 | Avoid negative integer overflow when `iccOffset > chunkLength`.
This fixes #790.
(cherry picked from commit 6fa2e31206127bd8bcac0269311f3775a8d6ea21) |
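The commit title describes the classic unsigned-underflow pattern: computing `chunkLength - iccOffset` when `iccOffset > chunkLength` wraps around to a huge value. A minimal sketch of the guard, with illustrative names rather than Exiv2's actual variables:

```c
#include <stddef.h>

static int icc_payload_size(size_t chunkLength, size_t iccOffset, size_t *out)
{
    if (iccOffset > chunkLength)        /* subtraction below would wrap */
        return -1;                      /* treat as corrupt input */
    *out = chunkLength - iccOffset;     /* provably in range */
    return 0;
}
```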
//! Save image as a sub-image into an existing .cimg file \overloading.
const CImg<T>& save_cimg(std::FILE *const file,
                         const unsigned int n0,
                         const unsigned int x0, const unsigned int y0,
                         const unsigned int z0, const unsigned int c0) const {
  CImgList<T>(*this,true).save_cimg(file,n0,x0,y0,z0,c0);
  return *this;
} | 0 | [
"CWE-125"
]
| CImg | 10af1e8c1ad2a58a0a3342a856bae63e8f257abb | 38,140,404,763,014,920,000,000,000,000,000,000,000 | 7 | Fix other issues in 'CImg<T>::load_bmp()'. |
vte_sequence_handler_screen_alignment_test (VteTerminal *terminal, GValueArray *params)
{
	long row;
	VteRowData *rowdata;
	VteScreen *screen;
	VteCell cell;
	screen = terminal->pvt->screen;
	for (row = terminal->pvt->screen->insert_delta;
	     row < terminal->pvt->screen->insert_delta + terminal->row_count;
	     row++) {
		/* Find this row. */
		while (_vte_ring_next(screen->row_data) <= row)
			_vte_terminal_ring_append (terminal, FALSE);
		_vte_terminal_adjust_adjustments(terminal);
		rowdata = _vte_ring_index_writable (screen->row_data, row);
		g_assert(rowdata != NULL);
		/* Clear this row. */
		_vte_row_data_shrink (rowdata, 0);
		_vte_terminal_emit_text_deleted(terminal);
		/* Fill this row. */
		cell.c = 'E';
		cell.attr = basic_cell.cell.attr;
		cell.attr.columns = 1;
		_vte_row_data_fill (rowdata, &cell, terminal->column_count);
		_vte_terminal_emit_text_inserted(terminal);
	}
	_vte_invalidate_all(terminal);
	/* We modified the display, so make a note of it for completeness. */
	terminal->pvt->text_modified_flag = TRUE;
} | 0 | []
| vte | 8b971a7b2c59902914ecbbc3915c45dd21530a91 | 160,448,306,784,363,500,000,000,000,000,000,000,000 | 34 | Fix terminal title reporting
Fixed CVE-2003-0070 again.
See also http://marc.info/?l=bugtraq&m=104612710031920&w=2 .
(cherry picked from commit 6042c75b5a6daa0e499e61c8e07242d890d38ff1) |
PolicyParseException(rapidjson::ParseResult&& pr)
: pr(pr) { } | 0 | [
"CWE-617"
]
| ceph | b3118cabb8060a8cc6a01c4e8264cb18e7b1745a | 46,667,695,783,923,040,000,000,000,000,000,000,000 | 2 | rgw: Remove assertions in IAM Policy
A couple of them could be triggered by user input.
Signed-off-by: Adam C. Emerson <[email protected]> |
static ssize_t status_show(struct device *dev,
			   struct device_attribute *attr, char *out)
{
	char *s = out;
	int pdev_nr;
	out += sprintf(out,
		       "hub port sta spd dev socket local_busid\n");
	pdev_nr = status_name_to_id(attr->attr.name);
	if (pdev_nr < 0)
		out += status_show_not_ready(pdev_nr, out);
	else
		out += status_show_vhci(pdev_nr, out);
	return out - s;
} | 0 | [
"CWE-200"
]
| linux | 2f2d0088eb93db5c649d2a5e34a3800a8a935fc5 | 27,966,582,594,237,590,000,000,000,000,000,000,000 | 17 | usbip: prevent vhci_hcd driver from leaking a socket pointer address
When a client has a USB device attached over IP, the vhci_hcd driver is
locally leaking a socket pointer address via the
/sys/devices/platform/vhci_hcd/status file (world-readable) and in debug
output when "usbip --debug port" is run.
Fix it to not leak. The socket pointer address is not used at the moment
and it was made visible as a convenient way to find IP address from socket
pointer address by looking up /proc/net/{tcp,tcp6}.
As this opens a security hole, the fix replaces socket pointer address with
sockfd.
Reported-by: Secunia Research <[email protected]>
Cc: stable <[email protected]>
Signed-off-by: Shuah Khan <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]> |
setBaseURL(instanceData *pData, es_str_t **url)
{
	char portBuf[64];
	int r;
	DEFiRet;
	*url = es_newStr(128);
	snprintf(portBuf, sizeof(portBuf), "%d", pData->port);
	r = es_addBuf(url, "http://", sizeof("http://")-1);
	if(r == 0) r = es_addBuf(url, (char*)pData->server, strlen((char*)pData->server));
	if(r == 0) r = es_addChar(url, ':');
	if(r == 0) r = es_addBuf(url, portBuf, strlen(portBuf));
	if(r == 0) r = es_addChar(url, '/');
	RETiRet;
} | 0 | [
"CWE-399"
]
| rsyslog | 80f88242982c9c6ad6ce8628fc5b94ea74051cf4 | 170,685,328,390,568,980,000,000,000,000,000,000,000 | 15 | bugfix: double-free in omelasticsearch
closes: http://bugzilla.adiscon.com/show_bug.cgi?id=461
Thanks to Marius Ionescu for providing a detailed bug report |
inline void TransposeConv(const float* input_data, const Dims<4>& input_dims,
                          const float* filter_data, const Dims<4>& filter_dims,
                          int stride_width, int stride_height, int pad_width,
                          int pad_height, float* output_data,
                          const Dims<4>& output_dims, float* im2col_data,
                          const Dims<4>& im2col_dims) {
  tflite::ConvParams op_params;
  // Padding type is ignored, but still set.
  op_params.padding_type = PaddingType::kSame;
  op_params.padding_values.width = pad_width;
  op_params.padding_values.height = pad_height;
  op_params.stride_width = stride_width;
  op_params.stride_height = stride_height;
  TransposeConv(op_params, DimsToShape(input_dims), input_data,
                DimsToShape(filter_dims), filter_data, DimsToShape(output_dims),
                output_data, DimsToShape(im2col_dims), im2col_data);
} | 0 | [
"CWE-703",
"CWE-835"
]
| tensorflow | dfa22b348b70bb89d6d6ec0ff53973bacb4f4695 | 233,320,326,984,904,800,000,000,000,000,000,000,000 | 18 | Prevent a division by 0 in average ops.
PiperOrigin-RevId: 385184660
Change-Id: I7affd4554f9b336fca29ac68f633232c094d0bd3 |
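The commit message concerns average ops rather than the TransposeConv wrapper shown; the underlying issue is dividing by an element count that can be zero. A hedged, simplified sketch of the guard:

```c
/* Average over a pooling window that may be empty after clipping to the
 * input; count must be checked before dividing. Illustrative only. */
static float window_average(const float *vals, int count)
{
    if (count <= 0)
        return 0.0f;             /* empty window: avoid division by zero */
    float sum = 0.0f;
    for (int i = 0; i < count; i++)
        sum += vals[i];
    return sum / (float)count;
}
```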
int _libssh2_transport_send(LIBSSH2_SESSION *session,
const unsigned char *data, size_t data_len,
const unsigned char *data2, size_t data2_len)
{
int blocksize =
(session->state & LIBSSH2_STATE_NEWKEYS) ?
session->local.crypt->blocksize : 8;
int padding_length;
size_t packet_length;
int total_length;
#ifdef RANDOM_PADDING
int rand_max;
int seed = data[0]; /* FIXME: make this random */
#endif
struct transportpacket *p = &session->packet;
int encrypted;
int compressed;
ssize_t ret;
int rc;
const unsigned char *orgdata = data;
size_t orgdata_len = data_len;
/*
* If the last read operation was interrupted in the middle of a key
* exchange, we must complete that key exchange before continuing to write
* further data.
*
* See the similar block in _libssh2_transport_read for more details.
*/
if(session->state & LIBSSH2_STATE_EXCHANGING_KEYS &&
!(session->state & LIBSSH2_STATE_KEX_ACTIVE)) {
/* Don't write any new packets if we're still in the middle of a key
* exchange. */
_libssh2_debug(session, LIBSSH2_TRACE_TRANS, "Redirecting into the"
" key re-exchange from _libssh2_transport_send");
rc = _libssh2_kex_exchange(session, 1, &session->startup_key_state);
if(rc)
return rc;
}
debugdump(session, "libssh2_transport_write plain", data, data_len);
if(data2)
debugdump(session, "libssh2_transport_write plain2", data2, data2_len);
/* FIRST, check if we have a pending write to complete. send_existing
only sanity-check data and data_len and not data2 and data2_len!! */
rc = send_existing(session, data, data_len, &ret);
if(rc)
return rc;
session->socket_block_directions &= ~LIBSSH2_SESSION_BLOCK_OUTBOUND;
if(ret)
/* set by send_existing if data was sent */
return rc;
encrypted = (session->state & LIBSSH2_STATE_NEWKEYS) ? 1 : 0;
compressed =
session->local.comp != NULL &&
session->local.comp->compress &&
((session->state & LIBSSH2_STATE_AUTHENTICATED) ||
session->local.comp->use_in_auth);
if(encrypted && compressed) {
/* the idea here is that these function must fail if the output gets
larger than what fits in the assigned buffer so thus they don't
check the input size as we don't know how much it compresses */
size_t dest_len = MAX_SSH_PACKET_LEN-5-256;
size_t dest2_len = dest_len;
/* compress directly to the target buffer */
rc = session->local.comp->comp(session,
&p->outbuf[5], &dest_len,
data, data_len,
&session->local.comp_abstract);
if(rc)
return rc; /* compression failure */
if(data2 && data2_len) {
/* compress directly to the target buffer right after where the
previous call put data */
dest2_len -= dest_len;
rc = session->local.comp->comp(session,
&p->outbuf[5 + dest_len], &dest2_len,
data2, data2_len,
&session->local.comp_abstract);
}
else
dest2_len = 0;
if(rc)
return rc; /* compression failure */
data_len = dest_len + dest2_len; /* use the combined length */
}
else {
if((data_len + data2_len) >= (MAX_SSH_PACKET_LEN-0x100))
/* too large packet, return error for this until we make this
function split it up and send multiple SSH packets */
return LIBSSH2_ERROR_INVAL;
/* copy the payload data */
memcpy(&p->outbuf[5], data, data_len);
if(data2 && data2_len)
memcpy(&p->outbuf[5 + data_len], data2, data2_len);
data_len += data2_len; /* use the combined length */
}
/* RFC4253 says: Note that the length of the concatenation of
'packet_length', 'padding_length', 'payload', and 'random padding'
MUST be a multiple of the cipher block size or 8, whichever is
larger. */
/* Plain math: (4 + 1 + packet_length + padding_length) % blocksize == 0 */
packet_length = data_len + 1 + 4; /* 1 is for padding_length field
4 for the packet_length field */
/* at this point we have it all except the padding */
/* first figure out our minimum padding amount to make it an even
block size */
padding_length = blocksize - (packet_length % blocksize);
/* if the padding becomes too small we add another blocksize worth
of it (taken from the original libssh2 where it didn't have any
real explanation) */
if(padding_length < 4) {
padding_length += blocksize;
}
#ifdef RANDOM_PADDING
/* FIXME: we can add padding here, but that also makes the packets
bigger etc */
/* now we can add 'blocksize' to the padding_length N number of times
(to "help thwart traffic analysis") but it must be less than 255 in
total */
rand_max = (255 - padding_length) / blocksize + 1;
padding_length += blocksize * (seed % rand_max);
#endif
packet_length += padding_length;
/* append the MAC length to the total_length size */
total_length =
packet_length + (encrypted ? session->local.mac->mac_len : 0);
/* store packet_length, which is the size of the whole packet except
the MAC and the packet_length field itself */
_libssh2_htonu32(p->outbuf, packet_length - 4);
/* store padding_length */
p->outbuf[4] = (unsigned char)padding_length;
/* fill the padding area with random junk */
_libssh2_random(p->outbuf + 5 + data_len, padding_length);
if(encrypted) {
size_t i;
/* Calculate MAC hash. Put the output at index packet_length,
since that size includes the whole packet. The MAC is
calculated on the entire unencrypted packet, including all
fields except the MAC field itself. */
session->local.mac->hash(session, p->outbuf + packet_length,
session->local.seqno, p->outbuf,
packet_length, NULL, 0,
&session->local.mac_abstract);
/* Encrypt the whole packet data, one block size at a time.
The MAC field is not encrypted. */
for(i = 0; i < packet_length; i += session->local.crypt->blocksize) {
unsigned char *ptr = &p->outbuf[i];
if(session->local.crypt->crypt(session, ptr,
session->local.crypt->blocksize,
&session->local.crypt_abstract))
return LIBSSH2_ERROR_ENCRYPT; /* encryption failure */
}
}
session->local.seqno++;
ret = LIBSSH2_SEND(session, p->outbuf, total_length,
LIBSSH2_SOCKET_SEND_FLAGS(session));
if(ret < 0)
_libssh2_debug(session, LIBSSH2_TRACE_SOCKET,
"Error sending %d bytes: %d", total_length, -ret);
else {
_libssh2_debug(session, LIBSSH2_TRACE_SOCKET, "Sent %d/%d bytes at %p",
ret, total_length, p->outbuf);
debugdump(session, "libssh2_transport_write send()", p->outbuf, ret);
}
if(ret != total_length) {
if(ret >= 0 || ret == -EAGAIN) {
/* the whole packet could not be sent, save the rest */
session->socket_block_directions |= LIBSSH2_SESSION_BLOCK_OUTBOUND;
p->odata = orgdata;
p->olen = orgdata_len;
p->osent = ret <= 0 ? 0 : ret;
p->ototal_num = total_length;
return LIBSSH2_ERROR_EAGAIN;
}
return LIBSSH2_ERROR_SOCKET_SEND;
}
/* the whole thing got sent away */
p->odata = NULL;
p->olen = 0;
return LIBSSH2_ERROR_NONE; /* all is good */
} | 0 | [
"CWE-787"
]
| libssh2 | dc109a7f518757741590bb993c0c8412928ccec2 | 10,454,402,877,076,280,000,000,000,000,000,000,000 | 213 | Security fixes (#315)
* Bounds checks
Fixes for CVEs
https://www.libssh2.org/CVE-2019-3863.html
https://www.libssh2.org/CVE-2019-3856.html
* Packet length bounds check
CVE
https://www.libssh2.org/CVE-2019-3855.html
* Response length check
CVE
https://www.libssh2.org/CVE-2019-3859.html
* Bounds check
CVE
https://www.libssh2.org/CVE-2019-3857.html
* Bounds checking
CVE
https://www.libssh2.org/CVE-2019-3859.html
and additional data validation
* Check bounds before reading into buffers
* Bounds checking
CVE
https://www.libssh2.org/CVE-2019-3859.html
* declare SIZE_MAX and UINT_MAX if needed |
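Most of the CVEs listed above are variations of one pattern: an attacker-controlled length field read from the wire must be validated against the bytes actually remaining before any copy. A generic sketch, not libssh2's real parser (invariant assumed: `*pos <= buflen`):

```c
#include <stdint.h>
#include <string.h>

/* Read a 4-byte big-endian length, then that many payload bytes, refusing
 * anything that does not fit in the input or the output. Illustrative only. */
static int read_string(const unsigned char *buf, size_t buflen,
                       size_t *pos, unsigned char *out, size_t outlen)
{
    uint32_t len;
    if (buflen - *pos < 4)                     /* room for the length field itself? */
        return -1;
    len = (uint32_t)buf[*pos] << 24 | (uint32_t)buf[*pos + 1] << 16 |
          (uint32_t)buf[*pos + 2] << 8 | (uint32_t)buf[*pos + 3];
    *pos += 4;
    if (len > buflen - *pos || len > outlen)   /* payload fits in input and output? */
        return -1;
    memcpy(out, buf + *pos, len);
    *pos += len;
    return (int)len;
}
```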
do_exedit(
exarg_T *eap,
win_T *old_curwin) // curwin before doing a split or NULL
{
int n;
int need_hide;
int exmode_was = exmode_active;
if ((eap->cmdidx != CMD_pedit && ERROR_IF_POPUP_WINDOW)
|| ERROR_IF_TERM_POPUP_WINDOW)
return;
/*
* ":vi" command ends Ex mode.
*/
if (exmode_active && (eap->cmdidx == CMD_visual
|| eap->cmdidx == CMD_view))
{
exmode_active = FALSE;
ex_pressedreturn = FALSE;
if (*eap->arg == NUL)
{
// Special case: ":global/pat/visual\NLvi-commands"
if (global_busy)
{
int rd = RedrawingDisabled;
int nwr = no_wait_return;
int ms = msg_scroll;
#ifdef FEAT_GUI
int he = hold_gui_events;
#endif
if (eap->nextcmd != NULL)
{
stuffReadbuff(eap->nextcmd);
eap->nextcmd = NULL;
}
if (exmode_was != EXMODE_VIM)
settmode(TMODE_RAW);
RedrawingDisabled = 0;
no_wait_return = 0;
need_wait_return = FALSE;
msg_scroll = 0;
#ifdef FEAT_GUI
hold_gui_events = 0;
#endif
must_redraw = CLEAR;
pending_exmode_active = TRUE;
main_loop(FALSE, TRUE);
pending_exmode_active = FALSE;
RedrawingDisabled = rd;
no_wait_return = nwr;
msg_scroll = ms;
#ifdef FEAT_GUI
hold_gui_events = he;
#endif
}
return;
}
}
if ((eap->cmdidx == CMD_new
|| eap->cmdidx == CMD_tabnew
|| eap->cmdidx == CMD_tabedit
|| eap->cmdidx == CMD_vnew) && *eap->arg == NUL)
{
// ":new" or ":tabnew" without argument: edit an new empty buffer
setpcmark();
(void)do_ecmd(0, NULL, NULL, eap, ECMD_ONE,
ECMD_HIDE + (eap->forceit ? ECMD_FORCEIT : 0),
old_curwin == NULL ? curwin : NULL);
}
else if ((eap->cmdidx != CMD_split && eap->cmdidx != CMD_vsplit)
|| *eap->arg != NUL
#ifdef FEAT_BROWSE
|| (cmdmod.cmod_flags & CMOD_BROWSE)
#endif
)
{
// Can't edit another file when "curbuf_lock" is set. Only ":edit"
// can bring us here, others are stopped earlier.
if (*eap->arg != NUL && curbuf_locked())
return;
n = readonlymode;
if (eap->cmdidx == CMD_view || eap->cmdidx == CMD_sview)
readonlymode = TRUE;
else if (eap->cmdidx == CMD_enew)
readonlymode = FALSE; // 'readonly' doesn't make sense in an
// empty buffer
if (eap->cmdidx != CMD_balt && eap->cmdidx != CMD_badd)
setpcmark();
if (do_ecmd(0, (eap->cmdidx == CMD_enew ? NULL : eap->arg),
NULL, eap,
// ":edit" goes to first line if Vi compatible
(*eap->arg == NUL && eap->do_ecmd_lnum == 0
&& vim_strchr(p_cpo, CPO_GOTO1) != NULL)
? ECMD_ONE : eap->do_ecmd_lnum,
(buf_hide(curbuf) ? ECMD_HIDE : 0)
+ (eap->forceit ? ECMD_FORCEIT : 0)
// after a split we can use an existing buffer
+ (old_curwin != NULL ? ECMD_OLDBUF : 0)
+ (eap->cmdidx == CMD_badd ? ECMD_ADDBUF : 0)
+ (eap->cmdidx == CMD_balt ? ECMD_ALTBUF : 0)
, old_curwin == NULL ? curwin : NULL) == FAIL)
{
// Editing the file failed. If the window was split, close it.
if (old_curwin != NULL)
{
need_hide = (curbufIsChanged() && curbuf->b_nwindows <= 1);
if (!need_hide || buf_hide(curbuf))
{
#if defined(FEAT_EVAL)
cleanup_T cs;
// Reset the error/interrupt/exception state here so that
// aborting() returns FALSE when closing a window.
enter_cleanup(&cs);
#endif
#ifdef FEAT_GUI
need_mouse_correct = TRUE;
#endif
win_close(curwin, !need_hide && !buf_hide(curbuf));
#if defined(FEAT_EVAL)
// Restore the error/interrupt/exception state if not
// discarded by a new aborting error, interrupt, or
// uncaught exception.
leave_cleanup(&cs);
#endif
}
}
}
else if (readonlymode && curbuf->b_nwindows == 1)
{
// When editing an already visited buffer, 'readonly' won't be set
// but the previous value is kept. With ":view" and ":sview" we
// want the file to be readonly, except when another window is
// editing the same buffer.
curbuf->b_p_ro = TRUE;
}
readonlymode = n;
}
else
{
if (eap->do_ecmd_cmd != NULL)
do_cmd_argument(eap->do_ecmd_cmd);
n = curwin->w_arg_idx_invalid;
check_arg_idx(curwin);
if (n != curwin->w_arg_idx_invalid)
maketitle();
}
/*
* if ":split file" worked, set alternate file name in old window to new
* file
*/
if (old_curwin != NULL
&& *eap->arg != NUL
&& curwin != old_curwin
&& win_valid(old_curwin)
&& old_curwin->w_buffer != curbuf
&& (cmdmod.cmod_flags & CMOD_KEEPALT) == 0)
old_curwin->w_alt_fnum = curbuf->b_fnum;
ex_no_reprint = TRUE;
} | 0 | [
"CWE-416"
]
| vim | e031fe90cf2e375ce861ff5e5e281e4ad229ebb9 | 243,607,807,955,892,670,000,000,000,000,000,000,000 | 169 | patch 8.2.3741: using freed memory in open command
Problem: Using freed memory in open command.
Solution: Make a copy of the current line. |
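A hedged sketch of the fix pattern named in the solution line (the real patch is in vim's :open handling; this only illustrates "copy before use"):

// Duplicate the current line before running code that may reallocate or
// free the buffer text, then operate only on the copy.
char_u *line_copy = vim_strsave(ml_get_curline());
if (line_copy == NULL)
    return;
// ... parse/use line_copy instead of the (possibly freed) buffer line ...
vim_free(line_copy);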
TEST_F(QueryPlannerTest, IntersectSortFromAndHash) {
params.options = QueryPlannerParams::NO_TABLE_SCAN | QueryPlannerParams::INDEX_INTERSECTION;
addIndex(BSON("a" << 1));
addIndex(BSON("b" << 1));
runQuerySortProj(fromjson("{a: 1, b:{$gt: 1}}"), fromjson("{b:1}"), BSONObj());
// This provides the sort.
assertSolutionExists(
"{fetch: {filter: null, node: {andHash: {nodes: ["
"{ixscan: {filter: null, pattern: {a:1}}},"
"{ixscan: {filter: null, pattern: {b:1}}}]}}}}");
// Rearrange the preds, shouldn't matter.
runQuerySortProj(fromjson("{b: 1, a:{$lt: 7}}"), fromjson("{b:1}"), BSONObj());
assertSolutionExists(
"{fetch: {filter: null, node: {andHash: {nodes: ["
"{ixscan: {filter: null, pattern: {a:1}}},"
"{ixscan: {filter: null, pattern: {b:1}}}]}}}}");
} | 0 | []
| mongo | 64095239f41e9f3841d8be9088347db56d35c891 | 28,696,748,727,592,540,000,000,000,000,000,000,000 | 19 | SERVER-51083 Reject invalid UTF-8 from $regex match expressions |
bool RemoveControlInput(NodeDef* node, const string& control_input_to_remove,
NodeMap* node_map) {
for (int pos = node->input_size() - 1; pos >= 0; --pos) {
const string& input = node->input(pos);
if (input[0] != '^') break;
if (input == control_input_to_remove) {
node->mutable_input()->SwapElements(pos, node->input_size() - 1);
node->mutable_input()->RemoveLast();
node_map->RemoveOutput(NodeName(input), node->name());
return true;
}
}
return false;
} | 0 | [
"CWE-476"
]
| tensorflow | e6340f0665d53716ef3197ada88936c2a5f7a2d3 | 80,445,113,311,466,340,000,000,000,000,000,000,000 | 14 | Handle a special grappler case resulting in crash.
It might happen that a malformed input could be used to trick Grappler into trying to optimize a node with no inputs. This, in turn, would produce a null pointer dereference and a segfault.
PiperOrigin-RevId: 369242852
Change-Id: I2e5cbe7aec243d34a6d60220ac8ac9b16f136f6b |
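A hedged C sketch of the guard the message implies (Grappler itself is C++; node_t and its fields are hypothetical stand-ins):

/* Refuse to rewrite a node that has no inputs instead of dereferencing
 * input 0 unconditionally. */
static int first_input(const node_t *node, const char **out)
{
    if (node == NULL || node->num_inputs == 0)
        return -1;              /* malformed graph: nothing to optimize */
    *out = node->inputs[0];
    return 0;
}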
static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
kvm_x86_ops->set_idt(emul_to_vcpu(ctxt), dt);
} | 0 | []
| kvm | 0769c5de24621141c953fbe1f943582d37cb4244 | 298,860,625,850,850,630,000,000,000,000,000,000,000 | 4 | KVM: x86: extend "struct x86_emulate_ops" with "get_cpuid"
In order to be able to proceed checks on CPU-specific properties
within the emulator, function "get_cpuid" is introduced.
With "get_cpuid" it is possible to virtually call the guests
"cpuid"-opcode without changing the VM's context.
[mtosatti: cleanup/beautify code]
Signed-off-by: Stephan Baerwolf <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]> |
static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp)
{
return -1;
} | 0 | [
"CWE-476"
]
| linux | 9f46c187e2e680ecd9de7983e4d081c3391acc76 | 69,263,542,557,187,610,000,000,000,000,000,000,000 | 5 | KVM: x86/mmu: fix NULL pointer dereference on guest INVPCID
With shadow paging enabled, the INVPCID instruction results in a call
to kvm_mmu_invpcid_gva. If INVPCID is executed with CR0.PG=0, the
invlpg callback is not set and the result is a NULL pointer dereference.
Fix it trivially by checking for mmu->invlpg before every call.
There are other possibilities:
- check for CR0.PG, because KVM (like all Intel processors after P5)
flushes guest TLB on CR0.PG changes so that INVPCID/INVLPG are a
nop with paging disabled
- check for EFER.LMA, because KVM syncs and flushes when switching
MMU contexts outside of 64-bit mode
All of these are tricky, go for the simple solution. This is CVE-2022-1789.
Reported-by: Yongkang Jia <[email protected]>
Cc: [email protected]
Signed-off-by: Paolo Bonzini <[email protected]> |
int wc_Ed448PrivateKeyToDer(ed448_key* key, byte* output, word32 inLen)
{
return wc_BuildEd448KeyDer(key, output, inLen, 0);
} | 0 | [
"CWE-125",
"CWE-345"
]
| wolfssl | f93083be72a3b3d956b52a7ec13f307a27b6e093 | 263,890,577,287,307,180,000,000,000,000,000,000,000 | 4 | OCSP: improve handling of OCSP no check extension |
TEST(BSONValidate, RandomData) {
PseudoRandom r(17);
int numValid = 0;
int numToRun = 1000;
long long jsonSize = 0;
for ( int i=0; i<numToRun; i++ ) {
int size = 1234;
char* x = new char[size];
int* xx = reinterpret_cast<int*>(x);
xx[0] = size;
for ( int i=4; i<size; i++ ) {
x[i] = r.nextInt32( 255 );
}
x[size-1] = 0;
BSONObj o( x );
ASSERT_EQUALS( size, o.objsize() );
if ( o.valid() ) {
numValid++;
jsonSize += o.jsonString().size();
}
delete[] x;
}
log() << "RandomData: didn't crash valid/total: " << numValid << "/" << numToRun << " (want few valid ones)"
<< " jsonSize: " << jsonSize << endl;
} | 1 | [
"CWE-20"
]
| mongo | 6889d1658136c753998b4a408dc8d1a3ec28e3b9 | 37,185,774,658,262,866,000,000,000,000,000,000,000 | 36 | SERVER-7769 - fast bson validate |
static __inline__ unsigned hash_dst(u32 *dst, u8 protocol, u8 tunnelid)
{
unsigned h = dst[RSVP_DST_LEN-1];
h ^= h>>16;
h ^= h>>8;
return (h ^ protocol ^ tunnelid) & 0xFF;
} | 0 | [
"CWE-200"
]
| linux-2.6 | 8a47077a0b5aa2649751c46e7a27884e6686ccbf | 330,348,935,688,063,180,000,000,000,000,000,000,000 | 7 | [NETLINK]: Missing padding fields in dumped structures
Plug holes with padding fields and initialize them to zero.
Signed-off-by: Patrick McHardy <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
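A hedged sketch of the padding pattern the message describes (the struct here is hypothetical, not one of the actual netlink dump structures):

/* Make the compiler-inserted holes explicit and zero them, so the bytes
 * copied to user space via netlink never carry stale kernel memory. */
struct tc_dump_example {
    unsigned char  protocol;
    unsigned char  pad1;    /* was an implicit 1-byte hole */
    unsigned short pad2;    /* was an implicit 2-byte hole */
    unsigned int   packets;
};

static void fill_example(struct tc_dump_example *st)
{
    st->pad1 = 0;
    st->pad2 = 0;
}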
qf_set_cwindow_options(void)
{
// switch off 'swapfile'
set_option_value_give_err((char_u *)"swf", 0L, NULL, OPT_LOCAL);
set_option_value_give_err((char_u *)"bt",
0L, (char_u *)"quickfix", OPT_LOCAL);
set_option_value_give_err((char_u *)"bh", 0L, (char_u *)"hide", OPT_LOCAL);
RESET_BINDING(curwin);
#ifdef FEAT_DIFF
curwin->w_p_diff = FALSE;
#endif
#ifdef FEAT_FOLDING
set_option_value_give_err((char_u *)"fdm", 0L, (char_u *)"manual",
OPT_LOCAL);
#endif
} | 0 | [
"CWE-416"
]
| vim | 4f1b083be43f351bc107541e7b0c9655a5d2c0bb | 317,946,894,252,712,400,000,000,000,000,000,000,000 | 16 | patch 9.0.0322: crash when no errors and 'quickfixtextfunc' is set
Problem: Crash when no errors and 'quickfixtextfunc' is set.
Solution: Do not handle errors if there aren't any. |
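A minimal hedged sketch of the guard (qf_count is the quickfix list's entry count; the exact placement in vim's code differs):

// Bail out before invoking the 'quickfixtextfunc' callback when the
// list holds no errors, instead of formatting a nonexistent entry.
if (qfl == NULL || qfl->qf_count <= 0)
    return;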
WandPrivate MagickBooleanType CLIListOperatorImages(MagickCLI *cli_wand,
const char *option,const char *arg1n,const char *arg2n)
{
const char /* percent escaped versions of the args */
*arg1,
*arg2;
Image
*new_images;
MagickStatusType
status;
ssize_t
parse;
#define _image_info (cli_wand->wand.image_info)
#define _images (cli_wand->wand.images)
#define _exception (cli_wand->wand.exception)
#define _draw_info (cli_wand->draw_info)
#define _quantize_info (cli_wand->quantize_info)
#define _process_flags (cli_wand->process_flags)
#define _option_type ((CommandOptionFlags) cli_wand->command->flags)
#define IfNormalOp (*option=='-')
#define IfPlusOp (*option!='-')
#define IsNormalOp IfNormalOp ? MagickTrue : MagickFalse
assert(cli_wand != (MagickCLI *) NULL);
assert(cli_wand->signature == MagickWandSignature);
assert(cli_wand->wand.signature == MagickWandSignature);
assert(_images != (Image *) NULL); /* _images must be present */
if (cli_wand->wand.debug != MagickFalse)
(void) CLILogEvent(cli_wand,CommandEvent,GetMagickModule(),
"- List Operator: %s \"%s\" \"%s\"", option,
arg1n == (const char *) NULL ? "null" : arg1n,
arg2n == (const char *) NULL ? "null" : arg2n);
arg1 = arg1n;
arg2 = arg2n;
/* Interpret Percent Escapes in Arguments - using first image */
if ( (((_process_flags & ProcessInterpretProperities) != 0 )
|| ((_option_type & AlwaysInterpretArgsFlag) != 0)
) && ((_option_type & NeverInterpretArgsFlag) == 0) ) {
/* Interpret Percent escapes in argument 1 */
if (arg1n != (char *) NULL) {
arg1=InterpretImageProperties(_image_info,_images,arg1n,_exception);
if (arg1 == (char *) NULL) {
CLIWandException(OptionWarning,"InterpretPropertyFailure",option);
arg1=arg1n; /* use the given argument as is */
}
}
if (arg2n != (char *) NULL) {
arg2=InterpretImageProperties(_image_info,_images,arg2n,_exception);
if (arg2 == (char *) NULL) {
CLIWandException(OptionWarning,"InterpretPropertyFailure",option);
arg2=arg2n; /* use the given argument as is */
}
}
}
#undef _process_flags
#undef _option_type
status=MagickTrue;
new_images=NewImageList();
switch (*(option+1))
{
case 'a':
{
if (LocaleCompare("append",option+1) == 0)
{
new_images=AppendImages(_images,IsNormalOp,_exception);
break;
}
if (LocaleCompare("average",option+1) == 0)
{
CLIWandWarnReplaced("-evaluate-sequence Mean");
(void) CLIListOperatorImages(cli_wand,"-evaluate-sequence","Mean",
NULL);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'c':
{
if (LocaleCompare("channel-fx",option+1) == 0)
{
new_images=ChannelFxImage(_images,arg1,_exception);
break;
}
if (LocaleCompare("clut",option+1) == 0)
{
Image
*clut_image;
/* FUTURE - make this a compose option, and thus can be used
with layers compose or even compose last image over all other
_images.
*/
new_images=RemoveFirstImageFromList(&_images);
clut_image=RemoveLastImageFromList(&_images);
/* FUTURE - produce Exception, rather than silent fail */
if (clut_image == (Image *) NULL)
break;
(void) ClutImage(new_images,clut_image,new_images->interpolate,
_exception);
clut_image=DestroyImage(clut_image);
break;
}
if (LocaleCompare("coalesce",option+1) == 0)
{
new_images=CoalesceImages(_images,_exception);
break;
}
if (LocaleCompare("combine",option+1) == 0)
{
parse=(ssize_t) _images->colorspace;
if (_images->number_channels < GetImageListLength(_images))
parse=sRGBColorspace;
if ( IfPlusOp )
parse=ParseCommandOption(MagickColorspaceOptions,MagickFalse,arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedColorspace",option,
arg1);
new_images=CombineImages(_images,(ColorspaceType) parse,_exception);
break;
}
if (LocaleCompare("compare",option+1) == 0)
{
double
distortion;
Image
*image,
*reconstruct_image;
MetricType
metric;
/*
Mathematically and visually annotate the difference between an
image and its reconstruction.
*/
image=RemoveFirstImageFromList(&_images);
reconstruct_image=RemoveFirstImageFromList(&_images);
/* FUTURE - produce Exception, rather than silent fail */
if (reconstruct_image == (Image *) NULL)
{
image=DestroyImage(image);
break;
}
metric=UndefinedErrorMetric;
option=GetImageOption(_image_info,"metric");
if (option != (const char *) NULL)
metric=(MetricType) ParseCommandOption(MagickMetricOptions,
MagickFalse,option);
new_images=CompareImages(image,reconstruct_image,metric,&distortion,
_exception);
(void) distortion;
reconstruct_image=DestroyImage(reconstruct_image);
image=DestroyImage(image);
break;
}
if (LocaleCompare("complex",option+1) == 0)
{
parse=ParseCommandOption(MagickComplexOptions,MagickFalse,arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedEvaluateOperator",
option,arg1);
new_images=ComplexImages(_images,(ComplexOperator) parse,_exception);
break;
}
if (LocaleCompare("composite",option+1) == 0)
{
CompositeOperator
compose;
const char*
value;
MagickBooleanType
clip_to_self;
Image
*mask_image,
*source_image;
RectangleInfo
geometry;
/* Compose value from "-compose" option only */
value=GetImageOption(_image_info,"compose");
if (value == (const char *) NULL)
compose=OverCompositeOp; /* use Over not source_image->compose */
else
compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions,
MagickFalse,value);
/* Get "clip-to-self" expert setting (false is normal) */
clip_to_self=GetCompositeClipToSelf(compose);
value=GetImageOption(_image_info,"compose:clip-to-self");
if (value != (const char *) NULL)
clip_to_self=IsStringTrue(value);
value=GetImageOption(_image_info,"compose:outside-overlay");
if (value != (const char *) NULL)
clip_to_self=IsStringFalse(value); /* deprecated */
new_images=RemoveFirstImageFromList(&_images);
source_image=RemoveFirstImageFromList(&_images);
if (source_image == (Image *) NULL)
break; /* FUTURE - produce Exception, rather than silent fail */
/* FUTURE - this should not be here! - should be part of -geometry */
if (source_image->geometry != (char *) NULL)
{
RectangleInfo
resize_geometry;
(void) ParseRegionGeometry(source_image,source_image->geometry,
&resize_geometry,_exception);
if ((source_image->columns != resize_geometry.width) ||
(source_image->rows != resize_geometry.height))
{
Image
*resize_image;
resize_image=ResizeImage(source_image,resize_geometry.width,
resize_geometry.height,source_image->filter,_exception);
if (resize_image != (Image *) NULL)
{
source_image=DestroyImage(source_image);
source_image=resize_image;
}
}
}
SetGeometry(source_image,&geometry);
(void) ParseAbsoluteGeometry(source_image->geometry,&geometry);
GravityAdjustGeometry(new_images->columns,new_images->rows,
new_images->gravity, &geometry);
mask_image=RemoveFirstImageFromList(&_images);
if (mask_image == (Image *) NULL)
status&=CompositeImage(new_images,source_image,compose,clip_to_self,
geometry.x,geometry.y,_exception);
else
{
if ((compose == DisplaceCompositeOp) ||
(compose == DistortCompositeOp))
{
status&=CompositeImage(source_image,mask_image,
CopyGreenCompositeOp,MagickTrue,0,0,_exception);
status&=CompositeImage(new_images,source_image,compose,
clip_to_self,geometry.x,geometry.y,_exception);
}
else
{
Image
*clone_image;
clone_image=CloneImage(new_images,0,0,MagickTrue,_exception);
if (clone_image == (Image *) NULL)
break;
status&=CompositeImage(new_images,source_image,compose,
clip_to_self,geometry.x,geometry.y,_exception);
status&=CompositeImage(new_images,mask_image,
CopyAlphaCompositeOp,MagickTrue,0,0,_exception);
status&=CompositeImage(clone_image,new_images,OverCompositeOp,
clip_to_self,0,0,_exception);
new_images=DestroyImage(new_images);
new_images=clone_image;
}
mask_image=DestroyImage(mask_image);
}
source_image=DestroyImage(source_image);
break;
}
if (LocaleCompare("copy",option+1) == 0)
{
Image
*source_image;
OffsetInfo
offset;
RectangleInfo
geometry;
/*
Copy image pixels.
*/
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if (IsGeometry(arg2) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg2);
(void) ParsePageGeometry(_images,arg2,&geometry,_exception);
offset.x=geometry.x;
offset.y=geometry.y;
source_image=_images;
if (source_image->next != (Image *) NULL)
source_image=source_image->next;
(void) ParsePageGeometry(source_image,arg1,&geometry,_exception);
(void) CopyImagePixels(_images,source_image,&geometry,&offset,
_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'd':
{
if (LocaleCompare("deconstruct",option+1) == 0)
{
CLIWandWarnReplaced("-layer CompareAny");
(void) CLIListOperatorImages(cli_wand,"-layer","CompareAny",NULL);
break;
}
if (LocaleCompare("delete",option+1) == 0)
{
if (IfNormalOp)
DeleteImages(&_images,arg1,_exception);
else
DeleteImages(&_images,"-1",_exception);
break;
}
if (LocaleCompare("duplicate",option+1) == 0)
{
if (IfNormalOp)
{
const char
*p;
size_t
number_duplicates;
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,
arg1);
number_duplicates=(size_t) StringToLong(arg1);
p=strchr(arg1,',');
if (p == (const char *) NULL)
new_images=DuplicateImages(_images,number_duplicates,"-1",
_exception);
else
new_images=DuplicateImages(_images,number_duplicates,p,
_exception);
}
else
new_images=DuplicateImages(_images,1,"-1",_exception);
AppendImageToList(&_images, new_images);
new_images=(Image *) NULL;
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'e':
{
if (LocaleCompare("evaluate-sequence",option+1) == 0)
{
parse=ParseCommandOption(MagickEvaluateOptions,MagickFalse,arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedEvaluateOperator",
option,arg1);
new_images=EvaluateImages(_images,(MagickEvaluateOperator) parse,
_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'f':
{
if (LocaleCompare("fft",option+1) == 0)
{
new_images=ForwardFourierTransformImage(_images,IsNormalOp,
_exception);
break;
}
if (LocaleCompare("flatten",option+1) == 0)
{
/* REDIRECTED to use -layers flatten instead */
(void) CLIListOperatorImages(cli_wand,"-layers",option+1,NULL);
break;
}
if (LocaleCompare("fx",option+1) == 0)
{
new_images=FxImage(_images,arg1,_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'h':
{
if (LocaleCompare("hald-clut",option+1) == 0)
{
/* FUTURE - make this a compose option (and thus layers compose )
or perhaps compose last image over all other _images.
*/
Image
*hald_image;
new_images=RemoveFirstImageFromList(&_images);
hald_image=RemoveLastImageFromList(&_images);
if (hald_image == (Image *) NULL)
break;
(void) HaldClutImage(new_images,hald_image,_exception);
hald_image=DestroyImage(hald_image);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'i':
{
if (LocaleCompare("ift",option+1) == 0)
{
Image
*magnitude_image,
*phase_image;
magnitude_image=RemoveFirstImageFromList(&_images);
phase_image=RemoveFirstImageFromList(&_images);
/* FUTURE - produce Exception, rather than silent fail */
if (phase_image == (Image *) NULL)
break;
new_images=InverseFourierTransformImage(magnitude_image,phase_image,
IsNormalOp,_exception);
magnitude_image=DestroyImage(magnitude_image);
phase_image=DestroyImage(phase_image);
break;
}
if (LocaleCompare("insert",option+1) == 0)
{
Image
*insert_image,
*index_image;
ssize_t
index;
if (IfNormalOp && (IsGeometry(arg1) == MagickFalse))
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
index=0;
insert_image=RemoveLastImageFromList(&_images);
if (IfNormalOp)
index=(ssize_t) StringToLong(arg1);
index_image=insert_image;
if (index == 0)
PrependImageToList(&_images,insert_image);
else if (index == (ssize_t) GetImageListLength(_images))
AppendImageToList(&_images,insert_image);
else
{
index_image=GetImageFromList(_images,index-1);
if (index_image == (Image *) NULL)
CLIWandExceptArgBreak(OptionError,"NoSuchImage",option,arg1);
InsertImageInList(&index_image,insert_image);
}
_images=GetFirstImageInList(index_image);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'l':
{
if (LocaleCompare("layers",option+1) == 0)
{
parse=ParseCommandOption(MagickLayerOptions,MagickFalse,arg1);
if ( parse < 0 )
CLIWandExceptArgBreak(OptionError,"UnrecognizedLayerMethod",
option,arg1);
switch ((LayerMethod) parse)
{
case CoalesceLayer:
{
new_images=CoalesceImages(_images,_exception);
break;
}
case CompareAnyLayer:
case CompareClearLayer:
case CompareOverlayLayer:
default:
{
new_images=CompareImagesLayers(_images,(LayerMethod) parse,
_exception);
break;
}
case MergeLayer:
case FlattenLayer:
case MosaicLayer:
case TrimBoundsLayer:
{
new_images=MergeImageLayers(_images,(LayerMethod) parse,
_exception);
break;
}
case DisposeLayer:
{
new_images=DisposeImages(_images,_exception);
break;
}
case OptimizeImageLayer:
{
new_images=OptimizeImageLayers(_images,_exception);
break;
}
case OptimizePlusLayer:
{
new_images=OptimizePlusImageLayers(_images,_exception);
break;
}
case OptimizeTransLayer:
{
OptimizeImageTransparency(_images,_exception);
break;
}
case RemoveDupsLayer:
{
RemoveDuplicateLayers(&_images,_exception);
break;
}
case RemoveZeroLayer:
{
RemoveZeroDelayLayers(&_images,_exception);
break;
}
case OptimizeLayer:
{ /* General Purpose, GIF Animation Optimizer. */
new_images=CoalesceImages(_images,_exception);
if (new_images == (Image *) NULL)
break;
_images=DestroyImageList(_images);
_images=OptimizeImageLayers(new_images,_exception);
if (_images == (Image *) NULL)
break;
new_images=DestroyImageList(new_images);
OptimizeImageTransparency(_images,_exception);
(void) RemapImages(_quantize_info,_images,(Image *) NULL,
_exception);
break;
}
case CompositeLayer:
{
Image
*source;
RectangleInfo
geometry;
CompositeOperator
compose;
const char*
value;
value=GetImageOption(_image_info,"compose");
compose=OverCompositeOp; /* Default to Over */
if (value != (const char *) NULL)
compose=(CompositeOperator) ParseCommandOption(
MagickComposeOptions,MagickFalse,value);
/* Split image sequence at the first 'NULL:' image. */
source=_images;
while (source != (Image *) NULL)
{
source=GetNextImageInList(source);
if ((source != (Image *) NULL) &&
(LocaleCompare(source->magick,"NULL") == 0))
break;
}
if (source != (Image *) NULL)
{
if ((GetPreviousImageInList(source) == (Image *) NULL) ||
(GetNextImageInList(source) == (Image *) NULL))
source=(Image *) NULL;
else
{ /* Separate the two lists, junk the null: image. */
source=SplitImageList(source->previous);
DeleteImageFromList(&source);
}
}
if (source == (Image *) NULL)
{
(void) ThrowMagickException(_exception,GetMagickModule(),
OptionError,"MissingNullSeparator","layers Composite");
break;
}
/* Adjust offset with gravity and virtual canvas. */
SetGeometry(_images,&geometry);
(void) ParseAbsoluteGeometry(_images->geometry,&geometry);
geometry.width=source->page.width != 0 ?
source->page.width : source->columns;
geometry.height=source->page.height != 0 ?
source->page.height : source->rows;
GravityAdjustGeometry(_images->page.width != 0 ?
_images->page.width : _images->columns,
_images->page.height != 0 ? _images->page.height :
_images->rows,_images->gravity,&geometry);
/* Compose the two image sequences together */
CompositeLayers(_images,compose,source,geometry.x,geometry.y,
_exception);
source=DestroyImageList(source);
break;
}
}
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'm':
{
if (LocaleCompare("map",option+1) == 0)
{
CLIWandWarnReplaced("+remap");
(void) RemapImages(_quantize_info,_images,(Image *) NULL,_exception);
break;
}
if (LocaleCompare("metric",option+1) == 0)
{
(void) SetImageOption(_image_info,option+1,arg1);
break;
}
if (LocaleCompare("morph",option+1) == 0)
{
Image
*morph_image;
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
morph_image=MorphImages(_images,StringToUnsignedLong(arg1),
_exception);
if (morph_image == (Image *) NULL)
break;
_images=DestroyImageList(_images);
_images=morph_image;
break;
}
if (LocaleCompare("mosaic",option+1) == 0)
{
/* REDIRECTED to use -layers mosaic instead */
(void) CLIListOperatorImages(cli_wand,"-layers",option+1,NULL);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'p':
{
if (LocaleCompare("poly",option+1) == 0)
{
double
*args;
ssize_t
count;
/* convert argument string into an array of doubles */
args = StringToArrayOfDoubles(arg1,&count,_exception);
if (args == (double *) NULL )
CLIWandExceptArgBreak(OptionError,"InvalidNumberList",option,arg1);
new_images=PolynomialImage(_images,(size_t) (count >> 1),args,
_exception);
args=(double *) RelinquishMagickMemory(args);
break;
}
if (LocaleCompare("process",option+1) == 0)
{
/* FUTURE: better parsing using ScriptToken() from string ??? */
char
**arguments;
int
j,
number_arguments;
arguments=StringToArgv(arg1,&number_arguments);
if (arguments == (char **) NULL)
break;
if (strchr(arguments[1],'=') != (char *) NULL)
{
char
breaker,
quote,
*token;
const char
*arguments;
int
next,
status;
size_t
length;
TokenInfo
*token_info;
/*
Support old style syntax, filter="-option arg1".
*/
assert(arg1 != (const char *) NULL);
length=strlen(arg1);
token=(char *) NULL;
if (~length >= (MagickPathExtent-1))
token=(char *) AcquireQuantumMemory(length+MagickPathExtent,
sizeof(*token));
if (token == (char *) NULL)
break;
next=0;
arguments=arg1;
token_info=AcquireTokenInfo();
status=Tokenizer(token_info,0,token,length,arguments,"","=",
"\"",'\0',&breaker,&next,&quote);
token_info=DestroyTokenInfo(token_info);
if (status == 0)
{
const char
*argv;
argv=(&(arguments[next]));
(void) InvokeDynamicImageFilter(token,&_images,1,&argv,
_exception);
}
token=DestroyString(token);
break;
}
(void) SubstituteString(&arguments[1],"-","");
(void) InvokeDynamicImageFilter(arguments[1],&_images,
number_arguments-2,(const char **) arguments+2,_exception);
for (j=0; j < number_arguments; j++)
arguments[j]=DestroyString(arguments[j]);
arguments=(char **) RelinquishMagickMemory(arguments);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'r':
{
if (LocaleCompare("remap",option+1) == 0)
{
(void) RemapImages(_quantize_info,_images,(Image *) NULL,_exception);
break;
}
if (LocaleCompare("reverse",option+1) == 0)
{
ReverseImageList(&_images);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 's':
{
if (LocaleCompare("smush",option+1) == 0)
{
/* FUTURE: this option needs more work to make better */
ssize_t
offset;
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
offset=(ssize_t) StringToLong(arg1);
new_images=SmushImages(_images,IsNormalOp,offset,_exception);
break;
}
if (LocaleCompare("subimage",option+1) == 0)
{
Image
*base_image,
*compare_image;
const char
*value;
MetricType
metric;
double
similarity;
RectangleInfo
offset;
base_image=GetImageFromList(_images,0);
compare_image=GetImageFromList(_images,1);
/* Comparison Metric */
metric=UndefinedErrorMetric;
value=GetImageOption(_image_info,"metric");
if (value != (const char *) NULL)
metric=(MetricType) ParseCommandOption(MagickMetricOptions,
MagickFalse,value);
new_images=SimilarityImage(base_image,compare_image,metric,0.0,
&offset,&similarity,_exception);
if (new_images != (Image *) NULL)
{
char
result[MagickPathExtent];
(void) FormatLocaleString(result,MagickPathExtent,"%lf",
similarity);
(void) SetImageProperty(new_images,"subimage:similarity",result,
_exception);
(void) FormatLocaleString(result,MagickPathExtent,"%+ld",(long)
offset.x);
(void) SetImageProperty(new_images,"subimage:x",result,
_exception);
(void) FormatLocaleString(result,MagickPathExtent,"%+ld",(long)
offset.y);
(void) SetImageProperty(new_images,"subimage:y",result,
_exception);
(void) FormatLocaleString(result,MagickPathExtent,
"%lux%lu%+ld%+ld",(unsigned long) offset.width,(unsigned long)
offset.height,(long) offset.x,(long) offset.y);
(void) SetImageProperty(new_images,"subimage:offset",result,
_exception);
}
break;
}
if (LocaleCompare("swap",option+1) == 0)
{
Image
*p,
*q,
*swap;
ssize_t
index,
swap_index;
index=(-1);
swap_index=(-2);
if (IfNormalOp) {
GeometryInfo
geometry_info;
MagickStatusType
flags;
swap_index=(-1);
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & RhoValue) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
index=(ssize_t) geometry_info.rho;
if ((flags & SigmaValue) != 0)
swap_index=(ssize_t) geometry_info.sigma;
}
p=GetImageFromList(_images,index);
q=GetImageFromList(_images,swap_index);
if ((p == (Image *) NULL) || (q == (Image *) NULL)) {
if (IfNormalOp)
CLIWandExceptArgBreak(OptionError,"InvalidImageIndex",option,arg1)
else
CLIWandExceptionBreak(OptionError,"TwoOrMoreImagesRequired",option);
}
if (p == q)
CLIWandExceptArgBreak(OptionError,"InvalidImageIndex",option,arg1);
swap=CloneImage(p,0,0,MagickTrue,_exception);
if (swap == (Image *) NULL)
CLIWandExceptArgBreak(ResourceLimitError,"MemoryAllocationFailed",
option,GetExceptionMessage(errno));
ReplaceImageInList(&p,CloneImage(q,0,0,MagickTrue,_exception));
ReplaceImageInList(&q,swap);
_images=GetFirstImageInList(q);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
default:
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
/* clean up percent escape interpreted strings */
if (arg1 != arg1n )
arg1=DestroyString((char *)arg1);
if (arg2 != arg2n )
arg2=DestroyString((char *)arg2);
/* if new image list generated, replace existing image list */
if (new_images == (Image *) NULL)
return(status == 0 ? MagickFalse : MagickTrue);
_images=DestroyImageList(_images);
_images=GetFirstImageInList(new_images);
return(status == 0 ? MagickFalse : MagickTrue);
#undef _image_info
#undef _images
#undef _exception
#undef _draw_info
#undef _quantize_info
#undef IfNormalOp
#undef IfPlusOp
#undef IsNormalOp
} | 0 | [
"CWE-399",
"CWE-401"
]
| ImageMagick | ce08a3691a8ac29125e29fc41967b3737fa3f425 | 23,845,984,895,030,457,000,000,000,000,000,000,000 | 893 | https://github.com/ImageMagick/ImageMagick/issues/1604 |
int in_gate_area_no_task(unsigned long addr)
{
#ifdef AT_SYSINFO_EHDR
if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
return 1;
#endif
return 0;
} | 0 | [
"CWE-20"
]
| linux-2.6 | 89f5b7da2a6bad2e84670422ab8192382a5aeb9f | 201,638,251,468,372,180,000,000,000,000,000,000,000 | 8 | Reinstate ZERO_PAGE optimization in 'get_user_pages()' and fix XIP
KAMEZAWA Hiroyuki and Oleg Nesterov point out that since the commit
557ed1fa2620dc119adb86b34c614e152a629a80 ("remove ZERO_PAGE") removed
the ZERO_PAGE from the VM mappings, any users of get_user_pages() will
generally now populate the VM with real empty pages needlessly.
We used to get the ZERO_PAGE when we did the "handle_mm_fault()", but
since fault handling no longer uses ZERO_PAGE for new anonymous pages,
we now need to handle that special case in follow_page() instead.
In particular, the removal of ZERO_PAGE effectively removed the core
file writing optimization where we would skip writing pages that had not
been populated at all, and increased memory pressure a lot by allocating
all those useless newly zeroed pages.
This reinstates the optimization by making the unmapped PTE case the
same as for a non-existent page table, which already did this correctly.
While at it, this also fixes the XIP case for follow_page(), where the
caller could not differentiate between the case of a page that simply
could not be used (because it had no "struct page" associated with it)
and a page that just wasn't mapped.
We do that by simply returning an error pointer for pages that could not
be turned into a "struct page *". The error is arbitrarily picked to be
EFAULT, since that was what get_user_pages() already used for the
equivalent IO-mapped page case.
[ Also removed an impossible test for pte_offset_map_lock() failing:
that's not how that function works ]
Acked-by: Oleg Nesterov <[email protected]>
Acked-by: Nick Piggin <[email protected]>
Cc: KAMEZAWA Hiroyuki <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Roland McGrath <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
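A hedged sketch of the calling convention the closing paragraphs describe:

/* follow_page() now distinguishes "mapped but no struct page" (the XIP
 * case) from "not mapped at all" by returning an error pointer. */
struct page *page = follow_page(vma, addr, flags);
if (IS_ERR(page))
    return PTR_ERR(page);   /* arbitrarily -EFAULT, as noted above */
if (page == NULL) {
    /* genuinely unmapped: fall through to handle_mm_fault() */
}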
static void fn_send_intr(struct vc_data *vc)
{
tty_insert_flip_char(&vc->port, 0, TTY_BREAK);
tty_schedule_flip(&vc->port);
} | 0 | [
"CWE-416"
]
| linux | 6ca03f90527e499dd5e32d6522909e2ad390896b | 307,497,098,203,710,260,000,000,000,000,000,000,000 | 5 | vt: keyboard, simplify vt_kdgkbsent
Use 'strlen' of the string, add one for NUL terminator and simply do
'copy_to_user' instead of the explicit 'for' loop. This makes the
KDGKBSENT case more compact.
The only thing we need to take care about is NULL 'func_table[i]'. Use
an empty string in that case.
The original check for overflow could never trigger as the func_buf
strings are always shorter or equal to 'struct kbsentry's.
Cc: <[email protected]>
Signed-off-by: Jiri Slaby <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Greg Kroah-Hartman <[email protected]> |
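A hedged sketch of the simplified copy-out (field names follow the commit; the locking around func_table is omitted):

/* KDGKBSENT: copy strlen+1 bytes in one go; a NULL entry becomes "". */
const char *p = func_table[i] ? func_table[i] : "";
size_t len = strlen(p) + 1;                 /* include the NUL terminator */
ret = copy_to_user(user_kbs->kb_string, p, len) ? -EFAULT : 0;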
nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
struct nvmefc_tgt_fcp_req *fcpreq,
void *cmdiubuf, u32 cmdiubuf_len)
{
struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
struct nvmet_fc_tgt_queue *queue;
struct nvmet_fc_fcp_iod *fod;
struct nvmet_fc_defer_fcp_req *deferfcp;
unsigned long flags;
/* validate iu, so the connection id can be used to find the queue */
if ((cmdiubuf_len != sizeof(*cmdiu)) ||
(cmdiu->scsi_id != NVME_CMD_SCSI_ID) ||
(cmdiu->fc_id != NVME_CMD_FC_ID) ||
(be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
return -EIO;
queue = nvmet_fc_find_target_queue(tgtport,
be64_to_cpu(cmdiu->connection_id));
if (!queue)
return -ENOTCONN;
/*
* note: reference taken by find_target_queue
* After successful fod allocation, the fod will inherit the
* ownership of that reference and will remove the reference
* when the fod is freed.
*/
spin_lock_irqsave(&queue->qlock, flags);
fod = nvmet_fc_alloc_fcp_iod(queue);
if (fod) {
spin_unlock_irqrestore(&queue->qlock, flags);
fcpreq->nvmet_fc_private = fod;
fod->fcpreq = fcpreq;
memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
return 0;
}
if (!tgtport->ops->defer_rcv) {
spin_unlock_irqrestore(&queue->qlock, flags);
/* release the queue lookup reference */
nvmet_fc_tgt_q_put(queue);
return -ENOENT;
}
deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
struct nvmet_fc_defer_fcp_req, req_list);
if (deferfcp) {
/* Just re-use one that was previously allocated */
list_del(&deferfcp->req_list);
} else {
spin_unlock_irqrestore(&queue->qlock, flags);
/* Now we need to dynamically allocate one */
deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);
if (!deferfcp) {
/* release the queue lookup reference */
nvmet_fc_tgt_q_put(queue);
return -ENOMEM;
}
spin_lock_irqsave(&queue->qlock, flags);
}
/* For now, use rspaddr / rsplen to save payload information */
fcpreq->rspaddr = cmdiubuf;
fcpreq->rsplen = cmdiubuf_len;
deferfcp->fcp_req = fcpreq;
/* defer processing till a fod becomes available */
list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);
/* NOTE: the queue lookup reference is still valid */
spin_unlock_irqrestore(&queue->qlock, flags);
return -EOVERFLOW;
} | 0 | [
"CWE-119",
"CWE-787"
]
| linux | 0c319d3a144d4b8f1ea2047fd614d2149b68f889 | 96,957,007,560,402,850,000,000,000,000,000,000,000 | 85 | nvmet-fc: ensure target queue id within range.
When searching for queue IDs, ensure they are within the expected range.
Signed-off-by: James Smart <[email protected]>
Signed-off-by: Christoph Hellwig <[email protected]>
Signed-off-by: Jens Axboe <[email protected]> |
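A hedged sketch of the range check (nvmet_fc_getqueueid() extracts the queue index from the 64-bit connection id in this driver):

/* Reject connection ids whose queue index exceeds what was allocated,
 * before using the index to look up the association's queue array. */
u16 qid = nvmet_fc_getqueueid(be64_to_cpu(cmdiu->connection_id));
if (qid > NVMET_NR_QUEUES)
    return NULL;    /* no such queue */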
virtual bool is_outer_field() const { DBUG_ASSERT(fixed); return FALSE; } | 0 | []
| mysql-server | f7316aa0c9a3909fc7498e7b95d5d3af044a7e21 | 278,328,552,176,488,200,000,000,000,000,000,000,000 | 1 | Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST,
COL), NAME_CONST('NAME', NULL))
Backport of Bug#19143243 fix.
NAME_CONST item can return NULL_ITEM type in case of incorrect arguments.
NULL_ITEM has special processing in Item_func_in function.
In Item_func_in::fix_length_and_dec an array of possible comparators is
created. Since NAME_CONST function has NULL_ITEM type, corresponding
array element is empty. Then NAME_CONST is wrapped to ITEM_CACHE.
ITEM_CACHE cannot return the proper type (NULL_ITEM) in Item_func_in::val_int(),
so the NULL_ITEM ends up being compared with an empty comparator.
The fix is to disable the caching of Item_name_const item. |
static void save_same_name_hfinfo(gpointer data)
{
same_name_hfinfo = (header_field_info*)data;
} | 0 | [
"CWE-401"
]
| wireshark | a9fc769d7bb4b491efb61c699d57c9f35269d871 | 324,369,783,332,760,530,000,000,000,000,000,000,000 | 4 | epan: Fix a memory leak.
Make sure _proto_tree_add_bits_ret_val allocates a bits array using the
packet scope, otherwise we leak memory. Fixes #17032. |
int main(int argc, char **argv)
{
jas_image_t *image;
cmdopts_t *cmdopts;
jas_stream_t *in;
jas_stream_t *out;
jas_tmr_t dectmr;
jas_tmr_t enctmr;
double dectime;
double enctime;
int_fast16_t numcmpts;
int i;
/* Determine the base name of this command. */
if ((cmdname = strrchr(argv[0], '/'))) {
++cmdname;
} else {
cmdname = argv[0];
}
if (jas_init()) {
abort();
}
/* Parse the command line options. */
if (!(cmdopts = cmdopts_parse(argc, argv))) {
fprintf(stderr, "error: cannot parse command line\n");
exit(EXIT_FAILURE);
}
if (cmdopts->version) {
printf("%s\n", JAS_VERSION);
fprintf(stderr, "libjasper %s\n", jas_getversion());
exit(EXIT_SUCCESS);
}
jas_setdbglevel(cmdopts->debug);
if (cmdopts->verbose) {
cmdinfo();
}
/* Open the input image file. */
if (cmdopts->infile) {
/* The input image is to be read from a file. */
if (!(in = jas_stream_fopen(cmdopts->infile, "rb"))) {
fprintf(stderr, "error: cannot open input image file %s\n",
cmdopts->infile);
exit(EXIT_FAILURE);
}
} else {
/* The input image is to be read from standard input. */
if (!(in = jas_stream_fdopen(0, "rb"))) {
fprintf(stderr, "error: cannot open standard input\n");
exit(EXIT_FAILURE);
}
}
/* Open the output image file. */
if (cmdopts->outfile) {
/* The output image is to be written to a file. */
if (!(out = jas_stream_fopen(cmdopts->outfile, "w+b"))) {
fprintf(stderr, "error: cannot open output image file %s\n",
cmdopts->outfile);
exit(EXIT_FAILURE);
}
} else {
/* The output image is to be written to standard output. */
if (!(out = jas_stream_fdopen(1, "w+b"))) {
fprintf(stderr, "error: cannot open standard output\n");
exit(EXIT_FAILURE);
}
}
if (cmdopts->infmt < 0) {
if ((cmdopts->infmt = jas_image_getfmt(in)) < 0) {
fprintf(stderr, "error: input image has unknown format\n");
exit(EXIT_FAILURE);
}
}
/* Get the input image data. */
jas_tmr_start(&dectmr);
if (!(image = jas_image_decode(in, cmdopts->infmt, cmdopts->inopts))) {
fprintf(stderr, "error: cannot load image data\n");
exit(EXIT_FAILURE);
}
jas_tmr_stop(&dectmr);
dectime = jas_tmr_get(&dectmr);
/* If requested, throw away all of the components except one.
Why might this be desirable? It is a hack, really.
None of the image formats other than the JPEG-2000 ones support
images with two, four, five, or more components. This hack
allows such images to be decoded with the non-JPEG-2000 decoders,
one component at a time. */
numcmpts = jas_image_numcmpts(image);
if (cmdopts->cmptno >= 0 && cmdopts->cmptno < numcmpts) {
for (i = numcmpts - 1; i >= 0; --i) {
if (i != cmdopts->cmptno) {
jas_image_delcmpt(image, i);
}
}
}
if (cmdopts->srgb) {
jas_image_t *newimage;
jas_cmprof_t *outprof;
jas_eprintf("forcing conversion to sRGB\n");
if (!(outprof = jas_cmprof_createfromclrspc(JAS_CLRSPC_SRGB))) {
jas_eprintf("cannot create sRGB profile\n");
exit(EXIT_FAILURE);
}
if (!(newimage = jas_image_chclrspc(image, outprof, JAS_CMXFORM_INTENT_PER))) {
jas_eprintf("cannot convert to sRGB\n");
exit(EXIT_FAILURE);
}
jas_image_destroy(image);
jas_cmprof_destroy(outprof);
image = newimage;
}
/* Generate the output image data. */
jas_tmr_start(&enctmr);
if (jas_image_encode(image, out, cmdopts->outfmt, cmdopts->outopts)) {
fprintf(stderr, "error: cannot encode image\n");
exit(EXIT_FAILURE);
}
jas_stream_flush(out);
jas_tmr_stop(&enctmr);
enctime = jas_tmr_get(&enctmr);
if (cmdopts->verbose) {
fprintf(stderr, "decoding time = %f\n", dectime);
fprintf(stderr, "encoding time = %f\n", enctime);
}
/* If this fails, we don't care. */
(void) jas_stream_close(in);
/* Close the output image stream. */
if (jas_stream_close(out)) {
fprintf(stderr, "error: cannot close output image file\n");
exit(EXIT_FAILURE);
}
cmdopts_destroy(cmdopts);
jas_image_destroy(image);
jas_image_clearfmts();
/* Success at last! :-) */
return EXIT_SUCCESS;
} | 1 | [
"CWE-119"
]
| jasper | 65536647d380571d1a9a6c91fa03775fb5bbd256 | 332,148,609,621,238,360,000,000,000,000,000,000,000 | 153 | A new experimental memory allocator has been introduced. The allocator
is experimental in the sense that its API is not considered stable and
the allocator may change or disappear entirely in future versions of
the code. This new allocator tracks how much memory is being used by
jas_malloc and friends. A maximum upper bound on the memory usage can be
set via the experimental API provided and a default value can be set at
build time as well. Such functionality may be useful in run-time
environments where the user wants to be able to limit the amount of
memory used by JasPer. This allocator is not used by default.
Note: This feature needs C11 functionality.
Note: The memory allocator is not thread safe in its current form.
A new --memory-limit CLI option has been added to the jasper, imginfo,
imgcmp, and jiv programs. The option is only available when the code is
built with the new memory allocator.
The support for my old debug memory allocator from the 1990s has been
purged from the code. The debug memory allocator is probably not
a very useful thing with the advent of GCC/Clang code sanitizers.
The safe size_t integer functions no longer set their result upon failure.
A safe subtract operation was also added. |
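A hedged, single-threaded sketch of the byte-counting allocator idea (this is not JasPer's actual API; real code must also remember each block's size so it can be credited back on free):

#include <stdlib.h>

static size_t mem_used  = 0;
static size_t mem_limit = (size_t)-1;   /* "no limit" by default */

void *tracked_malloc(size_t n)
{
    void *p;
    if (n > mem_limit - mem_used)       /* overflow-safe "safe subtract" check */
        return NULL;                    /* would exceed the configured cap */
    p = malloc(n);
    if (p != NULL)
        mem_used += n;
    return p;
}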
int pn_ssl_get_peer_hostname(pn_ssl_t *ssl0, char *hostname, size_t *bufsize)
{
pni_ssl_t *ssl = get_ssl_internal(ssl0);
if (!ssl) return -1;
if (!ssl->peer_hostname) {
*bufsize = 0;
if (hostname) *hostname = '\0';
return 0;
}
unsigned len = strlen(ssl->peer_hostname);
if (hostname) {
if (len >= *bufsize) return -1;
strcpy( hostname, ssl->peer_hostname );
}
*bufsize = len;
return 0;
} | 0 | []
| qpid-proton | 4aea0fd8502f5e9af7f22fd60645eeec07bce0b2 | 97,765,512,563,786,920,000,000,000,000,000,000,000 | 17 | PROTON-2014: [c] Ensure SSL mutual authentication
(cherry picked from commit 97c7733f07712665f3d08091c82c393e4c3adbf7) |
mrb_exec_irep(mrb_state *mrb, mrb_value self, struct RProc *p)
{
mrb_callinfo *ci = mrb->c->ci;
if (ci->cci == CINFO_NONE) {
return exec_irep(mrb, self, p);
}
else {
mrb_value ret;
if (MRB_PROC_CFUNC_P(p)) {
if (MRB_PROC_NOARG_P(p)) {
check_method_noarg(mrb, ci);
}
cipush(mrb, 0, CINFO_DIRECT, mrb_vm_ci_target_class(ci), p, ci->mid, ci->n|(ci->nk<<4));
ret = MRB_PROC_CFUNC(p)(mrb, self);
cipop(mrb);
}
else {
mrb_int keep = mrb_ci_bidx(ci) + 1; /* receiver + block */
ret = mrb_top_run(mrb, p, self, keep);
}
if (mrb->exc && mrb->jmp) {
mrb_exc_raise(mrb, mrb_obj_value(mrb->exc));
}
return ret;
}
} | 0 | [
"CWE-703",
"CWE-125"
]
| mruby | a4d97934d51cb88954cc49161dc1d151f64afb6b | 150,356,856,346,039,220,000,000,000,000,000,000,000 | 26 | vm.c: check if target_class is NULL (when prepended). |
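A hedged sketch of the added guard (mrb_vm_ci_target_class() appears in the function above; the error raised here is an illustrative choice, not necessarily the patch's):

/* A prepended module can leave the callinfo's target class unset, so
 * check before dereferencing it. */
if (mrb_vm_ci_target_class(ci) == NULL) {
    mrb_raise(mrb, E_TYPE_ERROR, "no class to dispatch on");
}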
int wc_InitRsaKey(RsaKey* key, void* heap)
{
return wc_InitRsaKey_ex(key, heap, INVALID_DEVID);
} | 0 | [
"CWE-310",
"CWE-787"
]
| wolfssl | fb2288c46dd4c864b78f00a47a364b96a09a5c0f | 46,321,356,894,619,820,000,000,000,000,000,000,000 | 4 | RSA-PSS: Handle edge case with encoding message to hash
When the key is small relative to the digest (1024-bit key, 64-byte
hash, 61-byte salt length), the internal message to hash is larger than
the output size.
Allocate a buffer for the message when this happens. |
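A hedged sketch of the allocation fallback (wolfSSL-style names; the real change sits inside the PSS padding path):

/* When hash + salt + overhead exceeds the key-sized output buffer,
 * build the padded message in a heap buffer instead of overflowing. */
byte *m = output;
if (msgLen > outputLen) {
    m = (byte *)XMALLOC(msgLen, heap, DYNAMIC_TYPE_RSA_BUFFER);
    if (m == NULL)
        return MEMORY_E;
}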
GF_Err gf_isom_box_parse_ex(GF_Box **outBox, GF_BitStream *bs, u32 parent_type, Bool is_root_box, u64 parent_size)
{
u32 type, uuid_type, hdr_size, restore_type;
u64 size, start, comp_start, end;
char uuid[16];
GF_Err e;
GF_BitStream *uncomp_bs = NULL;
u8 *uncomp_data = NULL;
u32 compressed_size=0;
GF_Box *newBox;
Bool skip_logs = (gf_bs_get_cookie(bs) & GF_ISOM_BS_COOKIE_NO_LOGS ) ? GF_TRUE : GF_FALSE;
Bool is_special = GF_TRUE;
if ((bs == NULL) || (outBox == NULL) ) return GF_BAD_PARAM;
*outBox = NULL;
if (gf_bs_available(bs) < 8) {
return GF_ISOM_INCOMPLETE_FILE;
}
comp_start = start = gf_bs_get_position(bs);
uuid_type = 0;
size = (u64) gf_bs_read_u32(bs);
hdr_size = 4;
/*fix for some boxes found in some old hinted files*/
if ((size >= 2) && (size <= 4)) {
size = 4;
type = GF_ISOM_BOX_TYPE_VOID;
} else {
type = gf_bs_read_u32(bs);
hdr_size += 4;
/*no size means till end of file - EXCEPT FOR some old QuickTime boxes...*/
if (type == GF_ISOM_BOX_TYPE_TOTL)
size = 12;
if (!size) {
if (is_root_box) {
if (!skip_logs) {
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[iso file] Warning Read Box type %s (0x%08X) size 0 reading till the end of file\n", gf_4cc_to_str(type), type));
}
size = gf_bs_available(bs) + 8;
} else {
if (!skip_logs) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Read Box type %s (0x%08X) at position "LLU" has size 0 but is not at root/file level. Forbidden, skipping end of parent box !\n", gf_4cc_to_str(type), type, start));
return GF_SKIP_BOX;
}
return GF_OK;
}
}
if (is_root_box && (size>=8)) {
Bool do_uncompress = GF_FALSE;
u8 *compb = NULL;
u32 osize = 0;
u32 otype = type;
if (type==GF_4CC('!', 'm', 'o', 'f')) {
do_uncompress = GF_TRUE;
type = GF_ISOM_BOX_TYPE_MOOF;
}
else if (type==GF_4CC('!', 'm', 'o', 'v')) {
do_uncompress = GF_TRUE;
type = GF_ISOM_BOX_TYPE_MOOV;
}
else if (type==GF_4CC('!', 's', 'i', 'x')) {
do_uncompress = GF_TRUE;
type = GF_ISOM_BOX_TYPE_SIDX;
}
else if (type==GF_4CC('!', 's', 's', 'x')) {
do_uncompress = GF_TRUE;
type = GF_ISOM_BOX_TYPE_SSIX;
}
if (do_uncompress) {
compb = gf_malloc((u32) (size-8));
compressed_size = (u32) (size - 8);
gf_bs_read_data(bs, compb, compressed_size);
e = gf_gz_decompress_payload(compb, compressed_size, &uncomp_data, &osize);
if (e) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Failed to uncompress payload for box type %s (0x%08X)\n", gf_4cc_to_str(otype), otype));
return e;
}
//keep size as complete box size for tests below
size = osize + 8;
uncomp_bs = gf_bs_new(uncomp_data, osize, GF_BITSTREAM_READ);
bs = uncomp_bs;
start = 0;
gf_free(compb);
}
}
}
/*handle uuid*/
memset(uuid, 0, 16);
if (type == GF_ISOM_BOX_TYPE_UUID ) {
if (gf_bs_available(bs) < 16) {
return GF_ISOM_INCOMPLETE_FILE;
}
gf_bs_read_data(bs, uuid, 16);
hdr_size += 16;
uuid_type = gf_isom_solve_uuid_box(uuid);
}
//handle large box
if (size == 1) {
if (gf_bs_available(bs) < 8) {
return GF_ISOM_INCOMPLETE_FILE;
}
size = gf_bs_read_u64(bs);
hdr_size += 8;
}
if (!skip_logs)
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[iso file] Read Box type %s size "LLD" start "LLD"\n", gf_4cc_to_str(type), size, start));
if ( size < hdr_size ) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Box %s size "LLD" less than box header size %d\n", gf_4cc_to_str(type), size, hdr_size));
return GF_ISOM_INVALID_FILE;
}
//if parent size is given, make sure box fits within parent
if (parent_size && (parent_size<size)) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Box %s size "LLU" is larger than remaining parent size "LLU"\n", gf_4cc_to_str(type), size, parent_size ));
return GF_ISOM_INVALID_FILE;
}
restore_type = 0;
if ((parent_type==GF_ISOM_BOX_TYPE_STSD) && (type==GF_QT_SUBTYPE_RAW) ) {
u64 cookie = gf_bs_get_cookie(bs);
restore_type = type;
if (cookie & GF_ISOM_BS_COOKIE_VISUAL_TRACK)
type = GF_QT_SUBTYPE_RAW_VID;
else
type = GF_QT_SUBTYPE_RAW_AUD;
}
//some special boxes (references and track groups) are handled by a single generic box with an associated ref/group type
if (parent_type && (parent_type == GF_ISOM_BOX_TYPE_TREF)) {
newBox = gf_isom_box_new(GF_ISOM_BOX_TYPE_REFT);
if (!newBox) return GF_OUT_OF_MEM;
((GF_TrackReferenceTypeBox*)newBox)->reference_type = type;
} else if (parent_type && (parent_type == GF_ISOM_BOX_TYPE_IREF)) {
newBox = gf_isom_box_new(GF_ISOM_BOX_TYPE_REFI);
if (!newBox) return GF_OUT_OF_MEM;
((GF_ItemReferenceTypeBox*)newBox)->reference_type = type;
} else if (parent_type && (parent_type == GF_ISOM_BOX_TYPE_TRGR)) {
newBox = gf_isom_box_new(GF_ISOM_BOX_TYPE_TRGT);
if (!newBox) return GF_OUT_OF_MEM;
((GF_TrackGroupTypeBox*)newBox)->group_type = type;
} else if (parent_type && (parent_type == GF_ISOM_BOX_TYPE_GRPL)) {
newBox = gf_isom_box_new(GF_ISOM_BOX_TYPE_GRPT);
if (!newBox) return GF_OUT_OF_MEM;
((GF_EntityToGroupTypeBox*)newBox)->grouping_type = type;
} else {
//OK, create the box based on the type
is_special = GF_FALSE;
newBox = gf_isom_box_new_ex(uuid_type ? uuid_type : type, parent_type, skip_logs, is_root_box);
if (!newBox) return GF_OUT_OF_MEM;
}
//OK, init and read this box
if (type==GF_ISOM_BOX_TYPE_UUID && !is_special) {
memcpy(((GF_UUIDBox *)newBox)->uuid, uuid, 16);
((GF_UUIDBox *)newBox)->internal_4cc = uuid_type;
}
if (!newBox->type) newBox->type = type;
if (restore_type)
newBox->type = restore_type;
end = gf_bs_available(bs);
if (size - hdr_size > end ) {
newBox->size = size - hdr_size - end;
*outBox = newBox;
return GF_ISOM_INCOMPLETE_FILE;
}
newBox->size = size - hdr_size;
e = gf_isom_full_box_read(newBox, bs);
if (!e) e = gf_isom_box_read(newBox, bs);
if (e) {
if (gf_opts_get_bool("core", "no-check"))
e = GF_OK;
}
newBox->size = size;
end = gf_bs_get_position(bs);
if (uncomp_bs) {
gf_free(uncomp_data);
gf_bs_del(uncomp_bs);
if (e) {
gf_isom_box_del(newBox);
*outBox = NULL;
return e;
}
//move size to real bitstream offsets for tests below
size -= 8;
//remember compressed vs real size info for moof in order to properly recompute data_offset/base_data_offset
if (type==GF_ISOM_BOX_TYPE_MOOF) {
((GF_MovieFragmentBox *)newBox)->compressed_diff = (s32)size - (s32)compressed_size;
}
//remember compressed vs real size info for moov in order to properly recompute chunk offset
else if (type==GF_ISOM_BOX_TYPE_MOOV) {
((GF_MovieBox *)newBox)->compressed_diff = (s32)size - (s32)compressed_size;
((GF_MovieBox *)newBox)->file_offset = comp_start;
}
//remember compressed vs real size info for dump
else if (type==GF_ISOM_BOX_TYPE_SIDX) {
((GF_SegmentIndexBox *)newBox)->compressed_diff = (s32)size - (s32)compressed_size;
}
//remember compressed vs real size info for dump
else if (type==GF_ISOM_BOX_TYPE_SSIX) {
((GF_SubsegmentIndexBox *)newBox)->compressed_diff = (s32)size - (s32)compressed_size;
}
newBox->internal_flags = GF_ISOM_BOX_COMPRESSED;
}
if (e && (e != GF_ISOM_INCOMPLETE_FILE)) {
gf_isom_box_del(newBox);
*outBox = NULL;
if (is_root_box && (e==GF_SKIP_BOX))
e = GF_ISOM_INVALID_FILE;
if (!skip_logs && (e!=GF_SKIP_BOX)) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Read Box \"%s\" (start "LLU") failed (%s) - skipping\n", gf_4cc_to_str(type), start, gf_error_to_string(e)));
}
//we don't try to reparse known boxes that have been failing (too dangerous)
return e;
}
if (end-start > size) {
if (!skip_logs) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Box \"%s\" size "LLU" (start "LLU") invalid (read "LLU")\n", gf_4cc_to_str(type), size, start, (end-start) ));
}
/*let's still try to load the file since no error was notified*/
gf_bs_seek(bs, start+size);
} else if (end-start < size) {
u32 to_skip = (u32) (size-(end-start));
if (!skip_logs) {
if ((to_skip!=4) || gf_bs_peek_bits(bs, 32, 0)) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Box \"%s\" (start "LLU") has %u extra bytes\n", gf_4cc_to_str(type), start, to_skip));
unused_bytes += to_skip;
}
}
gf_bs_skip_bytes(bs, to_skip);
}
*outBox = newBox;
return e;
} | 0 | [
"CWE-476"
]
| gpac | 37592ad86c6ca934d34740012213e467acc4a3b0 | 39,055,403,588,388,566,000,000,000,000,000,000,000 | 248 | fixed #2163 |
virtual Item *grouping_field_transformer_for_where(THD *thd, uchar *arg)
{ return this; } | 0 | [
"CWE-617"
]
| server | 807945f2eb5fa22e6f233cc17b85a2e141efe2c8 | 331,035,693,368,227,070,000,000,000,000,000,000,000 | 2 | MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order...
When doing condition pushdown from HAVING into WHERE,
Item_equal::create_pushable_equalities() calls
item->set_extraction_flag(IMMUTABLE_FL) for constant items.
Then, Item::cleanup_excluding_immutables_processor() checks for this flag
to see if it should call item->cleanup() or leave the item as-is.
The failure happens when a constant item has a non-constant one inside it,
like:
(tbl.col=0 AND impossible_cond)
item->walk(cleanup_excluding_immutables_processor) works in a bottom-up
way so it
1. will call Item_func_eq(tbl.col=0)->cleanup()
2. will not call Item_cond_and->cleanup (as the AND is constant)
This creates an item tree where a fixed Item has an un-fixed Item inside
it which eventually causes an assertion failure.
Fixed by introducing this rule: instead of just calling
item->set_extraction_flag(IMMUTABLE_FL);
we call Item::walk() to set the flag for all sub-items of the item. |
global_notify_fifo_script(vector_t *strvec)
{
notify_fifo_script(strvec, "", &global_data->notify_fifo);
} | 0 | [
"CWE-200"
]
| keepalived | c6247a9ef2c7b33244ab1d3aa5d629ec49f0a067 | 199,202,355,516,101,060,000,000,000,000,000,000,000 | 4 | Add command line and configuration option to set umask
Issue #1048 identified that files created by keepalived are created
with mode 0666. This commit changes the default to 0644, and also
allows the umask to be specified in the configuration or as a command
line option.
Signed-off-by: Quentin Armitage <[email protected]> |
wl_array_add(struct wl_array *array, size_t size)
{
size_t alloc;
void *data, *p;
if (array->alloc > 0)
alloc = array->alloc;
else
alloc = 16;
while (alloc < array->size + size)
alloc *= 2;
if (array->alloc < alloc) {
if (array->alloc > 0)
data = realloc(array->data, alloc);
else
data = malloc(alloc);
if (data == NULL)
return NULL;
array->data = data;
array->alloc = alloc;
}
p = (char *)array->data + array->size;
array->size += size;
return p;
} | 0 | []
| wayland | b19488c7154b902354cb26a27f11415d7799b0b2 | 200,053,387,127,008,240,000,000,000,000,000,000,000 | 30 | util: Limit size of wl_map
Since server IDs are basically indistinguishable from really big client
IDs at many points in the source, it's theoretically possible to overflow
a map and either overflow server IDs into the client ID space, or grow
client IDs into the server ID space. This would currently take a massive
amount of RAM, but the definition of massive changes yearly.
Prevent this by placing a ridiculous but arbitrary upper bound on the
number of items we can put in a map: 0xF00000, somewhere over 15 million.
This should satisfy pathological clients without restriction, but stays
well clear of the 0xFF000000 transition point between server and client
IDs. It will still take an improbable amount of RAM to hit this, and a
client could still exhaust all RAM in this way, but our goal is to prevent
overflow and undefined behaviour.
Fixes #224
Signed-off-by: Derek Foreman <[email protected]> |
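A hedged sketch of the cap (the constant's name and the 'count' variable are stand-ins; the value matches the message):

/* Somewhere over 15 million entries: generous for real clients, but
 * well clear of the 0xff000000 server-ID transition point. */
#define WL_MAP_MAX_OBJECTS 0x00f00000

    if (count >= WL_MAP_MAX_OBJECTS)    /* entries already in this side's array */
        return 0;   /* refuse to grow rather than drift into foreign ID space */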
static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu)
{
struct rt6_info *rt6 = (struct rt6_info *)dst;
dst_confirm(dst);
if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) {
struct net *net = dev_net(dst->dev);
rt6->rt6i_flags |= RTF_MODIFIED;
if (mtu < IPV6_MIN_MTU)
mtu = IPV6_MIN_MTU;
dst_metric_set(dst, RTAX_MTU, mtu);
rt6_update_expires(rt6, net->ipv6.sysctl.ip6_rt_mtu_expires);
}
} | 0 | [
"CWE-17"
]
| linux-stable | 9d289715eb5c252ae15bd547cb252ca547a3c4f2 | 82,096,211,975,627,580,000,000,000,000,000,000,000 | 17 | ipv6: stop sending PTB packets for MTU < 1280
Reduce the attack vector and stop generating IPv6 Fragment Header for
paths with an MTU smaller than the minimum required IPv6 MTU
size (1280 byte) - called atomic fragments.
See IETF I-D "Deprecating the Generation of IPv6 Atomic Fragments" [1]
for more information and how this "feature" can be misused.
[1] https://tools.ietf.org/html/draft-ietf-6man-deprecate-atomfrag-generation-00
Signed-off-by: Fernando Gont <[email protected]>
Signed-off-by: Hagen Paul Pfeifer <[email protected]>
Acked-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
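The clamp in ip6_rt_update_pmtu() above keeps stored MTUs at or above IPV6_MIN_MTU; the policy change the message describes is, in hedged sketch form:

/* Ignore reported MTUs below the IPv6 minimum instead of generating
 * atomic fragments: every IPv6 link must carry 1280-byte packets. */
if (mtu < IPV6_MIN_MTU)
    return;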
void security_mac_salt_key(const BYTE* session_key_blob, const BYTE* client_random,
const BYTE* server_random, BYTE* output)
{
/* MacSaltKey = First128Bits(SessionKeyBlob) */
memcpy(output, session_key_blob, 16);
} | 0 | [
"CWE-476"
]
| FreeRDP | 7d58aac24fe20ffaad7bd9b40c9ddf457c1b06e7 | 263,948,985,184,640,330,000,000,000,000,000,000,000 | 6 | security: add a NULL pointer check to fix a server crash. |
void negotiate(CLIENT *client) {
char zeros[300];
u64 size_host;
memset(zeros, 0, 290);
if (write(client->net, INIT_PASSWD, 8) < 0)
err("Negotiation failed: %m");
cliserv_magic = htonll(cliserv_magic);
if (write(client->net, &cliserv_magic, sizeof(cliserv_magic)) < 0)
err("Negotiation failed: %m");
size_host = htonll((u64)(client->exportsize));
if (write(client->net, &size_host, 8) < 0)
err("Negotiation failed: %m");
if (write(client->net, zeros, 128) < 0)
err("Negotiation failed: %m");
} | 0 | [
"CWE-119"
]
| nbd | 4ed24fe0d64c7cc9963c57b52cad1555ad7c6b60 | 299,234,879,029,562,600,000,000,000,000,000,000,000 | 16 | r134: CVE-2005-3534 |
AfpInfo *afpinfo_unpack(TALLOC_CTX *ctx, const void *data)
{
AfpInfo *ai = talloc_zero(ctx, AfpInfo);
if (ai == NULL) {
return NULL;
}
ai->afpi_Signature = RIVAL(data, 0);
ai->afpi_Version = RIVAL(data, 4);
ai->afpi_BackupTime = RIVAL(data, 12);
memcpy(ai->afpi_FinderInfo, (const char *)data + 16,
sizeof(ai->afpi_FinderInfo));
if (ai->afpi_Signature != AFP_Signature
|| ai->afpi_Version != AFP_Version) {
DEBUG(1, ("Bad AfpInfo signature or version\n"));
TALLOC_FREE(ai);
}
return ai;
} | 0 | [
"CWE-787"
]
| samba | 0e2b3fb982d1f53d111e10d9197ed2ec2e13712c | 322,020,611,515,480,700,000,000,000,000,000,000,000 | 21 | CVE-2021-44142: libadouble: harden parsing code
BUG: https://bugzilla.samba.org/show_bug.cgi?id=14914
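The unpack function in this entry reads a fixed 16 + sizeof(ai->afpi_FinderInfo)
bytes from data without being told how large the buffer is. A minimal sketch of
the hardened shape follows; the length parameter and the AFP_INFO_SIZE constant
are assumptions for illustration, not the literal Samba change:

/* Hypothetical length-checked wrapper; AFP_INFO_SIZE is an assumed name. */
#define AFP_INFO_SIZE 60

AfpInfo *afpinfo_unpack_checked(TALLOC_CTX *ctx, const void *data, size_t len)
{
	if (len < AFP_INFO_SIZE) {
		return NULL;    /* refuse short buffers before any read */
	}
	return afpinfo_unpack(ctx, data);
}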
Signed-off-by: Ralph Boehme <[email protected]>
Reviewed-by: Jeremy Allison <[email protected]> |
int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
{
struct snd_ctl_elem_id id;
unsigned int idx;
int err = -EINVAL;
if (! kcontrol)
return err;
if (snd_BUG_ON(!card || !kcontrol->info))
goto error;
id = kcontrol->id;
down_write(&card->controls_rwsem);
if (snd_ctl_find_id(card, &id)) {
up_write(&card->controls_rwsem);
dev_err(card->dev, "control %i:%i:%i:%s:%i is already present\n",
id.iface,
id.device,
id.subdevice,
id.name,
id.index);
err = -EBUSY;
goto error;
}
if (snd_ctl_find_hole(card, kcontrol->count) < 0) {
up_write(&card->controls_rwsem);
err = -ENOMEM;
goto error;
}
list_add_tail(&kcontrol->list, &card->controls);
card->controls_count += kcontrol->count;
kcontrol->id.numid = card->last_numid + 1;
card->last_numid += kcontrol->count;
up_write(&card->controls_rwsem);
for (idx = 0; idx < kcontrol->count; idx++, id.index++, id.numid++)
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
return 0;
error:
snd_ctl_free_one(kcontrol);
return err;
} | 1 | [
"CWE-416",
"CWE-415"
]
| linux | fd9f26e4eca5d08a27d12c0933fceef76ed9663d | 320,174,769,789,961,530,000,000,000,000,000,000,000 | 41 | ALSA: control: Don't access controls outside of protected regions
A control that is visible on the card->controls list can be freed at any time.
This means we must not access any of its memory without holding the
controls_rwsem. Otherwise we risk a use-after-free access.
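A sketch of the direction such a fix takes: snapshot everything the
notification loop needs while controls_rwsem is still held, and never
dereference kcontrol after up_write(). This is an excerpt reusing the
function's locals, not the literal upstream commit:

/* Hypothetical replacement for the tail of snd_ctl_add() above. */
	unsigned int count = kcontrol->count;     /* copied under the rwsem */
	kcontrol->id.numid = card->last_numid + 1;
	card->last_numid += count;
	id = kcontrol->id;                        /* full snapshot for notify */
	up_write(&card->controls_rwsem);
	for (idx = 0; idx < count; idx++, id.index++, id.numid++)
		snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);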
Signed-off-by: Lars-Peter Clausen <[email protected]>
Acked-by: Jaroslav Kysela <[email protected]>
Cc: <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]> |
static inline void mpeg4_encode_dc(PutBitContext *s, int level, int n)
{
#if 1
/* DC will overflow if level is outside the [-255,255] range. */
level += 256;
if (n < 4) {
/* luminance */
put_bits(s, uni_DCtab_lum_len[level], uni_DCtab_lum_bits[level]);
} else {
/* chrominance */
put_bits(s, uni_DCtab_chrom_len[level], uni_DCtab_chrom_bits[level]);
}
#else
int size, v;
/* find number of bits */
size = 0;
v = abs(level);
while (v) {
v >>= 1;
size++;
}
if (n < 4) {
/* luminance */
put_bits(s, ff_mpeg4_DCtab_lum[size][1], ff_mpeg4_DCtab_lum[size][0]);
} else {
/* chrominance */
put_bits(s, ff_mpeg4_DCtab_chrom[size][1], ff_mpeg4_DCtab_chrom[size][0]);
}
/* encode remaining bits */
if (size > 0) {
if (level < 0)
level = (-level) ^ ((1 << size) - 1);
put_bits(s, size, level);
if (size > 8)
put_bits(s, 1, 1);
}
#endif
} | 0 | [
"CWE-20"
]
| FFmpeg | 6bbef938839adc55e8e048bc9cc2e0fafe2064df | 268,552,533,158,738,000,000,000,000,000,000,000,000 | 40 | avcodec/mpeg4videoenc: Use 64 bit for times in mpeg4_encode_gop_header()
Fixes truncation
Fixes Assertion n <= 31 && value < (1U << n) failed at libavcodec/put_bits.h:169
Fixes: ffmpeg_crash_2.avi
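The bug class here is plain 32-bit truncation; a self-contained illustration
(not FFmpeg code) of why the GOP time computation needs 64 bits:

/* Standalone demo: the same product wraps in 32-bit arithmetic but is
 * exact in 64-bit, which is the class of truncation the fix removes. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t seconds = 3000000u;                 /* large timestamp */
	uint32_t narrow = seconds * 90000u;          /* wraps modulo 2^32 */
	uint64_t wide = (uint64_t)seconds * 90000u;  /* exact */
	printf("32-bit: %u, 64-bit: %llu\n", narrow, (unsigned long long)wide);
	return 0;
}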
Found-by: Thuan Pham <[email protected]>, Marcel Böhme, Andrew Santosa and Alexandru RazvanCaciulescu with AFLSmart
Signed-off-by: Michael Niedermayer <[email protected]>
(cherry picked from commit e1182fac1afba92a4975917823a5f644bee7e6e8)
Signed-off-by: Michael Niedermayer <[email protected]> |
static llparse_state_t llhttp__internal__run(
llhttp__internal_t* state,
const unsigned char* p,
const unsigned char* endp) {
int match;
switch ((llparse_state_t) (intptr_t) state->_current) {
case s_n_llhttp__internal__n_invoke_llhttp__after_message_complete:
s_n_llhttp__internal__n_invoke_llhttp__after_message_complete: {
switch (llhttp__after_message_complete(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_invoke_update_finish_1;
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_pause_1:
s_n_llhttp__internal__n_pause_1: {
state->error = 0x16;
state->reason = "Pause on CONNECT/Upgrade";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_n_llhttp__internal__n_invoke_llhttp__after_message_complete;
return s_error;
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_invoke_is_equal_upgrade:
s_n_llhttp__internal__n_invoke_is_equal_upgrade: {
switch (llhttp__internal__c_is_equal_upgrade(state, p, endp)) {
case 0:
goto s_n_llhttp__internal__n_invoke_llhttp__after_message_complete;
default:
goto s_n_llhttp__internal__n_pause_1;
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_invoke_llhttp__on_message_complete_2:
s_n_llhttp__internal__n_invoke_llhttp__on_message_complete_2: {
switch (llhttp__on_message_complete(state, p, endp)) {
case 0:
goto s_n_llhttp__internal__n_invoke_is_equal_upgrade;
case 21:
goto s_n_llhttp__internal__n_pause_5;
default:
goto s_n_llhttp__internal__n_error_10;
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_chunk_data_almost_done_skip:
s_n_llhttp__internal__n_chunk_data_almost_done_skip: {
if (p == endp) {
return s_n_llhttp__internal__n_chunk_data_almost_done_skip;
}
p++;
goto s_n_llhttp__internal__n_invoke_llhttp__on_chunk_complete;
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_chunk_data_almost_done:
s_n_llhttp__internal__n_chunk_data_almost_done: {
if (p == endp) {
return s_n_llhttp__internal__n_chunk_data_almost_done;
}
p++;
goto s_n_llhttp__internal__n_chunk_data_almost_done_skip;
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_consume_content_length:
s_n_llhttp__internal__n_consume_content_length: {
size_t avail;
uint64_t need;
avail = endp - p;
need = state->content_length;
if (avail >= need) {
p += need;
state->content_length = 0;
goto s_n_llhttp__internal__n_span_end_llhttp__on_body;
}
state->content_length -= avail;
return s_n_llhttp__internal__n_consume_content_length;
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_span_start_llhttp__on_body:
s_n_llhttp__internal__n_span_start_llhttp__on_body: {
if (p == endp) {
return s_n_llhttp__internal__n_span_start_llhttp__on_body;
}
state->_span_pos0 = (void*) p;
state->_span_cb0 = llhttp__on_body;
goto s_n_llhttp__internal__n_consume_content_length;
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_invoke_is_equal_content_length:
s_n_llhttp__internal__n_invoke_is_equal_content_length: {
switch (llhttp__internal__c_is_equal_content_length(state, p, endp)) {
case 0:
goto s_n_llhttp__internal__n_span_start_llhttp__on_body;
default:
goto s_n_llhttp__internal__n_invoke_or_flags;
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_chunk_size_almost_done:
s_n_llhttp__internal__n_chunk_size_almost_done: {
if (p == endp) {
return s_n_llhttp__internal__n_chunk_size_almost_done;
}
p++;
goto s_n_llhttp__internal__n_invoke_llhttp__on_chunk_header;
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_chunk_parameters:
s_n_llhttp__internal__n_chunk_parameters: {
static uint8_t lookup_table[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
};
if (p == endp) {
return s_n_llhttp__internal__n_chunk_parameters;
}
switch (lookup_table[(uint8_t) *p]) {
case 1: {
p++;
goto s_n_llhttp__internal__n_chunk_parameters;
}
case 2: {
p++;
goto s_n_llhttp__internal__n_chunk_size_almost_done;
}
default: {
goto s_n_llhttp__internal__n_error_6;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_chunk_size_otherwise:
s_n_llhttp__internal__n_chunk_size_otherwise: {
if (p == endp) {
return s_n_llhttp__internal__n_chunk_size_otherwise;
}
switch (*p) {
case 13: {
p++;
goto s_n_llhttp__internal__n_chunk_size_almost_done;
}
case ' ': {
p++;
goto s_n_llhttp__internal__n_chunk_parameters;
}
case ';': {
p++;
goto s_n_llhttp__internal__n_chunk_parameters;
}
default: {
goto s_n_llhttp__internal__n_error_7;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_chunk_size:
s_n_llhttp__internal__n_chunk_size: {
if (p == endp) {
return s_n_llhttp__internal__n_chunk_size;
}
switch (*p) {
case '0': {
p++;
match = 0;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case '1': {
p++;
match = 1;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case '2': {
p++;
match = 2;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case '3': {
p++;
match = 3;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case '4': {
p++;
match = 4;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case '5': {
p++;
match = 5;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case '6': {
p++;
match = 6;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case '7': {
p++;
match = 7;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case '8': {
p++;
match = 8;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case '9': {
p++;
match = 9;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case 'A': {
p++;
match = 10;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case 'B': {
p++;
match = 11;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case 'C': {
p++;
match = 12;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case 'D': {
p++;
match = 13;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case 'E': {
p++;
match = 14;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case 'F': {
p++;
match = 15;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case 'a': {
p++;
match = 10;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case 'b': {
p++;
match = 11;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case 'c': {
p++;
match = 12;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case 'd': {
p++;
match = 13;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case 'e': {
p++;
match = 14;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case 'f': {
p++;
match = 15;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
default: {
goto s_n_llhttp__internal__n_chunk_size_otherwise;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_chunk_size_digit:
s_n_llhttp__internal__n_chunk_size_digit: {
if (p == endp) {
return s_n_llhttp__internal__n_chunk_size_digit;
}
switch (*p) {
case '0': {
p++;
match = 0;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case '1': {
p++;
match = 1;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case '2': {
p++;
match = 2;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case '3': {
p++;
match = 3;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case '4': {
p++;
match = 4;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case '5': {
p++;
match = 5;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case '6': {
p++;
match = 6;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case '7': {
p++;
match = 7;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case '8': {
p++;
match = 8;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case '9': {
p++;
match = 9;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case 'A': {
p++;
match = 10;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case 'B': {
p++;
match = 11;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case 'C': {
p++;
match = 12;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case 'D': {
p++;
match = 13;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case 'E': {
p++;
match = 14;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case 'F': {
p++;
match = 15;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case 'a': {
p++;
match = 10;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case 'b': {
p++;
match = 11;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case 'c': {
p++;
match = 12;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case 'd': {
p++;
match = 13;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case 'e': {
p++;
match = 14;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
case 'f': {
p++;
match = 15;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length;
}
default: {
goto s_n_llhttp__internal__n_error_9;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_invoke_update_content_length:
s_n_llhttp__internal__n_invoke_update_content_length: {
switch (llhttp__internal__c_update_content_length(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_chunk_size_digit;
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_consume_content_length_1:
s_n_llhttp__internal__n_consume_content_length_1: {
size_t avail;
uint64_t need;
avail = endp - p;
need = state->content_length;
if (avail >= need) {
p += need;
state->content_length = 0;
goto s_n_llhttp__internal__n_span_end_llhttp__on_body_1;
}
state->content_length -= avail;
return s_n_llhttp__internal__n_consume_content_length_1;
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_span_start_llhttp__on_body_1:
s_n_llhttp__internal__n_span_start_llhttp__on_body_1: {
if (p == endp) {
return s_n_llhttp__internal__n_span_start_llhttp__on_body_1;
}
state->_span_pos0 = (void*) p;
state->_span_cb0 = llhttp__on_body;
goto s_n_llhttp__internal__n_consume_content_length_1;
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_eof:
s_n_llhttp__internal__n_eof: {
if (p == endp) {
return s_n_llhttp__internal__n_eof;
}
p++;
goto s_n_llhttp__internal__n_eof;
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_span_start_llhttp__on_body_2:
s_n_llhttp__internal__n_span_start_llhttp__on_body_2: {
if (p == endp) {
return s_n_llhttp__internal__n_span_start_llhttp__on_body_2;
}
state->_span_pos0 = (void*) p;
state->_span_cb0 = llhttp__on_body;
goto s_n_llhttp__internal__n_eof;
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_invoke_llhttp__after_headers_complete:
s_n_llhttp__internal__n_invoke_llhttp__after_headers_complete: {
switch (llhttp__after_headers_complete(state, p, endp)) {
case 1:
goto s_n_llhttp__internal__n_invoke_llhttp__on_message_complete_1;
case 2:
goto s_n_llhttp__internal__n_invoke_update_content_length;
case 3:
goto s_n_llhttp__internal__n_span_start_llhttp__on_body_1;
case 4:
goto s_n_llhttp__internal__n_invoke_update_finish_2;
case 5:
goto s_n_llhttp__internal__n_error_11;
default:
goto s_n_llhttp__internal__n_invoke_llhttp__on_message_complete;
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_headers_almost_done:
s_n_llhttp__internal__n_headers_almost_done: {
if (p == endp) {
return s_n_llhttp__internal__n_headers_almost_done;
}
p++;
goto s_n_llhttp__internal__n_invoke_test_flags;
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_header_field_colon_discard_ws:
s_n_llhttp__internal__n_header_field_colon_discard_ws: {
if (p == endp) {
return s_n_llhttp__internal__n_header_field_colon_discard_ws;
}
switch (*p) {
case ' ': {
p++;
goto s_n_llhttp__internal__n_header_field_colon_discard_ws;
}
default: {
goto s_n_llhttp__internal__n_header_field_colon;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_error_15:
s_n_llhttp__internal__n_error_15: {
state->error = 0xa;
state->reason = "Invalid header field char";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_span_start_llhttp__on_header_value:
s_n_llhttp__internal__n_span_start_llhttp__on_header_value: {
if (p == endp) {
return s_n_llhttp__internal__n_span_start_llhttp__on_header_value;
}
state->_span_pos0 = (void*) p;
state->_span_cb0 = llhttp__on_header_value;
goto s_n_llhttp__internal__n_span_end_llhttp__on_header_value;
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_header_value_discard_lws:
s_n_llhttp__internal__n_header_value_discard_lws: {
if (p == endp) {
return s_n_llhttp__internal__n_header_value_discard_lws;
}
switch (*p) {
case 9: {
p++;
goto s_n_llhttp__internal__n_header_value_discard_ws;
}
case ' ': {
p++;
goto s_n_llhttp__internal__n_header_value_discard_ws;
}
default: {
goto s_n_llhttp__internal__n_invoke_load_header_state;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_header_value_discard_ws_almost_done:
s_n_llhttp__internal__n_header_value_discard_ws_almost_done: {
if (p == endp) {
return s_n_llhttp__internal__n_header_value_discard_ws_almost_done;
}
p++;
goto s_n_llhttp__internal__n_header_value_discard_lws;
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_header_value_lws:
s_n_llhttp__internal__n_header_value_lws: {
if (p == endp) {
return s_n_llhttp__internal__n_header_value_lws;
}
switch (*p) {
case 9: {
goto s_n_llhttp__internal__n_span_start_llhttp__on_header_value_1;
}
case ' ': {
goto s_n_llhttp__internal__n_span_start_llhttp__on_header_value_1;
}
default: {
goto s_n_llhttp__internal__n_invoke_load_header_state_3;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_header_value_almost_done:
s_n_llhttp__internal__n_header_value_almost_done: {
if (p == endp) {
return s_n_llhttp__internal__n_header_value_almost_done;
}
switch (*p) {
case 10: {
p++;
goto s_n_llhttp__internal__n_header_value_lws;
}
default: {
goto s_n_llhttp__internal__n_error_17;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_header_value_lenient:
s_n_llhttp__internal__n_header_value_lenient: {
if (p == endp) {
return s_n_llhttp__internal__n_header_value_lenient;
}
switch (*p) {
case 10: {
goto s_n_llhttp__internal__n_span_end_llhttp__on_header_value_1;
}
case 13: {
goto s_n_llhttp__internal__n_span_end_llhttp__on_header_value_3;
}
default: {
p++;
goto s_n_llhttp__internal__n_header_value_lenient;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_header_value_otherwise:
s_n_llhttp__internal__n_header_value_otherwise: {
if (p == endp) {
return s_n_llhttp__internal__n_header_value_otherwise;
}
switch (*p) {
case 10: {
goto s_n_llhttp__internal__n_span_end_llhttp__on_header_value_1;
}
case 13: {
goto s_n_llhttp__internal__n_span_end_llhttp__on_header_value_2;
}
default: {
goto s_n_llhttp__internal__n_invoke_test_flags_5;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_header_value_connection_token:
s_n_llhttp__internal__n_header_value_connection_token: {
static uint8_t lookup_table[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
};
if (p == endp) {
return s_n_llhttp__internal__n_header_value_connection_token;
}
switch (lookup_table[(uint8_t) *p]) {
case 1: {
p++;
goto s_n_llhttp__internal__n_header_value_connection_token;
}
case 2: {
p++;
goto s_n_llhttp__internal__n_header_value_connection;
}
default: {
goto s_n_llhttp__internal__n_header_value_otherwise;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_header_value_connection_ws:
s_n_llhttp__internal__n_header_value_connection_ws: {
if (p == endp) {
return s_n_llhttp__internal__n_header_value_connection_ws;
}
switch (*p) {
case 10: {
goto s_n_llhttp__internal__n_header_value_otherwise;
}
case 13: {
goto s_n_llhttp__internal__n_header_value_otherwise;
}
case ' ': {
p++;
goto s_n_llhttp__internal__n_header_value_connection_ws;
}
case ',': {
p++;
goto s_n_llhttp__internal__n_invoke_load_header_state_4;
}
default: {
goto s_n_llhttp__internal__n_invoke_update_header_state_4;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_header_value_connection_1:
s_n_llhttp__internal__n_header_value_connection_1: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_header_value_connection_1;
}
match_seq = llparse__match_sequence_to_lower(state, p, endp, llparse_blob4, 4);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
goto s_n_llhttp__internal__n_invoke_update_header_state_2;
}
case kMatchPause: {
return s_n_llhttp__internal__n_header_value_connection_1;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_header_value_connection_token;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_header_value_connection_2:
s_n_llhttp__internal__n_header_value_connection_2: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_header_value_connection_2;
}
match_seq = llparse__match_sequence_to_lower(state, p, endp, llparse_blob5, 9);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
goto s_n_llhttp__internal__n_invoke_update_header_state_5;
}
case kMatchPause: {
return s_n_llhttp__internal__n_header_value_connection_2;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_header_value_connection_token;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_header_value_connection_3:
s_n_llhttp__internal__n_header_value_connection_3: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_header_value_connection_3;
}
match_seq = llparse__match_sequence_to_lower(state, p, endp, llparse_blob6, 6);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
goto s_n_llhttp__internal__n_invoke_update_header_state_6;
}
case kMatchPause: {
return s_n_llhttp__internal__n_header_value_connection_3;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_header_value_connection_token;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_header_value_connection:
s_n_llhttp__internal__n_header_value_connection: {
if (p == endp) {
return s_n_llhttp__internal__n_header_value_connection;
}
switch (((*p) >= 'A' && (*p) <= 'Z' ? (*p | 0x20) : (*p))) {
case 9: {
p++;
goto s_n_llhttp__internal__n_header_value_connection;
}
case ' ': {
p++;
goto s_n_llhttp__internal__n_header_value_connection;
}
case 'c': {
p++;
goto s_n_llhttp__internal__n_header_value_connection_1;
}
case 'k': {
p++;
goto s_n_llhttp__internal__n_header_value_connection_2;
}
case 'u': {
p++;
goto s_n_llhttp__internal__n_header_value_connection_3;
}
default: {
goto s_n_llhttp__internal__n_header_value_connection_token;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_error_20:
s_n_llhttp__internal__n_error_20: {
state->error = 0xb;
state->reason = "Content-Length overflow";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_error_21:
s_n_llhttp__internal__n_error_21: {
state->error = 0xb;
state->reason = "Invalid character in Content-Length";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_header_value_content_length_ws:
s_n_llhttp__internal__n_header_value_content_length_ws: {
if (p == endp) {
return s_n_llhttp__internal__n_header_value_content_length_ws;
}
switch (*p) {
case 10: {
goto s_n_llhttp__internal__n_invoke_or_flags_15;
}
case 13: {
goto s_n_llhttp__internal__n_invoke_or_flags_15;
}
case ' ': {
p++;
goto s_n_llhttp__internal__n_header_value_content_length_ws;
}
default: {
goto s_n_llhttp__internal__n_span_end_llhttp__on_header_value_5;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_header_value_content_length:
s_n_llhttp__internal__n_header_value_content_length: {
if (p == endp) {
return s_n_llhttp__internal__n_header_value_content_length;
}
switch (*p) {
case '0': {
p++;
match = 0;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length_1;
}
case '1': {
p++;
match = 1;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length_1;
}
case '2': {
p++;
match = 2;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length_1;
}
case '3': {
p++;
match = 3;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length_1;
}
case '4': {
p++;
match = 4;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length_1;
}
case '5': {
p++;
match = 5;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length_1;
}
case '6': {
p++;
match = 6;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length_1;
}
case '7': {
p++;
match = 7;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length_1;
}
case '8': {
p++;
match = 8;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length_1;
}
case '9': {
p++;
match = 9;
goto s_n_llhttp__internal__n_invoke_mul_add_content_length_1;
}
default: {
goto s_n_llhttp__internal__n_header_value_content_length_ws;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_header_value_te_chunked_last:
s_n_llhttp__internal__n_header_value_te_chunked_last: {
if (p == endp) {
return s_n_llhttp__internal__n_header_value_te_chunked_last;
}
switch (*p) {
case 10: {
goto s_n_llhttp__internal__n_invoke_update_header_state_7;
}
case 13: {
goto s_n_llhttp__internal__n_invoke_update_header_state_7;
}
case ' ': {
p++;
goto s_n_llhttp__internal__n_header_value_te_chunked_last;
}
default: {
goto s_n_llhttp__internal__n_header_value_te_chunked;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_header_value_te_token_ows:
s_n_llhttp__internal__n_header_value_te_token_ows: {
if (p == endp) {
return s_n_llhttp__internal__n_header_value_te_token_ows;
}
switch (*p) {
case 9: {
p++;
goto s_n_llhttp__internal__n_header_value_te_token_ows;
}
case ' ': {
p++;
goto s_n_llhttp__internal__n_header_value_te_token_ows;
}
default: {
goto s_n_llhttp__internal__n_header_value_te_chunked;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_header_value:
s_n_llhttp__internal__n_header_value: {
static uint8_t lookup_table[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
};
if (p == endp) {
return s_n_llhttp__internal__n_header_value;
}
#ifdef __SSE4_2__
if (endp - p >= 16) {
__m128i ranges;
__m128i input;
int avail;
int match_len;
/* Load input */
input = _mm_loadu_si128((__m128i const*) p);
ranges = _mm_loadu_si128((__m128i const*) llparse_blob8);
/* Find first character that does not match `ranges` */
match_len = _mm_cmpestri(ranges, 6,
input, 16,
_SIDD_UBYTE_OPS | _SIDD_CMP_RANGES |
_SIDD_NEGATIVE_POLARITY);
if (match_len != 0) {
p += match_len;
goto s_n_llhttp__internal__n_header_value;
}
goto s_n_llhttp__internal__n_header_value_otherwise;
}
#endif /* __SSE4_2__ */
switch (lookup_table[(uint8_t) *p]) {
case 1: {
p++;
goto s_n_llhttp__internal__n_header_value;
}
default: {
goto s_n_llhttp__internal__n_header_value_otherwise;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_header_value_te_token:
s_n_llhttp__internal__n_header_value_te_token: {
static uint8_t lookup_table[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
};
if (p == endp) {
return s_n_llhttp__internal__n_header_value_te_token;
}
switch (lookup_table[(uint8_t) *p]) {
case 1: {
p++;
goto s_n_llhttp__internal__n_header_value_te_token;
}
case 2: {
p++;
goto s_n_llhttp__internal__n_header_value_te_token_ows;
}
default: {
goto s_n_llhttp__internal__n_invoke_update_header_state_8;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_header_value_te_chunked:
s_n_llhttp__internal__n_header_value_te_chunked: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_header_value_te_chunked;
}
match_seq = llparse__match_sequence_to_lower_unsafe(state, p, endp, llparse_blob7, 7);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
goto s_n_llhttp__internal__n_header_value_te_chunked_last;
}
case kMatchPause: {
return s_n_llhttp__internal__n_header_value_te_chunked;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_header_value_te_token;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_span_start_llhttp__on_header_value_1:
s_n_llhttp__internal__n_span_start_llhttp__on_header_value_1: {
if (p == endp) {
return s_n_llhttp__internal__n_span_start_llhttp__on_header_value_1;
}
state->_span_pos0 = (void*) p;
state->_span_cb0 = llhttp__on_header_value;
goto s_n_llhttp__internal__n_invoke_load_header_state_2;
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_header_value_discard_ws:
s_n_llhttp__internal__n_header_value_discard_ws: {
if (p == endp) {
return s_n_llhttp__internal__n_header_value_discard_ws;
}
switch (*p) {
case 9: {
p++;
goto s_n_llhttp__internal__n_header_value_discard_ws;
}
case 10: {
p++;
goto s_n_llhttp__internal__n_header_value_discard_lws;
}
case 13: {
p++;
goto s_n_llhttp__internal__n_header_value_discard_ws_almost_done;
}
case ' ': {
p++;
goto s_n_llhttp__internal__n_header_value_discard_ws;
}
default: {
goto s_n_llhttp__internal__n_span_start_llhttp__on_header_value_1;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_header_field_general_otherwise:
s_n_llhttp__internal__n_header_field_general_otherwise: {
if (p == endp) {
return s_n_llhttp__internal__n_header_field_general_otherwise;
}
switch (*p) {
case ':': {
goto s_n_llhttp__internal__n_span_end_llhttp__on_header_field_2;
}
default: {
goto s_n_llhttp__internal__n_error_22;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_header_field_general:
s_n_llhttp__internal__n_header_field_general: {
static uint8_t lookup_table[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
if (p == endp) {
return s_n_llhttp__internal__n_header_field_general;
}
#ifdef __SSE4_2__
if (endp - p >= 16) {
__m128i ranges;
__m128i input;
int avail;
int match_len;
/* Load input */
input = _mm_loadu_si128((__m128i const*) p);
ranges = _mm_loadu_si128((__m128i const*) llparse_blob9);
/* Find first character that does not match `ranges` */
match_len = _mm_cmpestri(ranges, 16,
input, 16,
_SIDD_UBYTE_OPS | _SIDD_CMP_RANGES |
_SIDD_NEGATIVE_POLARITY);
if (match_len != 0) {
p += match_len;
goto s_n_llhttp__internal__n_header_field_general;
}
ranges = _mm_loadu_si128((__m128i const*) llparse_blob10);
/* Find first character that does not match `ranges` */
match_len = _mm_cmpestri(ranges, 2,
input, 16,
_SIDD_UBYTE_OPS | _SIDD_CMP_RANGES |
_SIDD_NEGATIVE_POLARITY);
if (match_len != 0) {
p += match_len;
goto s_n_llhttp__internal__n_header_field_general;
}
goto s_n_llhttp__internal__n_header_field_general_otherwise;
}
#endif /* __SSE4_2__ */
switch (lookup_table[(uint8_t) *p]) {
case 1: {
p++;
goto s_n_llhttp__internal__n_header_field_general;
}
default: {
goto s_n_llhttp__internal__n_header_field_general_otherwise;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_header_field_colon:
s_n_llhttp__internal__n_header_field_colon: {
if (p == endp) {
return s_n_llhttp__internal__n_header_field_colon;
}
switch (*p) {
case ' ': {
goto s_n_llhttp__internal__n_invoke_test_flags_4;
}
case ':': {
goto s_n_llhttp__internal__n_span_end_llhttp__on_header_field_1;
}
default: {
goto s_n_llhttp__internal__n_invoke_update_header_state_9;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_header_field_3:
s_n_llhttp__internal__n_header_field_3: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_header_field_3;
}
match_seq = llparse__match_sequence_to_lower(state, p, endp, llparse_blob3, 6);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
match = 1;
goto s_n_llhttp__internal__n_invoke_store_header_state;
}
case kMatchPause: {
return s_n_llhttp__internal__n_header_field_3;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_invoke_update_header_state_10;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_header_field_4:
s_n_llhttp__internal__n_header_field_4: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_header_field_4;
}
match_seq = llparse__match_sequence_to_lower(state, p, endp, llparse_blob11, 10);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
match = 2;
goto s_n_llhttp__internal__n_invoke_store_header_state;
}
case kMatchPause: {
return s_n_llhttp__internal__n_header_field_4;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_invoke_update_header_state_10;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_header_field_2:
s_n_llhttp__internal__n_header_field_2: {
if (p == endp) {
return s_n_llhttp__internal__n_header_field_2;
}
switch (((*p) >= 'A' && (*p) <= 'Z' ? (*p | 0x20) : (*p))) {
case 'n': {
p++;
goto s_n_llhttp__internal__n_header_field_3;
}
case 't': {
p++;
goto s_n_llhttp__internal__n_header_field_4;
}
default: {
goto s_n_llhttp__internal__n_invoke_update_header_state_10;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_header_field_1:
s_n_llhttp__internal__n_header_field_1: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_header_field_1;
}
match_seq = llparse__match_sequence_to_lower(state, p, endp, llparse_blob2, 2);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
goto s_n_llhttp__internal__n_header_field_2;
}
case kMatchPause: {
return s_n_llhttp__internal__n_header_field_1;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_invoke_update_header_state_10;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_header_field_5:
s_n_llhttp__internal__n_header_field_5: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_header_field_5;
}
match_seq = llparse__match_sequence_to_lower(state, p, endp, llparse_blob12, 15);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
match = 1;
goto s_n_llhttp__internal__n_invoke_store_header_state;
}
case kMatchPause: {
return s_n_llhttp__internal__n_header_field_5;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_invoke_update_header_state_10;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_header_field_6:
s_n_llhttp__internal__n_header_field_6: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_header_field_6;
}
match_seq = llparse__match_sequence_to_lower(state, p, endp, llparse_blob13, 16);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
match = 3;
goto s_n_llhttp__internal__n_invoke_store_header_state;
}
case kMatchPause: {
return s_n_llhttp__internal__n_header_field_6;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_invoke_update_header_state_10;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_header_field_7:
s_n_llhttp__internal__n_header_field_7: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_header_field_7;
}
match_seq = llparse__match_sequence_to_lower(state, p, endp, llparse_blob14, 6);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
match = 4;
goto s_n_llhttp__internal__n_invoke_store_header_state;
}
case kMatchPause: {
return s_n_llhttp__internal__n_header_field_7;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_invoke_update_header_state_10;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_header_field:
s_n_llhttp__internal__n_header_field: {
if (p == endp) {
return s_n_llhttp__internal__n_header_field;
}
switch (((*p) >= 'A' && (*p) <= 'Z' ? (*p | 0x20) : (*p))) {
case 'c': {
p++;
goto s_n_llhttp__internal__n_header_field_1;
}
case 'p': {
p++;
goto s_n_llhttp__internal__n_header_field_5;
}
case 't': {
p++;
goto s_n_llhttp__internal__n_header_field_6;
}
case 'u': {
p++;
goto s_n_llhttp__internal__n_header_field_7;
}
default: {
goto s_n_llhttp__internal__n_invoke_update_header_state_10;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_span_start_llhttp__on_header_field:
s_n_llhttp__internal__n_span_start_llhttp__on_header_field: {
if (p == endp) {
return s_n_llhttp__internal__n_span_start_llhttp__on_header_field;
}
state->_span_pos0 = (void*) p;
state->_span_cb0 = llhttp__on_header_field;
goto s_n_llhttp__internal__n_header_field;
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_header_field_start:
s_n_llhttp__internal__n_header_field_start: {
if (p == endp) {
return s_n_llhttp__internal__n_header_field_start;
}
switch (*p) {
case 10: {
goto s_n_llhttp__internal__n_headers_almost_done;
}
case 13: {
p++;
goto s_n_llhttp__internal__n_headers_almost_done;
}
default: {
goto s_n_llhttp__internal__n_span_start_llhttp__on_header_field;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_url_skip_to_http09:
s_n_llhttp__internal__n_url_skip_to_http09: {
if (p == endp) {
return s_n_llhttp__internal__n_url_skip_to_http09;
}
p++;
goto s_n_llhttp__internal__n_invoke_update_http_major;
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_url_skip_lf_to_http09:
s_n_llhttp__internal__n_url_skip_lf_to_http09: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_url_skip_lf_to_http09;
}
match_seq = llparse__match_sequence_id(state, p, endp, llparse_blob15, 2);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
goto s_n_llhttp__internal__n_invoke_update_http_major;
}
case kMatchPause: {
return s_n_llhttp__internal__n_url_skip_lf_to_http09;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_error_23;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_req_http_end_1:
s_n_llhttp__internal__n_req_http_end_1: {
if (p == endp) {
return s_n_llhttp__internal__n_req_http_end_1;
}
switch (*p) {
case 10: {
p++;
goto s_n_llhttp__internal__n_header_field_start;
}
default: {
goto s_n_llhttp__internal__n_error_24;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_req_http_end:
s_n_llhttp__internal__n_req_http_end: {
if (p == endp) {
return s_n_llhttp__internal__n_req_http_end;
}
switch (*p) {
case 10: {
p++;
goto s_n_llhttp__internal__n_header_field_start;
}
case 13: {
p++;
goto s_n_llhttp__internal__n_req_http_end_1;
}
default: {
goto s_n_llhttp__internal__n_error_24;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_req_http_minor:
s_n_llhttp__internal__n_req_http_minor: {
if (p == endp) {
return s_n_llhttp__internal__n_req_http_minor;
}
switch (*p) {
case '0': {
p++;
match = 0;
goto s_n_llhttp__internal__n_invoke_store_http_minor;
}
case '1': {
p++;
match = 1;
goto s_n_llhttp__internal__n_invoke_store_http_minor;
}
case '2': {
p++;
match = 2;
goto s_n_llhttp__internal__n_invoke_store_http_minor;
}
case '3': {
p++;
match = 3;
goto s_n_llhttp__internal__n_invoke_store_http_minor;
}
case '4': {
p++;
match = 4;
goto s_n_llhttp__internal__n_invoke_store_http_minor;
}
case '5': {
p++;
match = 5;
goto s_n_llhttp__internal__n_invoke_store_http_minor;
}
case '6': {
p++;
match = 6;
goto s_n_llhttp__internal__n_invoke_store_http_minor;
}
case '7': {
p++;
match = 7;
goto s_n_llhttp__internal__n_invoke_store_http_minor;
}
case '8': {
p++;
match = 8;
goto s_n_llhttp__internal__n_invoke_store_http_minor;
}
case '9': {
p++;
match = 9;
goto s_n_llhttp__internal__n_invoke_store_http_minor;
}
default: {
goto s_n_llhttp__internal__n_error_25;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_req_http_dot:
s_n_llhttp__internal__n_req_http_dot: {
if (p == endp) {
return s_n_llhttp__internal__n_req_http_dot;
}
switch (*p) {
case '.': {
p++;
goto s_n_llhttp__internal__n_req_http_minor;
}
default: {
goto s_n_llhttp__internal__n_error_26;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_req_http_major:
s_n_llhttp__internal__n_req_http_major: {
if (p == endp) {
return s_n_llhttp__internal__n_req_http_major;
}
switch (*p) {
case '0': {
p++;
match = 0;
goto s_n_llhttp__internal__n_invoke_store_http_major;
}
case '1': {
p++;
match = 1;
goto s_n_llhttp__internal__n_invoke_store_http_major;
}
case '2': {
p++;
match = 2;
goto s_n_llhttp__internal__n_invoke_store_http_major;
}
case '3': {
p++;
match = 3;
goto s_n_llhttp__internal__n_invoke_store_http_major;
}
case '4': {
p++;
match = 4;
goto s_n_llhttp__internal__n_invoke_store_http_major;
}
case '5': {
p++;
match = 5;
goto s_n_llhttp__internal__n_invoke_store_http_major;
}
case '6': {
p++;
match = 6;
goto s_n_llhttp__internal__n_invoke_store_http_major;
}
case '7': {
p++;
match = 7;
goto s_n_llhttp__internal__n_invoke_store_http_major;
}
case '8': {
p++;
match = 8;
goto s_n_llhttp__internal__n_invoke_store_http_major;
}
case '9': {
p++;
match = 9;
goto s_n_llhttp__internal__n_invoke_store_http_major;
}
default: {
goto s_n_llhttp__internal__n_error_27;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_req_http_start_1:
s_n_llhttp__internal__n_req_http_start_1: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_req_http_start_1;
}
match_seq = llparse__match_sequence_id(state, p, endp, llparse_blob16, 4);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
goto s_n_llhttp__internal__n_req_http_major;
}
case kMatchPause: {
return s_n_llhttp__internal__n_req_http_start_1;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_error_29;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_req_http_start_2:
s_n_llhttp__internal__n_req_http_start_2: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_req_http_start_2;
}
match_seq = llparse__match_sequence_id(state, p, endp, llparse_blob17, 3);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
goto s_n_llhttp__internal__n_invoke_is_equal_method_1;
}
case kMatchPause: {
return s_n_llhttp__internal__n_req_http_start_2;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_error_29;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_req_http_start:
s_n_llhttp__internal__n_req_http_start: {
if (p == endp) {
return s_n_llhttp__internal__n_req_http_start;
}
switch (*p) {
case ' ': {
p++;
goto s_n_llhttp__internal__n_req_http_start;
}
case 'H': {
p++;
goto s_n_llhttp__internal__n_req_http_start_1;
}
case 'I': {
p++;
goto s_n_llhttp__internal__n_req_http_start_2;
}
default: {
goto s_n_llhttp__internal__n_error_29;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_url_skip_to_http:
s_n_llhttp__internal__n_url_skip_to_http: {
if (p == endp) {
return s_n_llhttp__internal__n_url_skip_to_http;
}
p++;
goto s_n_llhttp__internal__n_req_http_start;
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_url_fragment:
s_n_llhttp__internal__n_url_fragment: {
static uint8_t lookup_table[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 0, 1, 3, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
};
if (p == endp) {
return s_n_llhttp__internal__n_url_fragment;
}
switch (lookup_table[(uint8_t) *p]) {
case 1: {
p++;
goto s_n_llhttp__internal__n_url_fragment;
}
case 2: {
goto s_n_llhttp__internal__n_span_end_llhttp__on_url_6;
}
case 3: {
goto s_n_llhttp__internal__n_span_end_llhttp__on_url_7;
}
case 4: {
goto s_n_llhttp__internal__n_span_end_llhttp__on_url_8;
}
default: {
goto s_n_llhttp__internal__n_error_30;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_span_end_stub_query_3:
s_n_llhttp__internal__n_span_end_stub_query_3: {
if (p == endp) {
return s_n_llhttp__internal__n_span_end_stub_query_3;
}
p++;
goto s_n_llhttp__internal__n_url_fragment;
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_url_query:
s_n_llhttp__internal__n_url_query: {
static uint8_t lookup_table[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 0, 1, 3, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4, 1, 1, 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
};
if (p == endp) {
return s_n_llhttp__internal__n_url_query;
}
switch (lookup_table[(uint8_t) *p]) {
case 1: {
p++;
goto s_n_llhttp__internal__n_url_query;
}
case 2: {
goto s_n_llhttp__internal__n_span_end_llhttp__on_url_9;
}
case 3: {
goto s_n_llhttp__internal__n_span_end_llhttp__on_url_10;
}
case 4: {
goto s_n_llhttp__internal__n_span_end_llhttp__on_url_11;
}
case 5: {
goto s_n_llhttp__internal__n_span_end_stub_query_3;
}
default: {
goto s_n_llhttp__internal__n_error_31;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_url_query_or_fragment:
s_n_llhttp__internal__n_url_query_or_fragment: {
if (p == endp) {
return s_n_llhttp__internal__n_url_query_or_fragment;
}
switch (*p) {
case 10: {
goto s_n_llhttp__internal__n_span_end_llhttp__on_url_3;
}
case 13: {
goto s_n_llhttp__internal__n_span_end_llhttp__on_url_4;
}
case ' ': {
goto s_n_llhttp__internal__n_span_end_llhttp__on_url_5;
}
case '#': {
p++;
goto s_n_llhttp__internal__n_url_fragment;
}
case '?': {
p++;
goto s_n_llhttp__internal__n_url_query;
}
default: {
goto s_n_llhttp__internal__n_error_32;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_url_path:
s_n_llhttp__internal__n_url_path: {
static uint8_t lookup_table[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
};
if (p == endp) {
return s_n_llhttp__internal__n_url_path;
}
#ifdef __SSE4_2__
if (endp - p >= 16) {
__m128i ranges;
__m128i input;
int avail;
int match_len;
/* Load input */
input = _mm_loadu_si128((__m128i const*) p);
ranges = _mm_loadu_si128((__m128i const*) llparse_blob1);
/* Find first character that does not match `ranges` */
match_len = _mm_cmpestri(ranges, 12,
input, 16,
_SIDD_UBYTE_OPS | _SIDD_CMP_RANGES |
_SIDD_NEGATIVE_POLARITY);
if (match_len != 0) {
p += match_len;
goto s_n_llhttp__internal__n_url_path;
}
goto s_n_llhttp__internal__n_url_query_or_fragment;
}
#endif /* __SSE4_2__ */
switch (lookup_table[(uint8_t) *p]) {
case 1: {
p++;
goto s_n_llhttp__internal__n_url_path;
}
default: {
goto s_n_llhttp__internal__n_url_query_or_fragment;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_span_start_stub_path_2:
s_n_llhttp__internal__n_span_start_stub_path_2: {
if (p == endp) {
return s_n_llhttp__internal__n_span_start_stub_path_2;
}
p++;
goto s_n_llhttp__internal__n_url_path;
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_span_start_stub_path:
s_n_llhttp__internal__n_span_start_stub_path: {
if (p == endp) {
return s_n_llhttp__internal__n_span_start_stub_path;
}
p++;
goto s_n_llhttp__internal__n_url_path;
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_span_start_stub_path_1:
s_n_llhttp__internal__n_span_start_stub_path_1: {
if (p == endp) {
return s_n_llhttp__internal__n_span_start_stub_path_1;
}
p++;
goto s_n_llhttp__internal__n_url_path;
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_url_server_with_at:
s_n_llhttp__internal__n_url_server_with_at: {
static uint8_t lookup_table[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3, 4, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 4, 0, 6,
7, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 4, 0, 4,
0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 0, 0, 4, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
if (p == endp) {
return s_n_llhttp__internal__n_url_server_with_at;
}
switch (lookup_table[(uint8_t) *p]) {
case 1: {
goto s_n_llhttp__internal__n_span_end_llhttp__on_url_12;
}
case 2: {
goto s_n_llhttp__internal__n_span_end_llhttp__on_url_13;
}
case 3: {
goto s_n_llhttp__internal__n_span_end_llhttp__on_url_14;
}
case 4: {
p++;
goto s_n_llhttp__internal__n_url_server;
}
case 5: {
goto s_n_llhttp__internal__n_span_start_stub_path_1;
}
case 6: {
p++;
goto s_n_llhttp__internal__n_url_query;
}
case 7: {
p++;
goto s_n_llhttp__internal__n_error_33;
}
default: {
goto s_n_llhttp__internal__n_error_34;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_url_server:
s_n_llhttp__internal__n_url_server: {
static uint8_t lookup_table[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3, 4, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 4, 0, 6,
7, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 4, 0, 4,
0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 0, 0, 4, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
if (p == endp) {
return s_n_llhttp__internal__n_url_server;
}
switch (lookup_table[(uint8_t) *p]) {
case 1: {
goto s_n_llhttp__internal__n_span_end_llhttp__on_url;
}
case 2: {
goto s_n_llhttp__internal__n_span_end_llhttp__on_url_1;
}
case 3: {
goto s_n_llhttp__internal__n_span_end_llhttp__on_url_2;
}
case 4: {
p++;
goto s_n_llhttp__internal__n_url_server;
}
case 5: {
goto s_n_llhttp__internal__n_span_start_stub_path;
}
case 6: {
p++;
goto s_n_llhttp__internal__n_url_query;
}
case 7: {
p++;
goto s_n_llhttp__internal__n_url_server_with_at;
}
default: {
goto s_n_llhttp__internal__n_error_35;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_url_schema_delim_1:
s_n_llhttp__internal__n_url_schema_delim_1: {
if (p == endp) {
return s_n_llhttp__internal__n_url_schema_delim_1;
}
switch (*p) {
case '/': {
p++;
goto s_n_llhttp__internal__n_url_server;
}
default: {
goto s_n_llhttp__internal__n_error_37;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_url_schema_delim:
s_n_llhttp__internal__n_url_schema_delim: {
if (p == endp) {
return s_n_llhttp__internal__n_url_schema_delim;
}
switch (*p) {
case 10: {
p++;
goto s_n_llhttp__internal__n_error_36;
}
case 13: {
p++;
goto s_n_llhttp__internal__n_error_36;
}
case ' ': {
p++;
goto s_n_llhttp__internal__n_error_36;
}
case '/': {
p++;
goto s_n_llhttp__internal__n_url_schema_delim_1;
}
default: {
goto s_n_llhttp__internal__n_error_37;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_span_end_stub_schema:
s_n_llhttp__internal__n_span_end_stub_schema: {
if (p == endp) {
return s_n_llhttp__internal__n_span_end_stub_schema;
}
p++;
goto s_n_llhttp__internal__n_url_schema_delim;
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_url_schema:
s_n_llhttp__internal__n_url_schema: {
static uint8_t lookup_table[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0,
0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0,
0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
if (p == endp) {
return s_n_llhttp__internal__n_url_schema;
}
switch (lookup_table[(uint8_t) *p]) {
case 1: {
p++;
goto s_n_llhttp__internal__n_error_36;
}
case 2: {
goto s_n_llhttp__internal__n_span_end_stub_schema;
}
case 3: {
p++;
goto s_n_llhttp__internal__n_url_schema;
}
default: {
goto s_n_llhttp__internal__n_error_38;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_url_start:
s_n_llhttp__internal__n_url_start: {
static uint8_t lookup_table[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 2,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0,
0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
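      /* Byte classes for the first URL byte: 1 = CR/LF/SP (invalid),
       * 2 = '*' or '/' (asterisk-form or origin-form request target),
       * 3 = ALPHA (start of an absolute-form scheme). */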
if (p == endp) {
return s_n_llhttp__internal__n_url_start;
}
switch (lookup_table[(uint8_t) *p]) {
case 1: {
p++;
goto s_n_llhttp__internal__n_error_36;
}
case 2: {
goto s_n_llhttp__internal__n_span_start_stub_path_2;
}
case 3: {
goto s_n_llhttp__internal__n_url_schema;
}
default: {
goto s_n_llhttp__internal__n_error_39;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_span_start_llhttp__on_url_1:
s_n_llhttp__internal__n_span_start_llhttp__on_url_1: {
if (p == endp) {
return s_n_llhttp__internal__n_span_start_llhttp__on_url_1;
}
state->_span_pos0 = (void*) p;
state->_span_cb0 = llhttp__on_url;
goto s_n_llhttp__internal__n_url_start;
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_span_start_llhttp__on_url:
s_n_llhttp__internal__n_span_start_llhttp__on_url: {
if (p == endp) {
return s_n_llhttp__internal__n_span_start_llhttp__on_url;
}
state->_span_pos0 = (void*) p;
state->_span_cb0 = llhttp__on_url;
goto s_n_llhttp__internal__n_url_server;
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_req_spaces_before_url:
s_n_llhttp__internal__n_req_spaces_before_url: {
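      /* Consume any extra spaces after the method, then dispatch via
       * is_equal_method (presumably CONNECT vs. everything else) to pick
       * the URL grammar. */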
if (p == endp) {
return s_n_llhttp__internal__n_req_spaces_before_url;
}
switch (*p) {
case ' ': {
p++;
goto s_n_llhttp__internal__n_req_spaces_before_url;
}
default: {
goto s_n_llhttp__internal__n_invoke_is_equal_method;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_req_first_space_before_url:
s_n_llhttp__internal__n_req_first_space_before_url: {
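      /* A mandatory single space must follow the method name; further
       * spaces are consumed by the state above, any other byte is an
       * error (error_40). */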
if (p == endp) {
return s_n_llhttp__internal__n_req_first_space_before_url;
}
switch (*p) {
case ' ': {
p++;
goto s_n_llhttp__internal__n_req_spaces_before_url;
}
default: {
goto s_n_llhttp__internal__n_error_40;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_1:
s_n_llhttp__internal__n_start_req_1: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_start_req_1;
}
match_seq = llparse__match_sequence_id(state, p, endp, llparse_blob0, 2);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
match = 19;
goto s_n_llhttp__internal__n_invoke_store_method_1;
}
case kMatchPause: {
return s_n_llhttp__internal__n_start_req_1;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_2:
s_n_llhttp__internal__n_start_req_2: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_start_req_2;
}
match_seq = llparse__match_sequence_id(state, p, endp, llparse_blob18, 3);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
match = 16;
goto s_n_llhttp__internal__n_invoke_store_method_1;
}
case kMatchPause: {
return s_n_llhttp__internal__n_start_req_2;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_4:
s_n_llhttp__internal__n_start_req_4: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_start_req_4;
}
match_seq = llparse__match_sequence_id(state, p, endp, llparse_blob19, 6);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
match = 22;
goto s_n_llhttp__internal__n_invoke_store_method_1;
}
case kMatchPause: {
return s_n_llhttp__internal__n_start_req_4;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_6:
s_n_llhttp__internal__n_start_req_6: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_start_req_6;
}
match_seq = llparse__match_sequence_id(state, p, endp, llparse_blob20, 4);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
match = 5;
goto s_n_llhttp__internal__n_invoke_store_method_1;
}
case kMatchPause: {
return s_n_llhttp__internal__n_start_req_6;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_7:
s_n_llhttp__internal__n_start_req_7: {
if (p == endp) {
return s_n_llhttp__internal__n_start_req_7;
}
switch (*p) {
case 'Y': {
p++;
match = 8;
goto s_n_llhttp__internal__n_invoke_store_method_1;
}
default: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_5:
s_n_llhttp__internal__n_start_req_5: {
if (p == endp) {
return s_n_llhttp__internal__n_start_req_5;
}
switch (*p) {
case 'N': {
p++;
goto s_n_llhttp__internal__n_start_req_6;
}
case 'P': {
p++;
goto s_n_llhttp__internal__n_start_req_7;
}
default: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_3:
s_n_llhttp__internal__n_start_req_3: {
if (p == endp) {
return s_n_llhttp__internal__n_start_req_3;
}
switch (*p) {
case 'H': {
p++;
goto s_n_llhttp__internal__n_start_req_4;
}
case 'O': {
p++;
goto s_n_llhttp__internal__n_start_req_5;
}
default: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_8:
s_n_llhttp__internal__n_start_req_8: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_start_req_8;
}
match_seq = llparse__match_sequence_id(state, p, endp, llparse_blob21, 5);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
match = 0;
goto s_n_llhttp__internal__n_invoke_store_method_1;
}
case kMatchPause: {
return s_n_llhttp__internal__n_start_req_8;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_9:
s_n_llhttp__internal__n_start_req_9: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_start_req_9;
}
match_seq = llparse__match_sequence_id(state, p, endp, llparse_blob22, 2);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
match = 1;
goto s_n_llhttp__internal__n_invoke_store_method_1;
}
case kMatchPause: {
return s_n_llhttp__internal__n_start_req_9;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_10:
s_n_llhttp__internal__n_start_req_10: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_start_req_10;
}
match_seq = llparse__match_sequence_id(state, p, endp, llparse_blob23, 3);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
match = 2;
goto s_n_llhttp__internal__n_invoke_store_method_1;
}
case kMatchPause: {
return s_n_llhttp__internal__n_start_req_10;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_12:
s_n_llhttp__internal__n_start_req_12: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_start_req_12;
}
match_seq = llparse__match_sequence_id(state, p, endp, llparse_blob24, 2);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
match = 31;
goto s_n_llhttp__internal__n_invoke_store_method_1;
}
case kMatchPause: {
return s_n_llhttp__internal__n_start_req_12;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_13:
s_n_llhttp__internal__n_start_req_13: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_start_req_13;
}
match_seq = llparse__match_sequence_id(state, p, endp, llparse_blob25, 2);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
match = 9;
goto s_n_llhttp__internal__n_invoke_store_method_1;
}
case kMatchPause: {
return s_n_llhttp__internal__n_start_req_13;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_11:
s_n_llhttp__internal__n_start_req_11: {
if (p == endp) {
return s_n_llhttp__internal__n_start_req_11;
}
switch (*p) {
case 'I': {
p++;
goto s_n_llhttp__internal__n_start_req_12;
}
case 'O': {
p++;
goto s_n_llhttp__internal__n_start_req_13;
}
default: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_15:
s_n_llhttp__internal__n_start_req_15: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_start_req_15;
}
match_seq = llparse__match_sequence_id(state, p, endp, llparse_blob26, 6);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
match = 24;
goto s_n_llhttp__internal__n_invoke_store_method_1;
}
case kMatchPause: {
return s_n_llhttp__internal__n_start_req_15;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_16:
s_n_llhttp__internal__n_start_req_16: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_start_req_16;
}
match_seq = llparse__match_sequence_id(state, p, endp, llparse_blob27, 3);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
match = 23;
goto s_n_llhttp__internal__n_invoke_store_method_1;
}
case kMatchPause: {
return s_n_llhttp__internal__n_start_req_16;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_18:
s_n_llhttp__internal__n_start_req_18: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_start_req_18;
}
match_seq = llparse__match_sequence_id(state, p, endp, llparse_blob28, 7);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
match = 21;
goto s_n_llhttp__internal__n_invoke_store_method_1;
}
case kMatchPause: {
return s_n_llhttp__internal__n_start_req_18;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_20:
s_n_llhttp__internal__n_start_req_20: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_start_req_20;
}
match_seq = llparse__match_sequence_id(state, p, endp, llparse_blob29, 6);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
match = 30;
goto s_n_llhttp__internal__n_invoke_store_method_1;
}
case kMatchPause: {
return s_n_llhttp__internal__n_start_req_20;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_21:
s_n_llhttp__internal__n_start_req_21: {
if (p == endp) {
return s_n_llhttp__internal__n_start_req_21;
}
switch (*p) {
case 'L': {
p++;
match = 10;
goto s_n_llhttp__internal__n_invoke_store_method_1;
}
default: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_19:
s_n_llhttp__internal__n_start_req_19: {
if (p == endp) {
return s_n_llhttp__internal__n_start_req_19;
}
switch (*p) {
case 'A': {
p++;
goto s_n_llhttp__internal__n_start_req_20;
}
case 'O': {
p++;
goto s_n_llhttp__internal__n_start_req_21;
}
default: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_17:
s_n_llhttp__internal__n_start_req_17: {
if (p == endp) {
return s_n_llhttp__internal__n_start_req_17;
}
switch (*p) {
case 'A': {
p++;
goto s_n_llhttp__internal__n_start_req_18;
}
case 'C': {
p++;
goto s_n_llhttp__internal__n_start_req_19;
}
default: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_22:
s_n_llhttp__internal__n_start_req_22: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_start_req_22;
}
match_seq = llparse__match_sequence_id(state, p, endp, llparse_blob30, 2);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
match = 11;
goto s_n_llhttp__internal__n_invoke_store_method_1;
}
case kMatchPause: {
return s_n_llhttp__internal__n_start_req_22;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_14:
s_n_llhttp__internal__n_start_req_14: {
if (p == endp) {
return s_n_llhttp__internal__n_start_req_14;
}
switch (*p) {
case '-': {
p++;
goto s_n_llhttp__internal__n_start_req_15;
}
case 'E': {
p++;
goto s_n_llhttp__internal__n_start_req_16;
}
case 'K': {
p++;
goto s_n_llhttp__internal__n_start_req_17;
}
case 'O': {
p++;
goto s_n_llhttp__internal__n_start_req_22;
}
default: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_23:
s_n_llhttp__internal__n_start_req_23: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_start_req_23;
}
match_seq = llparse__match_sequence_id(state, p, endp, llparse_blob31, 5);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
match = 25;
goto s_n_llhttp__internal__n_invoke_store_method_1;
}
case kMatchPause: {
return s_n_llhttp__internal__n_start_req_23;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_24:
s_n_llhttp__internal__n_start_req_24: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_start_req_24;
}
match_seq = llparse__match_sequence_id(state, p, endp, llparse_blob32, 6);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
match = 6;
goto s_n_llhttp__internal__n_invoke_store_method_1;
}
case kMatchPause: {
return s_n_llhttp__internal__n_start_req_24;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_26:
s_n_llhttp__internal__n_start_req_26: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_start_req_26;
}
match_seq = llparse__match_sequence_id(state, p, endp, llparse_blob33, 3);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
match = 28;
goto s_n_llhttp__internal__n_invoke_store_method_1;
}
case kMatchPause: {
return s_n_llhttp__internal__n_start_req_26;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_27:
s_n_llhttp__internal__n_start_req_27: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_start_req_27;
}
match_seq = llparse__match_sequence_id(state, p, endp, llparse_blob34, 2);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
match = 3;
goto s_n_llhttp__internal__n_invoke_store_method_1;
}
case kMatchPause: {
return s_n_llhttp__internal__n_start_req_27;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_31:
s_n_llhttp__internal__n_start_req_31: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_start_req_31;
}
match_seq = llparse__match_sequence_id(state, p, endp, llparse_blob35, 3);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
match = 12;
goto s_n_llhttp__internal__n_invoke_store_method_1;
}
case kMatchPause: {
return s_n_llhttp__internal__n_start_req_31;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_32:
s_n_llhttp__internal__n_start_req_32: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_start_req_32;
}
match_seq = llparse__match_sequence_id(state, p, endp, llparse_blob36, 4);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
match = 13;
goto s_n_llhttp__internal__n_invoke_store_method_1;
}
case kMatchPause: {
return s_n_llhttp__internal__n_start_req_32;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_30:
s_n_llhttp__internal__n_start_req_30: {
if (p == endp) {
return s_n_llhttp__internal__n_start_req_30;
}
switch (*p) {
case 'F': {
p++;
goto s_n_llhttp__internal__n_start_req_31;
}
case 'P': {
p++;
goto s_n_llhttp__internal__n_start_req_32;
}
default: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_29:
s_n_llhttp__internal__n_start_req_29: {
if (p == endp) {
return s_n_llhttp__internal__n_start_req_29;
}
switch (*p) {
case 'P': {
p++;
goto s_n_llhttp__internal__n_start_req_30;
}
default: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_28:
s_n_llhttp__internal__n_start_req_28: {
if (p == endp) {
return s_n_llhttp__internal__n_start_req_28;
}
switch (*p) {
case 'I': {
p++;
match = 34;
goto s_n_llhttp__internal__n_invoke_store_method_1;
}
case 'O': {
p++;
goto s_n_llhttp__internal__n_start_req_29;
}
default: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_34:
s_n_llhttp__internal__n_start_req_34: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_start_req_34;
}
match_seq = llparse__match_sequence_id(state, p, endp, llparse_blob37, 2);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
match = 29;
goto s_n_llhttp__internal__n_invoke_store_method_1;
}
case kMatchPause: {
return s_n_llhttp__internal__n_start_req_34;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_33:
s_n_llhttp__internal__n_start_req_33: {
if (p == endp) {
return s_n_llhttp__internal__n_start_req_33;
}
switch (*p) {
case 'R': {
p++;
goto s_n_llhttp__internal__n_start_req_34;
}
case 'T': {
p++;
match = 4;
goto s_n_llhttp__internal__n_invoke_store_method_1;
}
default: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_25:
s_n_llhttp__internal__n_start_req_25: {
if (p == endp) {
return s_n_llhttp__internal__n_start_req_25;
}
switch (*p) {
case 'A': {
p++;
goto s_n_llhttp__internal__n_start_req_26;
}
case 'O': {
p++;
goto s_n_llhttp__internal__n_start_req_27;
}
case 'R': {
p++;
goto s_n_llhttp__internal__n_start_req_28;
}
case 'U': {
p++;
goto s_n_llhttp__internal__n_start_req_33;
}
default: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_37:
s_n_llhttp__internal__n_start_req_37: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_start_req_37;
}
match_seq = llparse__match_sequence_id(state, p, endp, llparse_blob38, 3);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
match = 17;
goto s_n_llhttp__internal__n_invoke_store_method_1;
}
case kMatchPause: {
return s_n_llhttp__internal__n_start_req_37;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_38:
s_n_llhttp__internal__n_start_req_38: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_start_req_38;
}
match_seq = llparse__match_sequence_id(state, p, endp, llparse_blob39, 3);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
match = 20;
goto s_n_llhttp__internal__n_invoke_store_method_1;
}
case kMatchPause: {
return s_n_llhttp__internal__n_start_req_38;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_36:
s_n_llhttp__internal__n_start_req_36: {
if (p == endp) {
return s_n_llhttp__internal__n_start_req_36;
}
switch (*p) {
case 'B': {
p++;
goto s_n_llhttp__internal__n_start_req_37;
}
case 'P': {
p++;
goto s_n_llhttp__internal__n_start_req_38;
}
default: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_35:
s_n_llhttp__internal__n_start_req_35: {
if (p == endp) {
return s_n_llhttp__internal__n_start_req_35;
}
switch (*p) {
case 'E': {
p++;
goto s_n_llhttp__internal__n_start_req_36;
}
default: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_40:
s_n_llhttp__internal__n_start_req_40: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_start_req_40;
}
match_seq = llparse__match_sequence_id(state, p, endp, llparse_blob40, 4);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
match = 14;
goto s_n_llhttp__internal__n_invoke_store_method_1;
}
case kMatchPause: {
return s_n_llhttp__internal__n_start_req_40;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_41:
s_n_llhttp__internal__n_start_req_41: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_start_req_41;
}
match_seq = llparse__match_sequence_id(state, p, endp, llparse_blob41, 4);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
match = 33;
goto s_n_llhttp__internal__n_invoke_store_method_1;
}
case kMatchPause: {
return s_n_llhttp__internal__n_start_req_41;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_42:
s_n_llhttp__internal__n_start_req_42: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_start_req_42;
}
match_seq = llparse__match_sequence_id(state, p, endp, llparse_blob42, 7);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
match = 26;
goto s_n_llhttp__internal__n_invoke_store_method_1;
}
case kMatchPause: {
return s_n_llhttp__internal__n_start_req_42;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_39:
s_n_llhttp__internal__n_start_req_39: {
if (p == endp) {
return s_n_llhttp__internal__n_start_req_39;
}
switch (*p) {
case 'E': {
p++;
goto s_n_llhttp__internal__n_start_req_40;
}
case 'O': {
p++;
goto s_n_llhttp__internal__n_start_req_41;
}
case 'U': {
p++;
goto s_n_llhttp__internal__n_start_req_42;
}
default: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_43:
s_n_llhttp__internal__n_start_req_43: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_start_req_43;
}
match_seq = llparse__match_sequence_id(state, p, endp, llparse_blob43, 4);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
match = 7;
goto s_n_llhttp__internal__n_invoke_store_method_1;
}
case kMatchPause: {
return s_n_llhttp__internal__n_start_req_43;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_46:
s_n_llhttp__internal__n_start_req_46: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_start_req_46;
}
match_seq = llparse__match_sequence_id(state, p, endp, llparse_blob44, 3);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
match = 18;
goto s_n_llhttp__internal__n_invoke_store_method_1;
}
case kMatchPause: {
return s_n_llhttp__internal__n_start_req_46;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_48:
s_n_llhttp__internal__n_start_req_48: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_start_req_48;
}
match_seq = llparse__match_sequence_id(state, p, endp, llparse_blob45, 2);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
match = 32;
goto s_n_llhttp__internal__n_invoke_store_method_1;
}
case kMatchPause: {
return s_n_llhttp__internal__n_start_req_48;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_49:
s_n_llhttp__internal__n_start_req_49: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_start_req_49;
}
match_seq = llparse__match_sequence_id(state, p, endp, llparse_blob46, 2);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
match = 15;
goto s_n_llhttp__internal__n_invoke_store_method_1;
}
case kMatchPause: {
return s_n_llhttp__internal__n_start_req_49;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_47:
s_n_llhttp__internal__n_start_req_47: {
if (p == endp) {
return s_n_llhttp__internal__n_start_req_47;
}
switch (*p) {
case 'I': {
p++;
goto s_n_llhttp__internal__n_start_req_48;
}
case 'O': {
p++;
goto s_n_llhttp__internal__n_start_req_49;
}
default: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_50:
s_n_llhttp__internal__n_start_req_50: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_start_req_50;
}
match_seq = llparse__match_sequence_id(state, p, endp, llparse_blob47, 8);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
match = 27;
goto s_n_llhttp__internal__n_invoke_store_method_1;
}
case kMatchPause: {
return s_n_llhttp__internal__n_start_req_50;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_45:
s_n_llhttp__internal__n_start_req_45: {
if (p == endp) {
return s_n_llhttp__internal__n_start_req_45;
}
switch (*p) {
case 'B': {
p++;
goto s_n_llhttp__internal__n_start_req_46;
}
case 'L': {
p++;
goto s_n_llhttp__internal__n_start_req_47;
}
case 'S': {
p++;
goto s_n_llhttp__internal__n_start_req_50;
}
default: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_44:
s_n_llhttp__internal__n_start_req_44: {
if (p == endp) {
return s_n_llhttp__internal__n_start_req_44;
}
switch (*p) {
case 'N': {
p++;
goto s_n_llhttp__internal__n_start_req_45;
}
default: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req:
s_n_llhttp__internal__n_start_req: {
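      /* Request-method trie root: dispatch on the first byte, then the
       * s_n_llhttp__internal__n_start_req_* states match the remaining
       * characters via llparse__match_sequence_id. On completion `match`
       * holds the llhttp method id (e.g. 1 = GET, 3 = POST, 5 = CONNECT)
       * and is stored through invoke_store_method_1. */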
if (p == endp) {
return s_n_llhttp__internal__n_start_req;
}
switch (*p) {
case 'A': {
p++;
goto s_n_llhttp__internal__n_start_req_1;
}
case 'B': {
p++;
goto s_n_llhttp__internal__n_start_req_2;
}
case 'C': {
p++;
goto s_n_llhttp__internal__n_start_req_3;
}
case 'D': {
p++;
goto s_n_llhttp__internal__n_start_req_8;
}
case 'G': {
p++;
goto s_n_llhttp__internal__n_start_req_9;
}
case 'H': {
p++;
goto s_n_llhttp__internal__n_start_req_10;
}
case 'L': {
p++;
goto s_n_llhttp__internal__n_start_req_11;
}
case 'M': {
p++;
goto s_n_llhttp__internal__n_start_req_14;
}
case 'N': {
p++;
goto s_n_llhttp__internal__n_start_req_23;
}
case 'O': {
p++;
goto s_n_llhttp__internal__n_start_req_24;
}
case 'P': {
p++;
goto s_n_llhttp__internal__n_start_req_25;
}
case 'R': {
p++;
goto s_n_llhttp__internal__n_start_req_35;
}
case 'S': {
p++;
goto s_n_llhttp__internal__n_start_req_39;
}
case 'T': {
p++;
goto s_n_llhttp__internal__n_start_req_43;
}
case 'U': {
p++;
goto s_n_llhttp__internal__n_start_req_44;
}
default: {
goto s_n_llhttp__internal__n_error_48;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_res_line_almost_done:
s_n_llhttp__internal__n_res_line_almost_done: {
if (p == endp) {
return s_n_llhttp__internal__n_res_line_almost_done;
}
p++;
goto s_n_llhttp__internal__n_header_field_start;
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_res_status:
s_n_llhttp__internal__n_res_status: {
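      /* Accumulate the response reason phrase until CR or LF ends the
       * on_status span. */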
if (p == endp) {
return s_n_llhttp__internal__n_res_status;
}
switch (*p) {
case 10: {
goto s_n_llhttp__internal__n_span_end_llhttp__on_status;
}
case 13: {
goto s_n_llhttp__internal__n_span_end_llhttp__on_status_1;
}
default: {
p++;
goto s_n_llhttp__internal__n_res_status;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_span_start_llhttp__on_status:
s_n_llhttp__internal__n_span_start_llhttp__on_status: {
if (p == endp) {
return s_n_llhttp__internal__n_span_start_llhttp__on_status;
}
state->_span_pos0 = (void*) p;
state->_span_cb0 = llhttp__on_status;
goto s_n_llhttp__internal__n_res_status;
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_res_status_start:
s_n_llhttp__internal__n_res_status_start: {
if (p == endp) {
return s_n_llhttp__internal__n_res_status_start;
}
switch (*p) {
case 10: {
p++;
goto s_n_llhttp__internal__n_header_field_start;
}
case 13: {
p++;
goto s_n_llhttp__internal__n_res_line_almost_done;
}
default: {
goto s_n_llhttp__internal__n_span_start_llhttp__on_status;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_res_status_code_otherwise:
s_n_llhttp__internal__n_res_status_code_otherwise: {
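      /* After the status code: SP starts the reason phrase; a bare CR or
       * LF (left unconsumed here) also leads to the status start;
       * anything else is an error (error_42). */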
if (p == endp) {
return s_n_llhttp__internal__n_res_status_code_otherwise;
}
switch (*p) {
case 10: {
goto s_n_llhttp__internal__n_res_status_start;
}
case 13: {
goto s_n_llhttp__internal__n_res_status_start;
}
case ' ': {
p++;
goto s_n_llhttp__internal__n_res_status_start;
}
default: {
goto s_n_llhttp__internal__n_error_42;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_res_status_code:
s_n_llhttp__internal__n_res_status_code: {
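      /* Build the numeric status code digit by digit; mul_add_status_code
       * multiplies the running value by 10 and adds the digit (the same
       * scheme as mul_add_content_length below). */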
if (p == endp) {
return s_n_llhttp__internal__n_res_status_code;
}
switch (*p) {
case '0': {
p++;
match = 0;
goto s_n_llhttp__internal__n_invoke_mul_add_status_code;
}
case '1': {
p++;
match = 1;
goto s_n_llhttp__internal__n_invoke_mul_add_status_code;
}
case '2': {
p++;
match = 2;
goto s_n_llhttp__internal__n_invoke_mul_add_status_code;
}
case '3': {
p++;
match = 3;
goto s_n_llhttp__internal__n_invoke_mul_add_status_code;
}
case '4': {
p++;
match = 4;
goto s_n_llhttp__internal__n_invoke_mul_add_status_code;
}
case '5': {
p++;
match = 5;
goto s_n_llhttp__internal__n_invoke_mul_add_status_code;
}
case '6': {
p++;
match = 6;
goto s_n_llhttp__internal__n_invoke_mul_add_status_code;
}
case '7': {
p++;
match = 7;
goto s_n_llhttp__internal__n_invoke_mul_add_status_code;
}
case '8': {
p++;
match = 8;
goto s_n_llhttp__internal__n_invoke_mul_add_status_code;
}
case '9': {
p++;
match = 9;
goto s_n_llhttp__internal__n_invoke_mul_add_status_code;
}
default: {
goto s_n_llhttp__internal__n_res_status_code_otherwise;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_res_http_end:
s_n_llhttp__internal__n_res_http_end: {
if (p == endp) {
return s_n_llhttp__internal__n_res_http_end;
}
switch (*p) {
case ' ': {
p++;
goto s_n_llhttp__internal__n_invoke_update_status_code;
}
default: {
goto s_n_llhttp__internal__n_error_43;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_res_http_minor:
s_n_llhttp__internal__n_res_http_minor: {
if (p == endp) {
return s_n_llhttp__internal__n_res_http_minor;
}
switch (*p) {
case '0': {
p++;
match = 0;
goto s_n_llhttp__internal__n_invoke_store_http_minor_1;
}
case '1': {
p++;
match = 1;
goto s_n_llhttp__internal__n_invoke_store_http_minor_1;
}
case '2': {
p++;
match = 2;
goto s_n_llhttp__internal__n_invoke_store_http_minor_1;
}
case '3': {
p++;
match = 3;
goto s_n_llhttp__internal__n_invoke_store_http_minor_1;
}
case '4': {
p++;
match = 4;
goto s_n_llhttp__internal__n_invoke_store_http_minor_1;
}
case '5': {
p++;
match = 5;
goto s_n_llhttp__internal__n_invoke_store_http_minor_1;
}
case '6': {
p++;
match = 6;
goto s_n_llhttp__internal__n_invoke_store_http_minor_1;
}
case '7': {
p++;
match = 7;
goto s_n_llhttp__internal__n_invoke_store_http_minor_1;
}
case '8': {
p++;
match = 8;
goto s_n_llhttp__internal__n_invoke_store_http_minor_1;
}
case '9': {
p++;
match = 9;
goto s_n_llhttp__internal__n_invoke_store_http_minor_1;
}
default: {
goto s_n_llhttp__internal__n_error_44;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_res_http_dot:
s_n_llhttp__internal__n_res_http_dot: {
if (p == endp) {
return s_n_llhttp__internal__n_res_http_dot;
}
switch (*p) {
case '.': {
p++;
goto s_n_llhttp__internal__n_res_http_minor;
}
default: {
goto s_n_llhttp__internal__n_error_45;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_res_http_major:
s_n_llhttp__internal__n_res_http_major: {
if (p == endp) {
return s_n_llhttp__internal__n_res_http_major;
}
switch (*p) {
case '0': {
p++;
match = 0;
goto s_n_llhttp__internal__n_invoke_store_http_major_1;
}
case '1': {
p++;
match = 1;
goto s_n_llhttp__internal__n_invoke_store_http_major_1;
}
case '2': {
p++;
match = 2;
goto s_n_llhttp__internal__n_invoke_store_http_major_1;
}
case '3': {
p++;
match = 3;
goto s_n_llhttp__internal__n_invoke_store_http_major_1;
}
case '4': {
p++;
match = 4;
goto s_n_llhttp__internal__n_invoke_store_http_major_1;
}
case '5': {
p++;
match = 5;
goto s_n_llhttp__internal__n_invoke_store_http_major_1;
}
case '6': {
p++;
match = 6;
goto s_n_llhttp__internal__n_invoke_store_http_major_1;
}
case '7': {
p++;
match = 7;
goto s_n_llhttp__internal__n_invoke_store_http_major_1;
}
case '8': {
p++;
match = 8;
goto s_n_llhttp__internal__n_invoke_store_http_major_1;
}
case '9': {
p++;
match = 9;
goto s_n_llhttp__internal__n_invoke_store_http_major_1;
}
default: {
goto s_n_llhttp__internal__n_error_46;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_res:
s_n_llhttp__internal__n_start_res: {
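      /* Response start line: match the 5-byte literal llparse_blob48
       * (evidently "HTTP/"), then parse the major version digit. */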
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_start_res;
}
match_seq = llparse__match_sequence_id(state, p, endp, llparse_blob48, 5);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
goto s_n_llhttp__internal__n_res_http_major;
}
case kMatchPause: {
return s_n_llhttp__internal__n_start_res;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_error_49;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_req_or_res_method_2:
s_n_llhttp__internal__n_req_or_res_method_2: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_req_or_res_method_2;
}
match_seq = llparse__match_sequence_id(state, p, endp, llparse_blob49, 2);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
match = 2;
goto s_n_llhttp__internal__n_invoke_store_method;
}
case kMatchPause: {
return s_n_llhttp__internal__n_req_or_res_method_2;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_error_47;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_req_or_res_method_3:
s_n_llhttp__internal__n_req_or_res_method_3: {
llparse_match_t match_seq;
if (p == endp) {
return s_n_llhttp__internal__n_req_or_res_method_3;
}
match_seq = llparse__match_sequence_id(state, p, endp, llparse_blob50, 3);
p = match_seq.current;
switch (match_seq.status) {
case kMatchComplete: {
p++;
goto s_n_llhttp__internal__n_invoke_update_type_1;
}
case kMatchPause: {
return s_n_llhttp__internal__n_req_or_res_method_3;
}
case kMatchMismatch: {
goto s_n_llhttp__internal__n_error_47;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_req_or_res_method_1:
s_n_llhttp__internal__n_req_or_res_method_1: {
if (p == endp) {
return s_n_llhttp__internal__n_req_or_res_method_1;
}
switch (*p) {
case 'E': {
p++;
goto s_n_llhttp__internal__n_req_or_res_method_2;
}
case 'T': {
p++;
goto s_n_llhttp__internal__n_req_or_res_method_3;
}
default: {
goto s_n_llhttp__internal__n_error_47;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_req_or_res_method:
s_n_llhttp__internal__n_req_or_res_method: {
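      /* Message type still unknown: an initial 'H' may begin either HEAD
       * (request) or "HTTP/" (response); the states above disambiguate on
       * the following bytes. */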
if (p == endp) {
return s_n_llhttp__internal__n_req_or_res_method;
}
switch (*p) {
case 'H': {
p++;
goto s_n_llhttp__internal__n_req_or_res_method_1;
}
default: {
goto s_n_llhttp__internal__n_error_47;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start_req_or_res:
s_n_llhttp__internal__n_start_req_or_res: {
if (p == endp) {
return s_n_llhttp__internal__n_start_req_or_res;
}
switch (*p) {
case 'H': {
goto s_n_llhttp__internal__n_req_or_res_method;
}
default: {
goto s_n_llhttp__internal__n_invoke_update_type_2;
}
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_invoke_load_type:
s_n_llhttp__internal__n_invoke_load_type: {
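      /* Dispatch on the configured parser type: 1 = HTTP_REQUEST,
       * 2 = HTTP_RESPONSE, anything else autodetects. */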
switch (llhttp__internal__c_load_type(state, p, endp)) {
case 1:
goto s_n_llhttp__internal__n_start_req;
case 2:
goto s_n_llhttp__internal__n_start_res;
default:
goto s_n_llhttp__internal__n_start_req_or_res;
}
/* UNREACHABLE */;
abort();
}
case s_n_llhttp__internal__n_start:
s_n_llhttp__internal__n_start: {
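      /* Skip any leading CR/LF before the message, then initialize the
       * finish state via update_finish. */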
if (p == endp) {
return s_n_llhttp__internal__n_start;
}
switch (*p) {
case 10: {
p++;
goto s_n_llhttp__internal__n_start;
}
case 13: {
p++;
goto s_n_llhttp__internal__n_start;
}
default: {
goto s_n_llhttp__internal__n_invoke_update_finish;
}
}
/* UNREACHABLE */;
abort();
}
default:
/* UNREACHABLE */
abort();
}
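  /* Out-of-line continuations referenced from the state machine above:
   * error states, pause states, span flushes and callback invocations. */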
s_n_llhttp__internal__n_error_36: {
state->error = 0x7;
state->reason = "Invalid characters in url";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_update_finish_1: {
switch (llhttp__internal__c_update_finish_1(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_start;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_pause_5: {
state->error = 0x15;
state->reason = "on_message_complete pause";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_n_llhttp__internal__n_invoke_is_equal_upgrade;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_10: {
state->error = 0x12;
state->reason = "`on_message_complete` callback error";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_pause_7: {
state->error = 0x15;
state->reason = "on_chunk_complete pause";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_n_llhttp__internal__n_invoke_llhttp__on_message_complete_2;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_14: {
state->error = 0x14;
state->reason = "`on_chunk_complete` callback error";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_llhttp__on_chunk_complete_1: {
switch (llhttp__on_chunk_complete(state, p, endp)) {
case 0:
goto s_n_llhttp__internal__n_invoke_llhttp__on_message_complete_2;
case 21:
goto s_n_llhttp__internal__n_pause_7;
default:
goto s_n_llhttp__internal__n_error_14;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_12: {
state->error = 0x4;
state->reason = "Content-Length can't be present with Transfer-Encoding";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_13: {
state->error = 0x4;
state->reason = "Content-Length can't be present with chunked encoding";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_pause_2: {
state->error = 0x15;
state->reason = "on_message_complete pause";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_n_llhttp__internal__n_pause_1;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_3: {
state->error = 0x12;
state->reason = "`on_message_complete` callback error";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_llhttp__on_message_complete_1: {
switch (llhttp__on_message_complete(state, p, endp)) {
case 0:
goto s_n_llhttp__internal__n_pause_1;
case 21:
goto s_n_llhttp__internal__n_pause_2;
default:
goto s_n_llhttp__internal__n_error_3;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_8: {
state->error = 0xc;
state->reason = "Chunk size overflow";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_pause_3: {
state->error = 0x15;
state->reason = "on_chunk_complete pause";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_n_llhttp__internal__n_invoke_update_content_length;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_5: {
state->error = 0x14;
state->reason = "`on_chunk_complete` callback error";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_llhttp__on_chunk_complete: {
switch (llhttp__on_chunk_complete(state, p, endp)) {
case 0:
goto s_n_llhttp__internal__n_invoke_update_content_length;
case 21:
goto s_n_llhttp__internal__n_pause_3;
default:
goto s_n_llhttp__internal__n_error_5;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_span_end_llhttp__on_body: {
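    /* Flush the buffered body span to llhttp__on_body; on a non-zero
     * return, record the error and the state to resume in, then stop. */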
const unsigned char* start;
int err;
start = state->_span_pos0;
state->_span_pos0 = NULL;
err = llhttp__on_body(state, start, p);
if (err != 0) {
state->error = err;
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_n_llhttp__internal__n_chunk_data_almost_done;
return s_error;
}
goto s_n_llhttp__internal__n_chunk_data_almost_done;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_or_flags: {
switch (llhttp__internal__c_or_flags(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_header_field_start;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_pause_4: {
state->error = 0x15;
state->reason = "on_chunk_header pause";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_n_llhttp__internal__n_invoke_is_equal_content_length;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_4: {
state->error = 0x13;
state->reason = "`on_chunk_header` callback error";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_llhttp__on_chunk_header: {
switch (llhttp__on_chunk_header(state, p, endp)) {
case 0:
goto s_n_llhttp__internal__n_invoke_is_equal_content_length;
case 21:
goto s_n_llhttp__internal__n_pause_4;
default:
goto s_n_llhttp__internal__n_error_4;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_6: {
state->error = 0x2;
state->reason = "Invalid character in chunk parameters";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_7: {
state->error = 0xc;
state->reason = "Invalid character in chunk size";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_mul_add_content_length: {
switch (llhttp__internal__c_mul_add_content_length(state, p, endp, match)) {
case 1:
goto s_n_llhttp__internal__n_error_8;
default:
goto s_n_llhttp__internal__n_chunk_size;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_9: {
state->error = 0xc;
state->reason = "Invalid character in chunk size";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_span_end_llhttp__on_body_1: {
const unsigned char* start;
int err;
start = state->_span_pos0;
state->_span_pos0 = NULL;
err = llhttp__on_body(state, start, p);
if (err != 0) {
state->error = err;
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_n_llhttp__internal__n_invoke_llhttp__on_message_complete_2;
return s_error;
}
goto s_n_llhttp__internal__n_invoke_llhttp__on_message_complete_2;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_update_finish_2: {
switch (llhttp__internal__c_update_finish_2(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_span_start_llhttp__on_body_2;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_11: {
state->error = 0xf;
state->reason = "Request has invalid `Transfer-Encoding`";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_pause: {
state->error = 0x15;
state->reason = "on_message_complete pause";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_n_llhttp__internal__n_invoke_llhttp__after_message_complete;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_2: {
state->error = 0x12;
state->reason = "`on_message_complete` callback error";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_llhttp__on_message_complete: {
switch (llhttp__on_message_complete(state, p, endp)) {
case 0:
goto s_n_llhttp__internal__n_invoke_llhttp__after_message_complete;
case 21:
goto s_n_llhttp__internal__n_pause;
default:
goto s_n_llhttp__internal__n_error_2;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_or_flags_1: {
switch (llhttp__internal__c_or_flags_1(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_invoke_llhttp__after_headers_complete;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_or_flags_2: {
switch (llhttp__internal__c_or_flags_1(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_invoke_llhttp__after_headers_complete;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_update_upgrade: {
switch (llhttp__internal__c_update_upgrade(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_invoke_or_flags_2;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_pause_6: {
state->error = 0x15;
state->reason = "Paused by on_headers_complete";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_n_llhttp__internal__n_invoke_llhttp__after_headers_complete;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_1: {
state->error = 0x11;
state->reason = "User callback error";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_llhttp__on_headers_complete: {
switch (llhttp__on_headers_complete(state, p, endp)) {
case 0:
goto s_n_llhttp__internal__n_invoke_llhttp__after_headers_complete;
case 1:
goto s_n_llhttp__internal__n_invoke_or_flags_1;
case 2:
goto s_n_llhttp__internal__n_invoke_update_upgrade;
case 21:
goto s_n_llhttp__internal__n_pause_6;
default:
goto s_n_llhttp__internal__n_error_1;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_llhttp__before_headers_complete: {
switch (llhttp__before_headers_complete(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_invoke_llhttp__on_headers_complete;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_test_flags_3: {
switch (llhttp__internal__c_test_flags_3(state, p, endp)) {
case 1:
goto s_n_llhttp__internal__n_error_13;
default:
goto s_n_llhttp__internal__n_invoke_llhttp__before_headers_complete;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_test_flags_2: {
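    /* c_test_flags_2 appears to test the lenient flag: strict parsers
     * (case 0) reject Content-Length alongside Transfer-Encoding
     * (error_12); lenient ones proceed to the chunked check. */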
switch (llhttp__internal__c_test_flags_2(state, p, endp)) {
case 0:
goto s_n_llhttp__internal__n_error_12;
case 1:
goto s_n_llhttp__internal__n_invoke_test_flags_3;
default:
goto s_n_llhttp__internal__n_invoke_llhttp__before_headers_complete;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_test_flags_1: {
switch (llhttp__internal__c_test_flags_1(state, p, endp)) {
case 1:
goto s_n_llhttp__internal__n_invoke_test_flags_2;
default:
goto s_n_llhttp__internal__n_invoke_llhttp__before_headers_complete;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_test_flags: {
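    /* c_test_flags presumably checks the chunked/trailers flag: if set,
     * reaching headers-complete here means the chunked trailers are done,
     * so fire on_chunk_complete; otherwise run the Content-Length and
     * Transfer-Encoding validation chain (test_flags_1/2/3 above). */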
switch (llhttp__internal__c_test_flags(state, p, endp)) {
case 1:
goto s_n_llhttp__internal__n_invoke_llhttp__on_chunk_complete_1;
default:
goto s_n_llhttp__internal__n_invoke_test_flags_1;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_span_end_llhttp__on_header_field: {
const unsigned char* start;
int err;
start = state->_span_pos0;
state->_span_pos0 = NULL;
err = llhttp__on_header_field(state, start, p);
if (err != 0) {
state->error = err;
state->error_pos = (const char*) (p + 1);
state->_current = (void*) (intptr_t) s_n_llhttp__internal__n_error_15;
return s_error;
}
p++;
goto s_n_llhttp__internal__n_error_15;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_test_flags_4: {
switch (llhttp__internal__c_test_flags_2(state, p, endp)) {
case 1:
goto s_n_llhttp__internal__n_header_field_colon_discard_ws;
default:
goto s_n_llhttp__internal__n_span_end_llhttp__on_header_field;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_16: {
state->error = 0xb;
state->reason = "Empty Content-Length";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_span_end_llhttp__on_header_value: {
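    /* Flush the header-value span to llhttp__on_header_value before
     * moving on to the next header field. */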
const unsigned char* start;
int err;
start = state->_span_pos0;
state->_span_pos0 = NULL;
err = llhttp__on_header_value(state, start, p);
if (err != 0) {
state->error = err;
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_n_llhttp__internal__n_header_field_start;
return s_error;
}
goto s_n_llhttp__internal__n_header_field_start;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_update_header_state: {
switch (llhttp__internal__c_update_header_state(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_span_start_llhttp__on_header_value;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_or_flags_3: {
switch (llhttp__internal__c_or_flags_3(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_invoke_update_header_state;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_or_flags_4: {
switch (llhttp__internal__c_or_flags_4(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_invoke_update_header_state;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_or_flags_5: {
switch (llhttp__internal__c_or_flags_5(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_invoke_update_header_state;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_or_flags_6: {
switch (llhttp__internal__c_or_flags_6(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_span_start_llhttp__on_header_value;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_load_header_state_1: {
switch (llhttp__internal__c_load_header_state(state, p, endp)) {
case 5:
goto s_n_llhttp__internal__n_invoke_or_flags_3;
case 6:
goto s_n_llhttp__internal__n_invoke_or_flags_4;
case 7:
goto s_n_llhttp__internal__n_invoke_or_flags_5;
case 8:
goto s_n_llhttp__internal__n_invoke_or_flags_6;
default:
goto s_n_llhttp__internal__n_span_start_llhttp__on_header_value;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_load_header_state: {
switch (llhttp__internal__c_load_header_state(state, p, endp)) {
case 2:
goto s_n_llhttp__internal__n_error_16;
default:
goto s_n_llhttp__internal__n_invoke_load_header_state_1;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_update_header_state_1: {
switch (llhttp__internal__c_update_header_state(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_header_field_start;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_or_flags_7: {
switch (llhttp__internal__c_or_flags_3(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_invoke_update_header_state_1;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_or_flags_8: {
switch (llhttp__internal__c_or_flags_4(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_invoke_update_header_state_1;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_or_flags_9: {
switch (llhttp__internal__c_or_flags_5(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_invoke_update_header_state_1;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_or_flags_10: {
switch (llhttp__internal__c_or_flags_6(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_header_field_start;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_load_header_state_3: {
switch (llhttp__internal__c_load_header_state(state, p, endp)) {
case 5:
goto s_n_llhttp__internal__n_invoke_or_flags_7;
case 6:
goto s_n_llhttp__internal__n_invoke_or_flags_8;
case 7:
goto s_n_llhttp__internal__n_invoke_or_flags_9;
case 8:
goto s_n_llhttp__internal__n_invoke_or_flags_10;
default:
goto s_n_llhttp__internal__n_header_field_start;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_17: {
state->error = 0x3;
state->reason = "Missing expected LF after header value";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_span_end_llhttp__on_header_value_1: {
const unsigned char* start;
int err;
start = state->_span_pos0;
state->_span_pos0 = NULL;
err = llhttp__on_header_value(state, start, p);
if (err != 0) {
state->error = err;
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_n_llhttp__internal__n_header_value_almost_done;
return s_error;
}
goto s_n_llhttp__internal__n_header_value_almost_done;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_span_end_llhttp__on_header_value_2: {
const unsigned char* start;
int err;
start = state->_span_pos0;
state->_span_pos0 = NULL;
err = llhttp__on_header_value(state, start, p);
if (err != 0) {
state->error = err;
state->error_pos = (const char*) (p + 1);
state->_current = (void*) (intptr_t) s_n_llhttp__internal__n_header_value_almost_done;
return s_error;
}
p++;
goto s_n_llhttp__internal__n_header_value_almost_done;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_span_end_llhttp__on_header_value_3: {
const unsigned char* start;
int err;
start = state->_span_pos0;
state->_span_pos0 = NULL;
err = llhttp__on_header_value(state, start, p);
if (err != 0) {
state->error = err;
state->error_pos = (const char*) (p + 1);
state->_current = (void*) (intptr_t) s_n_llhttp__internal__n_header_value_almost_done;
return s_error;
}
p++;
goto s_n_llhttp__internal__n_header_value_almost_done;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_18: {
state->error = 0xa;
state->reason = "Invalid header value char";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_test_flags_5: {
switch (llhttp__internal__c_test_flags_2(state, p, endp)) {
case 1:
goto s_n_llhttp__internal__n_header_value_lenient;
default:
goto s_n_llhttp__internal__n_error_18;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_update_header_state_3: {
switch (llhttp__internal__c_update_header_state(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_header_value_connection;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_or_flags_11: {
switch (llhttp__internal__c_or_flags_3(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_invoke_update_header_state_3;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_or_flags_12: {
switch (llhttp__internal__c_or_flags_4(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_invoke_update_header_state_3;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_or_flags_13: {
switch (llhttp__internal__c_or_flags_5(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_invoke_update_header_state_3;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_or_flags_14: {
switch (llhttp__internal__c_or_flags_6(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_header_value_connection;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_load_header_state_4: {
switch (llhttp__internal__c_load_header_state(state, p, endp)) {
case 5:
goto s_n_llhttp__internal__n_invoke_or_flags_11;
case 6:
goto s_n_llhttp__internal__n_invoke_or_flags_12;
case 7:
goto s_n_llhttp__internal__n_invoke_or_flags_13;
case 8:
goto s_n_llhttp__internal__n_invoke_or_flags_14;
default:
goto s_n_llhttp__internal__n_header_value_connection;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_update_header_state_4: {
switch (llhttp__internal__c_update_header_state_4(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_header_value_connection_token;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_update_header_state_2: {
switch (llhttp__internal__c_update_header_state_2(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_header_value_connection_ws;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_update_header_state_5: {
switch (llhttp__internal__c_update_header_state_5(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_header_value_connection_ws;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_update_header_state_6: {
switch (llhttp__internal__c_update_header_state_6(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_header_value_connection_ws;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_span_end_llhttp__on_header_value_4: {
const unsigned char* start;
int err;
start = state->_span_pos0;
state->_span_pos0 = NULL;
err = llhttp__on_header_value(state, start, p);
if (err != 0) {
state->error = err;
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_n_llhttp__internal__n_error_20;
return s_error;
}
goto s_n_llhttp__internal__n_error_20;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_mul_add_content_length_1: {
switch (llhttp__internal__c_mul_add_content_length_1(state, p, endp, match)) {
case 1:
goto s_n_llhttp__internal__n_span_end_llhttp__on_header_value_4;
default:
goto s_n_llhttp__internal__n_header_value_content_length;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_or_flags_15: {
switch (llhttp__internal__c_or_flags_15(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_header_value_otherwise;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_span_end_llhttp__on_header_value_5: {
const unsigned char* start;
int err;
start = state->_span_pos0;
state->_span_pos0 = NULL;
err = llhttp__on_header_value(state, start, p);
if (err != 0) {
state->error = err;
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_n_llhttp__internal__n_error_21;
return s_error;
}
goto s_n_llhttp__internal__n_error_21;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_19: {
state->error = 0x4;
state->reason = "Duplicate Content-Length";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_test_flags_6: {
switch (llhttp__internal__c_test_flags_6(state, p, endp)) {
case 0:
goto s_n_llhttp__internal__n_header_value_content_length;
default:
goto s_n_llhttp__internal__n_error_19;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_update_header_state_7: {
switch (llhttp__internal__c_update_header_state_7(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_header_value_otherwise;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_update_header_state_8: {
switch (llhttp__internal__c_update_header_state_4(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_header_value;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_and_flags: {
switch (llhttp__internal__c_and_flags(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_header_value_te_chunked;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_or_flags_16: {
switch (llhttp__internal__c_or_flags_16(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_invoke_and_flags;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_or_flags_17: {
switch (llhttp__internal__c_or_flags_17(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_invoke_update_header_state_8;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_load_header_state_2: {
switch (llhttp__internal__c_load_header_state(state, p, endp)) {
case 1:
goto s_n_llhttp__internal__n_header_value_connection;
case 2:
goto s_n_llhttp__internal__n_invoke_test_flags_6;
case 3:
goto s_n_llhttp__internal__n_invoke_or_flags_16;
case 4:
goto s_n_llhttp__internal__n_invoke_or_flags_17;
default:
goto s_n_llhttp__internal__n_header_value;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_span_end_llhttp__on_header_field_1: {
const unsigned char* start;
int err;
start = state->_span_pos0;
state->_span_pos0 = NULL;
err = llhttp__on_header_field(state, start, p);
if (err != 0) {
state->error = err;
state->error_pos = (const char*) (p + 1);
state->_current = (void*) (intptr_t) s_n_llhttp__internal__n_header_value_discard_ws;
return s_error;
}
p++;
goto s_n_llhttp__internal__n_header_value_discard_ws;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_span_end_llhttp__on_header_field_2: {
const unsigned char* start;
int err;
start = state->_span_pos0;
state->_span_pos0 = NULL;
err = llhttp__on_header_field(state, start, p);
if (err != 0) {
state->error = err;
state->error_pos = (const char*) (p + 1);
state->_current = (void*) (intptr_t) s_n_llhttp__internal__n_header_value_discard_ws;
return s_error;
}
p++;
goto s_n_llhttp__internal__n_header_value_discard_ws;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_22: {
state->error = 0xa;
state->reason = "Invalid header token";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_update_header_state_9: {
switch (llhttp__internal__c_update_header_state_4(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_header_field_general;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_store_header_state: {
switch (llhttp__internal__c_store_header_state(state, p, endp, match)) {
default:
goto s_n_llhttp__internal__n_header_field_colon;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_update_header_state_10: {
switch (llhttp__internal__c_update_header_state_4(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_header_field_general;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_update_http_minor: {
switch (llhttp__internal__c_update_http_minor(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_header_field_start;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_update_http_major: {
switch (llhttp__internal__c_update_http_major(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_invoke_update_http_minor;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_span_end_llhttp__on_url_3: {
const unsigned char* start;
int err;
start = state->_span_pos0;
state->_span_pos0 = NULL;
err = llhttp__on_url(state, start, p);
if (err != 0) {
state->error = err;
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_n_llhttp__internal__n_url_skip_to_http09;
return s_error;
}
goto s_n_llhttp__internal__n_url_skip_to_http09;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_23: {
state->error = 0x7;
state->reason = "Expected CRLF";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_span_end_llhttp__on_url_4: {
const unsigned char* start;
int err;
start = state->_span_pos0;
state->_span_pos0 = NULL;
err = llhttp__on_url(state, start, p);
if (err != 0) {
state->error = err;
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_n_llhttp__internal__n_url_skip_lf_to_http09;
return s_error;
}
goto s_n_llhttp__internal__n_url_skip_lf_to_http09;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_24: {
state->error = 0x9;
state->reason = "Expected CRLF after version";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_store_http_minor: {
switch (llhttp__internal__c_store_http_minor(state, p, endp, match)) {
default:
goto s_n_llhttp__internal__n_req_http_end;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_25: {
state->error = 0x9;
state->reason = "Invalid minor version";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_26: {
state->error = 0x9;
state->reason = "Expected dot";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_store_http_major: {
switch (llhttp__internal__c_store_http_major(state, p, endp, match)) {
default:
goto s_n_llhttp__internal__n_req_http_dot;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_27: {
state->error = 0x9;
state->reason = "Invalid major version";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_29: {
state->error = 0x8;
state->reason = "Expected HTTP/";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_28: {
state->error = 0x8;
state->reason = "Expected SOURCE method for ICE/x.x request";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_is_equal_method_1: {
switch (llhttp__internal__c_is_equal_method_1(state, p, endp)) {
case 0:
goto s_n_llhttp__internal__n_error_28;
default:
goto s_n_llhttp__internal__n_req_http_major;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_span_end_llhttp__on_url_5: {
const unsigned char* start;
int err;
start = state->_span_pos0;
state->_span_pos0 = NULL;
err = llhttp__on_url(state, start, p);
if (err != 0) {
state->error = err;
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_n_llhttp__internal__n_url_skip_to_http;
return s_error;
}
goto s_n_llhttp__internal__n_url_skip_to_http;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_span_end_llhttp__on_url_6: {
const unsigned char* start;
int err;
start = state->_span_pos0;
state->_span_pos0 = NULL;
err = llhttp__on_url(state, start, p);
if (err != 0) {
state->error = err;
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_n_llhttp__internal__n_url_skip_to_http09;
return s_error;
}
goto s_n_llhttp__internal__n_url_skip_to_http09;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_span_end_llhttp__on_url_7: {
const unsigned char* start;
int err;
start = state->_span_pos0;
state->_span_pos0 = NULL;
err = llhttp__on_url(state, start, p);
if (err != 0) {
state->error = err;
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_n_llhttp__internal__n_url_skip_lf_to_http09;
return s_error;
}
goto s_n_llhttp__internal__n_url_skip_lf_to_http09;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_span_end_llhttp__on_url_8: {
const unsigned char* start;
int err;
start = state->_span_pos0;
state->_span_pos0 = NULL;
err = llhttp__on_url(state, start, p);
if (err != 0) {
state->error = err;
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_n_llhttp__internal__n_url_skip_to_http;
return s_error;
}
goto s_n_llhttp__internal__n_url_skip_to_http;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_30: {
state->error = 0x7;
state->reason = "Invalid char in url fragment start";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_span_end_llhttp__on_url_9: {
const unsigned char* start;
int err;
start = state->_span_pos0;
state->_span_pos0 = NULL;
err = llhttp__on_url(state, start, p);
if (err != 0) {
state->error = err;
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_n_llhttp__internal__n_url_skip_to_http09;
return s_error;
}
goto s_n_llhttp__internal__n_url_skip_to_http09;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_span_end_llhttp__on_url_10: {
const unsigned char* start;
int err;
start = state->_span_pos0;
state->_span_pos0 = NULL;
err = llhttp__on_url(state, start, p);
if (err != 0) {
state->error = err;
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_n_llhttp__internal__n_url_skip_lf_to_http09;
return s_error;
}
goto s_n_llhttp__internal__n_url_skip_lf_to_http09;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_span_end_llhttp__on_url_11: {
const unsigned char* start;
int err;
start = state->_span_pos0;
state->_span_pos0 = NULL;
err = llhttp__on_url(state, start, p);
if (err != 0) {
state->error = err;
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_n_llhttp__internal__n_url_skip_to_http;
return s_error;
}
goto s_n_llhttp__internal__n_url_skip_to_http;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_31: {
state->error = 0x7;
state->reason = "Invalid char in url query";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_32: {
state->error = 0x7;
state->reason = "Invalid char in url path";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_span_end_llhttp__on_url: {
const unsigned char* start;
int err;
start = state->_span_pos0;
state->_span_pos0 = NULL;
err = llhttp__on_url(state, start, p);
if (err != 0) {
state->error = err;
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_n_llhttp__internal__n_url_skip_to_http09;
return s_error;
}
goto s_n_llhttp__internal__n_url_skip_to_http09;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_span_end_llhttp__on_url_1: {
const unsigned char* start;
int err;
start = state->_span_pos0;
state->_span_pos0 = NULL;
err = llhttp__on_url(state, start, p);
if (err != 0) {
state->error = err;
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_n_llhttp__internal__n_url_skip_lf_to_http09;
return s_error;
}
goto s_n_llhttp__internal__n_url_skip_lf_to_http09;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_span_end_llhttp__on_url_2: {
const unsigned char* start;
int err;
start = state->_span_pos0;
state->_span_pos0 = NULL;
err = llhttp__on_url(state, start, p);
if (err != 0) {
state->error = err;
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_n_llhttp__internal__n_url_skip_to_http;
return s_error;
}
goto s_n_llhttp__internal__n_url_skip_to_http;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_span_end_llhttp__on_url_12: {
const unsigned char* start;
int err;
start = state->_span_pos0;
state->_span_pos0 = NULL;
err = llhttp__on_url(state, start, p);
if (err != 0) {
state->error = err;
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_n_llhttp__internal__n_url_skip_to_http09;
return s_error;
}
goto s_n_llhttp__internal__n_url_skip_to_http09;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_span_end_llhttp__on_url_13: {
const unsigned char* start;
int err;
start = state->_span_pos0;
state->_span_pos0 = NULL;
err = llhttp__on_url(state, start, p);
if (err != 0) {
state->error = err;
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_n_llhttp__internal__n_url_skip_lf_to_http09;
return s_error;
}
goto s_n_llhttp__internal__n_url_skip_lf_to_http09;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_span_end_llhttp__on_url_14: {
const unsigned char* start;
int err;
start = state->_span_pos0;
state->_span_pos0 = NULL;
err = llhttp__on_url(state, start, p);
if (err != 0) {
state->error = err;
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_n_llhttp__internal__n_url_skip_to_http;
return s_error;
}
goto s_n_llhttp__internal__n_url_skip_to_http;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_33: {
state->error = 0x7;
state->reason = "Double @ in url";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_34: {
state->error = 0x7;
state->reason = "Unexpected char in url server";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_35: {
state->error = 0x7;
state->reason = "Unexpected char in url server";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_37: {
state->error = 0x7;
state->reason = "Unexpected char in url schema";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_38: {
state->error = 0x7;
state->reason = "Unexpected char in url schema";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_39: {
state->error = 0x7;
state->reason = "Unexpected start char in url";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_is_equal_method: {
switch (llhttp__internal__c_is_equal_method(state, p, endp)) {
case 0:
goto s_n_llhttp__internal__n_span_start_llhttp__on_url_1;
default:
goto s_n_llhttp__internal__n_span_start_llhttp__on_url;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_40: {
state->error = 0x6;
state->reason = "Expected space after method";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_store_method_1: {
switch (llhttp__internal__c_store_method(state, p, endp, match)) {
default:
goto s_n_llhttp__internal__n_req_first_space_before_url;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_48: {
state->error = 0x6;
state->reason = "Invalid method encountered";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_41: {
state->error = 0xd;
state->reason = "Response overflow";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_mul_add_status_code: {
switch (llhttp__internal__c_mul_add_status_code(state, p, endp, match)) {
case 1:
goto s_n_llhttp__internal__n_error_41;
default:
goto s_n_llhttp__internal__n_res_status_code;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_span_end_llhttp__on_status: {
const unsigned char* start;
int err;
start = state->_span_pos0;
state->_span_pos0 = NULL;
err = llhttp__on_status(state, start, p);
if (err != 0) {
state->error = err;
state->error_pos = (const char*) (p + 1);
state->_current = (void*) (intptr_t) s_n_llhttp__internal__n_header_field_start;
return s_error;
}
p++;
goto s_n_llhttp__internal__n_header_field_start;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_span_end_llhttp__on_status_1: {
const unsigned char* start;
int err;
start = state->_span_pos0;
state->_span_pos0 = NULL;
err = llhttp__on_status(state, start, p);
if (err != 0) {
state->error = err;
state->error_pos = (const char*) (p + 1);
state->_current = (void*) (intptr_t) s_n_llhttp__internal__n_res_line_almost_done;
return s_error;
}
p++;
goto s_n_llhttp__internal__n_res_line_almost_done;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_42: {
state->error = 0xd;
state->reason = "Invalid response status";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_update_status_code: {
switch (llhttp__internal__c_update_status_code(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_res_status_code;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_43: {
state->error = 0x9;
state->reason = "Expected space after version";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_store_http_minor_1: {
switch (llhttp__internal__c_store_http_minor(state, p, endp, match)) {
default:
goto s_n_llhttp__internal__n_res_http_end;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_44: {
state->error = 0x9;
state->reason = "Invalid minor version";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_45: {
state->error = 0x9;
state->reason = "Expected dot";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_store_http_major_1: {
switch (llhttp__internal__c_store_http_major(state, p, endp, match)) {
default:
goto s_n_llhttp__internal__n_res_http_dot;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_46: {
state->error = 0x9;
state->reason = "Invalid major version";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_49: {
state->error = 0x8;
state->reason = "Expected HTTP/";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_update_type: {
switch (llhttp__internal__c_update_type(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_req_first_space_before_url;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_store_method: {
switch (llhttp__internal__c_store_method(state, p, endp, match)) {
default:
goto s_n_llhttp__internal__n_invoke_update_type;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error_47: {
state->error = 0x8;
state->reason = "Invalid word encountered";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_update_type_1: {
switch (llhttp__internal__c_update_type_1(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_res_http_major;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_update_type_2: {
switch (llhttp__internal__c_update_type(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_start_req;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_pause_8: {
state->error = 0x15;
state->reason = "on_message_begin pause";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_n_llhttp__internal__n_invoke_load_type;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_error: {
state->error = 0x10;
state->reason = "`on_message_begin` callback error";
state->error_pos = (const char*) p;
state->_current = (void*) (intptr_t) s_error;
return s_error;
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_llhttp__on_message_begin: {
switch (llhttp__on_message_begin(state, p, endp)) {
case 0:
goto s_n_llhttp__internal__n_invoke_load_type;
case 21:
goto s_n_llhttp__internal__n_pause_8;
default:
goto s_n_llhttp__internal__n_error;
}
/* UNREACHABLE */;
abort();
}
s_n_llhttp__internal__n_invoke_update_finish: {
switch (llhttp__internal__c_update_finish(state, p, endp)) {
default:
goto s_n_llhttp__internal__n_invoke_llhttp__on_message_begin;
}
/* UNREACHABLE */;
abort();
}
} | 1 | [
"CWE-444"
]
| node | da0fda0fe81d372e24c0cb11aec37534985708dd | 319,614,150,588,660,300,000,000,000,000,000,000,000 | 5,678 | http: stricter Transfer-Encoding and header separator parsing
Reviewed-By: Matteo Collina <[email protected]>
Reviewed-By: James M Snell <[email protected]>
Reviewed-By: Rich Trott <[email protected]>
Reviewed-By: Vladimir de Turckheim <[email protected]>
PR-URL: #315
Backport-PR-URL: #327
CVE-ID: CVE-2022-32215,CVE-2022-32214,CVE-2022-32213 |
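
A minimal sketch of the idea behind this fix, not llhttp's actual generated code: strict parsing rejects a bare CR or LF embedded in a header value instead of treating it as ordinary whitespace, since tolerating loose line terminators (together with lax Transfer-Encoding handling) is what enables request smuggling (CWE-444). The helper name below is hypothetical.

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical helper: a header value may not contain a bare CR or LF;
 * line terminators must be consumed as a full CRLF by the line parser. */
static bool header_value_is_strict(const unsigned char *v, size_t len)
{
    for (size_t i = 0; i < len; i++) {
        if (v[i] == '\r' || v[i] == '\n')
            return false;
    }
    return true;
}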
static int32_t ahci_dma_prepare_buf(IDEDMA *dma, int is_write)
{
AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma);
IDEState *s = &ad->port.ifs[0];
if (ahci_populate_sglist(ad, &s->sg, s->io_buffer_offset) == -1) {
DPRINTF(ad->port_no, "ahci_dma_prepare_buf failed.\n");
return -1;
}
s->io_buffer_size = s->sg.size;
DPRINTF(ad->port_no, "len=%#x\n", s->io_buffer_size);
return s->io_buffer_size;
} | 0 | [
"CWE-399"
]
| qemu | 3251bdcf1c67427d964517053c3d185b46e618e8 | 223,371,965,855,669,550,000,000,000,000,000,000,000 | 14 | ide: Correct handling of malformed/short PRDTs
This impacts both BMDMA and AHCI HBA interfaces for IDE.
Currently, we confuse the difference between a PRDT having
"0 bytes" and a PRDT having "0 complete sectors."
When we receive an incomplete sector, inconsistent error checking
leads to an infinite loop wherein the call succeeds, but it
didn't give us enough bytes -- leading us to re-call the
DMA chain over and over again. This leads to, in the BMDMA case,
leaked memory for short PRDTs, and infinite loops and resource
usage in the AHCI case.
The .prepare_buf() callback is reworked to return the number of
bytes that it successfully prepared. 0 is a valid, non-error
answer that means the table was empty and described no bytes.
-1 indicates an error.
Our current implementation uses the io_buffer in IDEState to
ultimately describe the size of a prepared scatter-gather list.
Even though the AHCI PRDT/SGList can be as large as 256GiB, the
AHCI command header limits transactions to just 4GiB. ATA8-ACS3,
however, defines the largest transaction to be an LBA48 command
that transfers 65,536 sectors. With a 512 byte sector size, this
is just 32MiB.
Since our current state structures use the int type to describe
the size of the buffer, and this state is migrated as int32, we
are limited to describing 2GiB buffer sizes unless we change the
migration protocol.
For this reason, this patch begins to unify the assertions in the
IDE pathways that the scatter-gather list provided by either the
AHCI PRDT or the PCI BMDMA PRDs can only describe, at a maximum,
2GiB. This should be resilient enough unless we need a sector
size that exceeds 32KiB.
Further, the likelihood of any guest operating system actually
attempting to transfer this much data in a single operation is
very slim.
To this end, the IDEState variables have been updated to more
explicitly clarify our maximum supported size. Callers to the
prepare_buf callback have been reworked to understand the new
return code, and all versions of the prepare_buf callback have
been adjusted accordingly.
Lastly, the ahci_populate_sglist helper, relied upon by the
AHCI implementation of .prepare_buf() as well as the PCI
implementation of the callback have had overflow assertions
added to help make clear the reasonings behind the various
type changes.
[Added %d -> %"PRId64" fix John sent because off_pos changed from int to
int64_t.
--Stefan]
Signed-off-by: John Snow <[email protected]>
Reviewed-by: Paolo Bonzini <[email protected]>
Message-id: [email protected]
Signed-off-by: Stefan Hajnoczi <[email protected]> |
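
A minimal sketch of the calling convention the message describes, using hypothetical names: .prepare_buf() returns the number of bytes it prepared, where 0 means "empty table" and is not an error, and -1 signals failure. Callers must stop on 0 instead of re-running the DMA chain.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for a reworked .prepare_buf() callback. */
static int32_t prepare_buf_stub(int32_t prdt_bytes)
{
    if (prdt_bytes < 0)
        return -1;        /* malformed PRDT: error */
    return prdt_bytes;    /* 0 is valid: table described no bytes */
}

int main(void)
{
    int32_t n = prepare_buf_stub(0);
    if (n < 0)
        fprintf(stderr, "PRDT error\n");
    else if (n == 0)
        printf("empty PRDT: stop, do not re-run the DMA chain\n");
    else
        printf("prepared %d bytes\n", (int)n);
    return 0;
}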
MONGO_EXPORT int bson_append_regex( bson *b, const char *name, const char *pattern, const char *opts ) {
const int plen = strlen( pattern )+1;
const int olen = strlen( opts )+1;
if ( bson_append_estart( b, BSON_REGEX, name, plen + olen ) == BSON_ERROR )
return BSON_ERROR;
if ( bson_check_string( b, pattern, plen - 1 ) == BSON_ERROR )
return BSON_ERROR;
bson_append( b , pattern , plen );
bson_append( b , opts , olen );
return BSON_OK;
} | 1 | [
"CWE-190"
]
| mongo-c-driver-legacy | 1a1f5e26a4309480d88598913f9eebf9e9cba8ca | 286,092,247,857,024,780,000,000,000,000,000,000,000 | 11 | don't mix up int and size_t (first pass to fix that) |
static int sample_rate_idx (int rate)
{
if (92017 <= rate) return 0;
else if (75132 <= rate) return 1;
else if (55426 <= rate) return 2;
else if (46009 <= rate) return 3;
else if (37566 <= rate) return 4;
else if (27713 <= rate) return 5;
else if (23004 <= rate) return 6;
else if (18783 <= rate) return 7;
else if (13856 <= rate) return 8;
else if (11502 <= rate) return 9;
else if (9391 <= rate) return 10;
else return 11;
} | 0 | [
"CWE-703"
]
| FFmpeg | 6e42ccb9dbc13836cd52cda594f819d17af9afa2 | 84,070,733,163,042,300,000,000,000,000,000,000,000 | 15 | avcodec/aacdec: Fix pulse position checks in decode_pulses()
Fixes out of array read
Fixes: asan_static-oob_1efed25_1887_cov_2013541199_HeyYa_RA10_AAC_192K_30s.rm
Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <[email protected]> |
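
A hedged sketch of the kind of pulse-position validation the fix adds; names and bounds are illustrative rather than libavcodec's. Each pulse offset advances a running position that must stay inside the coefficient buffer, otherwise the decoder must bail out instead of reading past the array.

#include <stdbool.h>

/* Illustrative check: accumulated pulse positions must stay in bounds. */
static bool pulses_in_bounds(const int *offsets, int num_pulse,
                             int start, int max_coeffs)
{
    int pos = start;
    for (int i = 0; i < num_pulse; i++) {
        pos += offsets[i];
        if (pos >= max_coeffs)
            return false;     /* would index past the spectrum */
    }
    return true;
}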
size_t qpdf_get_buffer_length(qpdf_data qpdf)
{
qpdf_get_buffer_internal(qpdf);
size_t result = 0;
if (qpdf->output_buffer)
{
result = qpdf->output_buffer->getSize();
}
return result;
} | 0 | [
"CWE-787"
]
| qpdf | d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e | 40,234,429,979,669,373,000,000,000,000,000,000,000 | 10 | Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition. |
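
A hedged C analogue of the "explicit checked conversion" pattern the message describes (qpdf itself does this in C++ with exceptions): narrow only after a range check, so a bogus size fails loudly instead of silently truncating.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative checked narrowing: size_t -> uint32_t. */
static uint32_t to_u32_checked(size_t v)
{
    if (v > (size_t)UINT32_MAX) {
        fprintf(stderr, "integer out of range for uint32_t\n");
        abort();
    }
    return (uint32_t)v;
}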
Item_direct_view_ref(Name_resolution_context *context_arg, Item **item,
const char *table_name_arg,
const char *field_name_arg)
:Item_direct_ref(context_arg, item, table_name_arg, field_name_arg) {} | 0 | []
| mysql-server | f7316aa0c9a3909fc7498e7b95d5d3af044a7e21 | 98,254,808,649,881,200,000,000,000,000,000,000,000 | 4 | Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST,
COL), NAME_CONST('NAME', NULL))
Backport of Bug#19143243 fix.
NAME_CONST item can return NULL_ITEM type in case of incorrect arguments.
NULL_ITEM has special processing in Item_func_in function.
In Item_func_in::fix_length_and_dec an array of possible comparators is
created. Since the NAME_CONST function has NULL_ITEM type, the corresponding
array element is empty. NAME_CONST is then wrapped in an ITEM_CACHE.
ITEM_CACHE cannot return the proper type (NULL_ITEM) in Item_func_in::val_int(),
so the NULL_ITEM ends up being compared with an empty comparator.
The fix is to disable the caching of Item_name_const item. |
static pyc_object *get_long_object(RBuffer *buffer) {
pyc_object *ret = NULL;
bool error = false;
bool neg = false;
ut32 tmp = 0;
size_t size;
size_t i, j = 0, left = 0;
ut16 n;
char *hexstr;
char digist2hex[] = "0123456789abcdef";
st32 ndigits = get_st32 (buffer, &error);
if (error) {
return NULL;
}
ret = R_NEW0 (pyc_object);
if (!ret) {
return NULL;
}
ret->type = TYPE_LONG;
if (ndigits < 0) {
ndigits = -ndigits;
neg = true;
}
if (ndigits == 0) {
ret->data = strdup ("0x0");
} else {
size = ndigits * 15;
size = (size - 1) / 4 + 1;
size += 3 + (neg? 1: 0);
hexstr = calloc (size, sizeof (char));
if (!hexstr) {
free (ret);
return NULL;
}
j = size - 1;
for (i = 0; i < ndigits; i++) {
n = get_ut16 (buffer, &error);
tmp |= n << left;
left += 15;
while (left >= 4) {
hexstr[--j] = digist2hex[tmp & 0xf];
tmp >>= 4;
left -= 4;
}
}
if (tmp) {
hexstr[--j] = digist2hex[tmp & 0xf];
}
hexstr[--j] = 'x';
hexstr[--j] = '0';
if (neg) {
hexstr[--j] = '-';
}
ret->data = &hexstr[j];
}
return ret;
} | 0 | [
"CWE-415"
]
| radare2 | 049de62730f4954ef9a642f2eeebbca30a8eccdc | 68,339,553,596,400,200,000,000,000,000,000,000,000 | 63 | Fix #18679 - UAF when parsing corrupted pyc files ##bin |
static int __init eventpoll_init(void)
{
struct sysinfo si;
si_meminfo(&si);
/*
* Allows top 4% of lomem to be allocated for epoll watches (per user).
*/
max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) /
EP_ITEM_COST;
BUG_ON(max_user_watches < 0);
/*
* Initialize the structure used to perform epoll file descriptor
* inclusion loops checks.
*/
ep_nested_calls_init(&poll_loop_ncalls);
/* Initialize the structure used to perform safe poll wait head wake ups */
ep_nested_calls_init(&poll_safewake_ncalls);
/* Initialize the structure used to perform file's f_op->poll() calls */
ep_nested_calls_init(&poll_readywalk_ncalls);
/* Allocates slab cache used to allocate "struct epitem" items */
epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem),
0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
/* Allocates slab cache used to allocate "struct eppoll_entry" */
pwq_cache = kmem_cache_create("eventpoll_pwq",
sizeof(struct eppoll_entry), 0, SLAB_PANIC, NULL);
return 0;
} | 0 | []
| linux-2.6 | 28d82dc1c4edbc352129f97f4ca22624d1fe61de | 302,181,285,748,922,780,000,000,000,000,000,000,000 | 34 | epoll: limit paths
The current epoll code can be tickled to run basically indefinitely in
both the loop detection path check (on ep_insert()) and in the wakeup paths.
The programs that tickle this behavior set up deeply linked networks of
epoll file descriptors that cause the epoll algorithms to traverse them
indefinitely. A couple of these sample programs have been previously
posted in this thread: https://lkml.org/lkml/2011/2/25/297.
To fix the loop detection path check algorithms, I simply keep track of
the epoll nodes that have already been visited.  Thus, the loop detection
becomes proportional to the number of epoll file descriptors and links.
This dramatically decreases the run-time of the loop check algorithm.  In
one diabolical case I tried, it reduced the run-time from 15 minutes (all
in kernel time) to .3 seconds.
Fixing the wakeup paths could be done at wakeup time in a similar manner
by keeping track of nodes that have already been visited, but that is
harder, since there can be multiple wakeups on different
cpus... Thus, I've opted to limit the number of possible wakeup paths when
the paths are created.
This is accomplished by noting that the endpoint file descriptors that
are found during the loop detection pass (from the newly added link) are
actually the sources for wakeup events.  I keep a list of these file
descriptors and limit the number and length of these paths that emanate
from these 'source file descriptors'.  In the current implementation I
allow 1000 paths of length 1, 500 of length 2, 100 of length 3, 50 of
length 4 and 10 of length 5. Note that it is sufficient to check the
'source file descriptors' reachable from the newly added link, since no
other 'source file descriptors' will have newly added links. This allows
us to check only the wakeup paths that may have gotten too long, and not
re-check all possible wakeup paths on the system.
In terms of the path limit selection, I think it's first worth noting that
the most common case for epoll is probably the model where you have 1
epoll file descriptor that is monitoring n number of 'source file
descriptors'.  In this case, each 'source file descriptor' has 1 path of
length 1.  Thus, I believe that the limits I'm proposing are quite
reasonable and in fact may be too generous.  Thus, I'm hoping that the
proposed limits will not cause any workloads that currently work to
fail.
In terms of locking, I have extended the use of the 'epmutex' to all
epoll_ctl add and remove operations.  Currently it's only used in a subset
of the add paths.  I need to hold the epmutex so that we can correctly
traverse a coherent graph to check the number of paths.  I believe that
this additional locking is probably ok, since it's in the setup/teardown
paths, and doesn't affect the running paths, but it certainly is going to
add some extra overhead.  Also worth noting is that the epmutex was
recently added to the epoll_ctl add operations in the initial path loop
detection code using the argument that it was not on a critical path.
Another thing to note here is the length of epoll chains that is allowed.
Currently, eventpoll.c defines:
/* Maximum number of nesting allowed inside epoll sets */
#define EP_MAX_NESTS 4
This basically means that I am limited to a graph depth of 5 (EP_MAX_NESTS
+ 1). However, this limit is currently only enforced during the loop
check detection code, and only when the epoll file descriptors are added
in a certain order. Thus, this limit is currently easily bypassed. The
newly added check for wakeup paths strictly limits the wakeup paths to a
length of 5, regardless of the order in which ep's are linked together.
Thus, a side-effect of the new code is a more consistent enforcement of
the graph depth.
Thus far, I've tested this using the sample programs previously
mentioned, which now either return quickly or return -EINVAL.  I've also
tested using the piptest.c epoll tester, which showed no difference in
performance.  I've also created a number of different epoll networks and
tested that they behave as expected.
I believe this solves the original diabolical test cases, while still
preserving the sane epoll nesting.
Signed-off-by: Jason Baron <[email protected]>
Cc: Nelson Elhage <[email protected]>
Cc: Davide Libenzi <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
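
A hedged sketch of the per-length path budget described above (1000/500/100/50/10); purely illustrative, not fs/eventpoll.c. Here path_count[d] counts wakeup paths of length d+1 emanating from a 'source file descriptor', and the new link is refused once any bucket exceeds its cap.

#include <stdbool.h>

#define EP_PATH_MAX_DEPTH 5
static const int path_limits[EP_PATH_MAX_DEPTH] = { 1000, 500, 100, 50, 10 };

/* Illustrative: reject the new link if any path-length bucket overflows. */
static bool path_counts_ok(const int path_count[EP_PATH_MAX_DEPTH])
{
    for (int d = 0; d < EP_PATH_MAX_DEPTH; d++) {
        if (path_count[d] > path_limits[d])
            return false;
    }
    return true;
}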
static inline int has_header(const char *str, const char *header)
{
/* header + 2 to skip over CRLF prefix. (make sure you have one!) */
if (!str)
return 0;
return av_stristart(str, header + 2, NULL) || av_stristr(str, header);
} | 0 | [
"CWE-119",
"CWE-787"
]
| FFmpeg | 2a05c8f813de6f2278827734bf8102291e7484aa | 244,458,579,894,755,500,000,000,000,000,000,000,000 | 7 | http: make length/offset-related variables unsigned.
Fixes #5992, reported and found by Paul Cher <[email protected]>. |
OwnedImpl::OwnedImpl(absl::string_view data) : OwnedImpl() { add(data); } | 0 | [
"CWE-401"
]
| envoy | 5eba69a1f375413fb93fab4173f9c393ac8c2818 | 93,951,309,373,568,670,000,000,000,000,000,000,000 | 1 | [buffer] Add on-drain hook to buffer API and use it to avoid fragmentation due to tracking of H2 data and control frames in the output buffer (#144)
Signed-off-by: antonio <[email protected]> |
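
A hedged C sketch of the drain-hook idea from the message (Envoy's real API is C++ and differs): the buffer owner registers a callback that fires whenever bytes are drained, so external accounting (such as tracked H2 frame lengths) shrinks in step instead of pinning freed regions and fragmenting memory.

#include <stddef.h>

/* Illustrative drain hook: fired with the number of bytes drained. */
typedef void (*drain_hook_fn)(size_t drained, void *ctx);

struct tracked_buffer {
    size_t size;
    drain_hook_fn on_drain;
    void *ctx;
};

static void buffer_drain(struct tracked_buffer *b, size_t n)
{
    if (n > b->size)
        n = b->size;
    b->size -= n;
    if (b->on_drain)
        b->on_drain(n, b->ctx);   /* let trackers release their share */
}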
Error Box_infe::write(StreamWriter& writer) const
{
size_t box_start = reserve_box_header_space(writer);
if (get_version() <= 1) {
writer.write16((uint16_t)m_item_ID);
writer.write16(m_item_protection_index);
writer.write(m_item_name);
writer.write(m_content_type);
writer.write(m_content_encoding);
}
if (get_version() >= 2) {
if (get_version() == 2) {
writer.write16((uint16_t)m_item_ID);
}
else if (get_version()==3) {
writer.write32(m_item_ID);
}
writer.write16(m_item_protection_index);
if (m_item_type.empty()) {
writer.write32(0);
}
else {
writer.write32(from_fourcc(m_item_type.c_str()));
}
writer.write(m_item_name);
if (m_item_type == "mime") {
writer.write(m_content_type);
writer.write(m_content_encoding);
}
else if (m_item_type == "uri ") {
writer.write(m_item_uri_type);
}
}
prepend_header(writer, box_start);
return Error::Ok;
} | 0 | [
"CWE-703"
]
| libheif | 2710c930918609caaf0a664e9c7bc3dce05d5b58 | 44,322,592,019,301,820,000,000,000,000,000,000,000 | 44 | force fraction to a limited resolution to finally solve those pesky numerical edge cases |
interp(i_ctx_t **pi_ctx_p /* context for execution, updated if resched */,
const ref * pref /* object to interpret */,
ref * perror_object)
{
i_ctx_t *i_ctx_p = *pi_ctx_p;
/*
* Note that iref may actually be either a ref * or a ref_packed *.
* Certain DEC compilers assume that a ref * is ref-aligned even if it
* is cast to a short *, and generate code on this assumption, leading
* to "unaligned access" errors. For this reason, we declare
* iref_packed, and use a macro to cast it to the more aligned type
* where necessary (which is almost everywhere it is used). This may
* lead to compiler warnings about "cast increases alignment
* requirements", but this is less harmful than expensive traps at run
* time.
*/
register const ref_packed *iref_packed = (const ref_packed *)pref;
/*
* To make matters worse, some versions of gcc/egcs have a bug that
* leads them to assume that if iref_packed is EVER cast to a ref *,
* it is ALWAYS ref-aligned. We detect this in stdpre.h and provide
* the following workaround:
*/
#ifdef ALIGNMENT_ALIASING_BUG
const ref *iref_temp;
# define IREF (iref_temp = (const ref *)iref_packed, iref_temp)
#else
# define IREF ((const ref *)iref_packed)
#endif
#define SET_IREF(rp) (iref_packed = (const ref_packed *)(rp))
register int icount = 0; /* # of consecutive tokens at iref */
register os_ptr iosp = osp; /* private copy of osp */
register es_ptr iesp = esp; /* private copy of esp */
int code;
ref token; /* token read from file or string, */
/* must be declared in this scope */
ref *pvalue;
ref refnull;
uint opindex; /* needed for oparrays */
os_ptr whichp;
/*
* We have to make the error information into a struct;
* otherwise, the Watcom compiler will assign it to registers
* strictly on the basis of textual frequency.
* We also have to use ref_assign_inline everywhere, and
* avoid direct assignments of refs, so that esi and edi
* will remain available on Intel processors.
*/
struct interp_error_s {
int code;
int line;
const ref *obj;
ref full;
} ierror;
/*
* Get a pointer to the name table so that we can use the
* inline version of name_index_ref.
*/
const name_table *const int_nt = imemory->gs_lib_ctx->gs_name_table;
#define set_error(ecode)\
{ ierror.code = ecode; ierror.line = __LINE__; }
#define return_with_error(ecode, objp)\
{ set_error(ecode); ierror.obj = objp; goto rwe; }
#define return_with_error_iref(ecode)\
{ set_error(ecode); goto rwei; }
#define return_with_code_iref()\
{ ierror.line = __LINE__; goto rweci; }
#define return_with_stackoverflow(objp)\
{ o_stack.requested = 1; return_with_error(gs_error_stackoverflow, objp); }
#define return_with_stackoverflow_iref()\
{ o_stack.requested = 1; return_with_error_iref(gs_error_stackoverflow); }
/*
* If control reaches the special operators (x_add, etc.) as a result of
* interpreting an executable name, iref points to the name, not the
* operator, so the name rather than the operator becomes the error object,
* which is wrong. We detect and handle this case explicitly when an error
* occurs, so as not to slow down the non-error case.
*/
#define return_with_error_tx_op(err_code)\
{ if (r_has_type(IREF, t_name)) {\
return_with_error(err_code, pvalue);\
} else {\
return_with_error_iref(err_code);\
}\
}
int *ticks_left = &imemory_system->gs_lib_ctx->gcsignal;
#if defined(DEBUG_TRACE_PS_OPERATORS) || defined(DEBUG)
int (*call_operator_fn)(op_proc_t, i_ctx_t *) = do_call_operator;
if (gs_debug_c('!'))
call_operator_fn = do_call_operator_verbose;
#endif
*ticks_left = i_ctx_p->time_slice_ticks;
make_null(&ierror.full);
ierror.obj = &ierror.full;
make_null(&refnull);
pvalue = &refnull;
/*
* If we exceed the VMThreshold, set *ticks_left to -100
* to alert the interpreter that we need to garbage collect.
*/
set_gc_signal(i_ctx_p, -100);
esfile_clear_cache();
/*
* From here on, if icount > 0, iref and icount correspond
* to the top entry on the execution stack: icount is the count
* of sequential entries remaining AFTER the current one.
*/
#define IREF_NEXT(ip)\
((const ref_packed *)((const ref *)(ip) + 1))
#define IREF_NEXT_EITHER(ip)\
( r_is_packed(ip) ? (ip) + 1 : IREF_NEXT(ip) )
#define store_state(ep)\
( icount > 0 ? (ep->value.const_refs = IREF + 1, r_set_size(ep, icount)) : 0 )
#define store_state_short(ep)\
( icount > 0 ? (ep->value.packed = iref_packed + 1, r_set_size(ep, icount)) : 0 )
#define store_state_either(ep)\
( icount > 0 ? (ep->value.packed = IREF_NEXT_EITHER(iref_packed), r_set_size(ep, icount)) : 0 )
#define next()\
if ( --icount > 0 ) { iref_packed = IREF_NEXT(iref_packed); goto top; } else goto out
#define next_short()\
if ( --icount <= 0 ) { if ( icount < 0 ) goto up; iesp--; }\
++iref_packed; goto top
#define next_either()\
if ( --icount <= 0 ) { if ( icount < 0 ) goto up; iesp--; }\
iref_packed = IREF_NEXT_EITHER(iref_packed); goto top
#if !PACKED_SPECIAL_OPS
# undef next_either
# define next_either() next()
# undef store_state_either
# define store_state_either(ep) store_state(ep)
#endif
/* We want to recognize executable arrays here, */
/* so we push the argument on the estack and enter */
/* the loop at the bottom. */
if (iesp >= estop)
return_with_error(gs_error_execstackoverflow, pref);
++iesp;
ref_assign_inline(iesp, pref);
goto bot;
top:
/*
* This is the top of the interpreter loop.
* iref points to the ref being interpreted.
* Note that this might be an element of a packed array,
* not a real ref: we carefully arranged the first 16 bits of
* a ref and of a packed array element so they could be distinguished
* from each other. (See ghost.h and packed.h for more detail.)
*/
INCR(top);
#ifdef DEBUG
/* Do a little validation on the top o-stack entry. */
if (iosp >= osbot &&
(r_type(iosp) == t__invalid || r_type(iosp) >= tx_next_op)
) {
mlprintf(imemory, "Invalid value on o-stack!\n");
return_with_error_iref(gs_error_Fatal);
}
if (gs_debug['I'] ||
(gs_debug['i'] &&
(r_is_packed(iref_packed) ?
r_packed_is_name(iref_packed) :
r_has_type(IREF, t_name)))
) {
os_ptr save_osp = osp; /* avoid side-effects */
es_ptr save_esp = esp;
osp = iosp;
esp = iesp;
dmlprintf5(imemory, "d%u,e%u<%u>0x%lx(%d): ",
ref_stack_count(&d_stack), ref_stack_count(&e_stack),
ref_stack_count(&o_stack), (ulong)IREF, icount);
debug_print_ref(imemory, IREF);
if (iosp >= osbot) {
dmputs(imemory, " // ");
debug_print_ref(imemory, iosp);
}
dmputc(imemory, '\n');
osp = save_osp;
esp = save_esp;
dmflush(imemory);
}
#endif
/* Objects that have attributes (arrays, dictionaries, files, and strings) */
/* use lit and exec; other objects use plain and plain_exec. */
#define lit(t) type_xe_value(t, a_execute)
#define exec(t) type_xe_value(t, a_execute + a_executable)
#define nox(t) type_xe_value(t, 0)
#define nox_exec(t) type_xe_value(t, a_executable)
#define plain(t) type_xe_value(t, 0)
#define plain_exec(t) type_xe_value(t, a_executable)
/*
* We have to populate enough cases of the switch statement to force
* some compilers to use a dispatch rather than a testing loop.
* What a nuisance!
*/
switch (r_type_xe(iref_packed)) {
/* Access errors. */
#define cases_invalid()\
case plain(t__invalid): case plain_exec(t__invalid)
cases_invalid():
return_with_error_iref(gs_error_Fatal);
#define cases_nox()\
case nox_exec(t_array): case nox_exec(t_dictionary):\
case nox_exec(t_file): case nox_exec(t_string):\
case nox_exec(t_mixedarray): case nox_exec(t_shortarray)
cases_nox():
return_with_error_iref(gs_error_invalidaccess);
/*
* Literal objects. We have to enumerate all the types.
* In fact, we have to include some extra plain_exec entries
* just to populate the switch. We break them up into groups
* to avoid overflowing some preprocessors.
*/
#define cases_lit_1()\
case lit(t_array): case nox(t_array):\
case plain(t_boolean): case plain_exec(t_boolean):\
case lit(t_dictionary): case nox(t_dictionary)
#define cases_lit_2()\
case lit(t_file): case nox(t_file):\
case plain(t_fontID): case plain_exec(t_fontID):\
case plain(t_integer): case plain_exec(t_integer):\
case plain(t_mark): case plain_exec(t_mark)
#define cases_lit_3()\
case plain(t_name):\
case plain(t_null):\
case plain(t_oparray):\
case plain(t_operator)
#define cases_lit_4()\
case plain(t_real): case plain_exec(t_real):\
case plain(t_save): case plain_exec(t_save):\
case lit(t_string): case nox(t_string)
#define cases_lit_5()\
case lit(t_mixedarray): case nox(t_mixedarray):\
case lit(t_shortarray): case nox(t_shortarray):\
case plain(t_device): case plain_exec(t_device):\
case plain(t_struct): case plain_exec(t_struct):\
case plain(t_astruct): case plain_exec(t_astruct)
/* Executable arrays are treated as literals in direct execution. */
#define cases_lit_array()\
case exec(t_array): case exec(t_mixedarray): case exec(t_shortarray)
cases_lit_1():
cases_lit_2():
cases_lit_3():
cases_lit_4():
cases_lit_5():
INCR(lit);
break;
cases_lit_array():
INCR(lit_array);
break;
/* Special operators. */
case plain_exec(tx_op_add):
x_add: INCR(x_add);
osp = iosp; /* sync o_stack */
if ((code = zop_add(i_ctx_p)) < 0)
return_with_error_tx_op(code);
iosp--;
next_either();
case plain_exec(tx_op_def):
x_def: INCR(x_def);
osp = iosp; /* sync o_stack */
if ((code = zop_def(i_ctx_p)) < 0)
return_with_error_tx_op(code);
iosp -= 2;
next_either();
case plain_exec(tx_op_dup):
x_dup: INCR(x_dup);
if (iosp < osbot)
return_with_error_tx_op(gs_error_stackunderflow);
if (iosp >= ostop) {
o_stack.requested = 1;
return_with_error_tx_op(gs_error_stackoverflow);
}
iosp++;
ref_assign_inline(iosp, iosp - 1);
next_either();
case plain_exec(tx_op_exch):
x_exch: INCR(x_exch);
if (iosp <= osbot)
return_with_error_tx_op(gs_error_stackunderflow);
ref_assign_inline(&token, iosp);
ref_assign_inline(iosp, iosp - 1);
ref_assign_inline(iosp - 1, &token);
next_either();
case plain_exec(tx_op_if):
x_if: INCR(x_if);
if (!r_is_proc(iosp))
return_with_error_tx_op(check_proc_failed(iosp));
if (!r_has_type(iosp - 1, t_boolean))
return_with_error_tx_op((iosp <= osbot ?
gs_error_stackunderflow : gs_error_typecheck));
if (!iosp[-1].value.boolval) {
iosp -= 2;
next_either();
}
if (iesp >= estop)
return_with_error_tx_op(gs_error_execstackoverflow);
store_state_either(iesp);
whichp = iosp;
iosp -= 2;
goto ifup;
case plain_exec(tx_op_ifelse):
x_ifelse: INCR(x_ifelse);
if (!r_is_proc(iosp))
return_with_error_tx_op(check_proc_failed(iosp));
if (!r_is_proc(iosp - 1))
return_with_error_tx_op(check_proc_failed(iosp - 1));
if (!r_has_type(iosp - 2, t_boolean))
return_with_error_tx_op((iosp < osbot + 2 ?
gs_error_stackunderflow : gs_error_typecheck));
if (iesp >= estop)
return_with_error_tx_op(gs_error_execstackoverflow);
store_state_either(iesp);
whichp = (iosp[-2].value.boolval ? iosp - 1 : iosp);
iosp -= 3;
/* Open code "up" for the array case(s) */
ifup:if ((icount = r_size(whichp) - 1) <= 0) {
if (icount < 0)
goto up; /* 0-element proc */
SET_IREF(whichp->value.refs); /* 1-element proc */
if (--(*ticks_left) > 0)
goto top;
}
++iesp;
/* Do a ref_assign, but also set iref. */
iesp->tas = whichp->tas;
SET_IREF(iesp->value.refs = whichp->value.refs);
if (--(*ticks_left) > 0)
goto top;
goto slice;
case plain_exec(tx_op_index):
x_index: INCR(x_index);
osp = iosp; /* zindex references o_stack */
if ((code = zindex(i_ctx_p)) < 0)
return_with_error_tx_op(code);
next_either();
case plain_exec(tx_op_pop):
x_pop: INCR(x_pop);
if (iosp < osbot)
return_with_error_tx_op(gs_error_stackunderflow);
iosp--;
next_either();
case plain_exec(tx_op_roll):
x_roll: INCR(x_roll);
osp = iosp; /* zroll references o_stack */
if ((code = zroll(i_ctx_p)) < 0)
return_with_error_tx_op(code);
iosp -= 2;
next_either();
case plain_exec(tx_op_sub):
x_sub: INCR(x_sub);
osp = iosp; /* sync o_stack */
if ((code = zop_sub(i_ctx_p)) < 0)
return_with_error_tx_op(code);
iosp--;
next_either();
/* Executable types. */
case plain_exec(t_null):
goto bot;
case plain_exec(t_oparray):
/* Replace with the definition and go again. */
INCR(exec_array);
opindex = op_index(IREF);
pvalue = (ref *)IREF->value.const_refs;
opst: /* Prepare to call a t_oparray procedure in *pvalue. */
store_state(iesp);
oppr: /* Record the stack depths in case of failure. */
if (iesp >= estop - 4)
return_with_error_iref(gs_error_execstackoverflow);
iesp += 5;
osp = iosp; /* ref_stack_count_inline needs this */
make_mark_estack(iesp - 4, es_other, oparray_cleanup);
make_int(iesp - 3, opindex); /* for .errorexec effect */
make_int(iesp - 2, ref_stack_count_inline(&o_stack));
make_int(iesp - 1, ref_stack_count_inline(&d_stack));
make_op_estack(iesp, oparray_pop);
goto pr;
prst: /* Prepare to call the procedure (array) in *pvalue. */
store_state(iesp);
pr: /* Call the array in *pvalue. State has been stored. */
/* We want to do this check before assigning icount so icount is correct
* in the event of a gs_error_execstackoverflow
*/
if (iesp >= estop) {
return_with_error_iref(gs_error_execstackoverflow);
}
if ((icount = r_size(pvalue) - 1) <= 0) {
if (icount < 0)
goto up; /* 0-element proc */
SET_IREF(pvalue->value.refs); /* 1-element proc */
if (--(*ticks_left) > 0)
goto top;
}
++iesp;
/* Do a ref_assign, but also set iref. */
iesp->tas = pvalue->tas;
SET_IREF(iesp->value.refs = pvalue->value.refs);
if (--(*ticks_left) > 0)
goto top;
goto slice;
case plain_exec(t_operator):
INCR(exec_operator);
if (--(*ticks_left) <= 0) { /* The following doesn't work, */
/* and I can't figure out why. */
/****** goto sst; ******/
}
esp = iesp; /* save for operator */
osp = iosp; /* ditto */
/* Operator routines take osp as an argument. */
/* This is just a convenience, since they adjust */
/* osp themselves to reflect the results. */
/* Operators that (net) push information on the */
/* operand stack must check for overflow: */
/* this normally happens automatically through */
/* the push macro (in oper.h). */
/* Operators that do not typecheck their operands, */
/* or take a variable number of arguments, */
/* must check explicitly for stack underflow. */
/* (See oper.h for more detail.) */
/* Note that each case must set iosp = osp: */
/* this is so we can switch on code without having to */
/* store it and reload it (for dumb compilers). */
switch (code = call_operator(real_opproc(IREF), i_ctx_p)) {
case 0: /* normal case */
case 1: /* alternative success case */
iosp = osp;
next();
case o_push_estack: /* store the state and go to up */
store_state(iesp);
opush:iosp = osp;
iesp = esp;
if (--(*ticks_left) > 0)
goto up;
goto slice;
case o_pop_estack: /* just go to up */
opop:iosp = osp;
if (esp == iesp)
goto bot;
iesp = esp;
goto up;
case o_reschedule:
store_state(iesp);
goto res;
case gs_error_Remap_Color:
oe_remap: store_state(iesp);
remap: if (iesp + 2 >= estop) {
esp = iesp;
code = ref_stack_extend(&e_stack, 2);
if (code < 0)
return_with_error_iref(code);
iesp = esp;
}
packed_get(imemory, iref_packed, iesp + 1);
make_oper(iesp + 2, 0,
r_ptr(&istate->remap_color_info,
int_remap_color_info_t)->proc);
iesp += 2;
goto up;
}
iosp = osp;
iesp = esp;
return_with_code_iref();
case plain_exec(t_name):
INCR(exec_name);
pvalue = IREF->value.pname->pvalue;
if (!pv_valid(pvalue)) {
uint nidx = names_index(int_nt, IREF);
uint htemp = 0;
INCR(find_name);
if ((pvalue = dict_find_name_by_index_inline(nidx, htemp)) == 0)
return_with_error_iref(gs_error_undefined);
}
/* Dispatch on the type of the value. */
/* Again, we have to over-populate the switch. */
switch (r_type_xe(pvalue)) {
cases_invalid():
return_with_error_iref(gs_error_Fatal);
cases_nox(): /* access errors */
return_with_error_iref(gs_error_invalidaccess);
cases_lit_1():
cases_lit_2():
cases_lit_3():
cases_lit_4():
cases_lit_5():
INCR(name_lit);
/* Just push the value */
if (iosp >= ostop)
return_with_stackoverflow(pvalue);
++iosp;
ref_assign_inline(iosp, pvalue);
next();
case exec(t_array):
case exec(t_mixedarray):
case exec(t_shortarray):
INCR(name_proc);
/* This is an executable procedure, execute it. */
goto prst;
case plain_exec(tx_op_add):
goto x_add;
case plain_exec(tx_op_def):
goto x_def;
case plain_exec(tx_op_dup):
goto x_dup;
case plain_exec(tx_op_exch):
goto x_exch;
case plain_exec(tx_op_if):
goto x_if;
case plain_exec(tx_op_ifelse):
goto x_ifelse;
case plain_exec(tx_op_index):
goto x_index;
case plain_exec(tx_op_pop):
goto x_pop;
case plain_exec(tx_op_roll):
goto x_roll;
case plain_exec(tx_op_sub):
goto x_sub;
case plain_exec(t_null):
goto bot;
case plain_exec(t_oparray):
INCR(name_oparray);
opindex = op_index(pvalue);
pvalue = (ref *)pvalue->value.const_refs;
goto opst;
case plain_exec(t_operator):
INCR(name_operator);
{ /* Shortcut for operators. */
/* See above for the logic. */
if (--(*ticks_left) <= 0) { /* The following doesn't work, */
/* and I can't figure out why. */
/****** goto sst; ******/
}
esp = iesp;
osp = iosp;
switch (code = call_operator(real_opproc(pvalue),
i_ctx_p)
) {
case 0: /* normal case */
case 1: /* alternative success case */
iosp = osp;
next();
case o_push_estack:
store_state(iesp);
goto opush;
case o_pop_estack:
goto opop;
case o_reschedule:
store_state(iesp);
goto res;
case gs_error_Remap_Color:
goto oe_remap;
}
iosp = osp;
iesp = esp;
return_with_error(code, pvalue);
}
case plain_exec(t_name):
case exec(t_file):
case exec(t_string):
default:
/* Not a procedure, reinterpret it. */
store_state(iesp);
icount = 0;
SET_IREF(pvalue);
goto top;
}
case exec(t_file):
{ /* Executable file. Read the next token and interpret it. */
stream *s;
scanner_state sstate;
check_read_known_file(i_ctx_p, s, IREF, return_with_error_iref);
rt:
if (iosp >= ostop) /* check early */
return_with_stackoverflow_iref();
osp = iosp; /* gs_scan_token uses ostack */
gs_scanner_init_options(&sstate, IREF, i_ctx_p->scanner_options);
again:
code = gs_scan_token(i_ctx_p, &token, &sstate);
iosp = osp; /* ditto */
switch (code) {
case 0: /* read a token */
/* It's worth checking for literals, which make up */
/* the majority of input tokens, before storing the */
/* state on the e-stack. Note that because of //, */
/* the token may have *any* type and attributes. */
/* Note also that executable arrays aren't executed */
/* at the top level -- they're treated as literals. */
if (!r_has_attr(&token, a_executable) ||
r_is_array(&token)
) { /* If gs_scan_token used the o-stack, */
/* we know we can do a push now; if not, */
/* the pre-check is still valid. */
iosp++;
ref_assign_inline(iosp, &token);
goto rt;
}
store_state(iesp);
/* Push the file on the e-stack */
if (iesp >= estop)
return_with_error_iref(gs_error_execstackoverflow);
esfile_set_cache(++iesp);
ref_assign_inline(iesp, IREF);
SET_IREF(&token);
icount = 0;
goto top;
case gs_error_undefined: /* //name undefined */
gs_scanner_error_object(i_ctx_p, &sstate, &token);
return_with_error(code, &token);
case scan_EOF: /* end of file */
esfile_clear_cache();
goto bot;
case scan_BOS:
/* Binary object sequences */
/* ARE executed at the top level. */
store_state(iesp);
/* Push the file on the e-stack */
if (iesp >= estop)
return_with_error_iref(gs_error_execstackoverflow);
esfile_set_cache(++iesp);
ref_assign_inline(iesp, IREF);
pvalue = &token;
goto pr;
case scan_Refill:
store_state(iesp);
/* iref may point into the exec stack; */
/* save its referent now. */
ref_assign_inline(&token, IREF);
/* Push the file on the e-stack */
if (iesp >= estop)
return_with_error_iref(gs_error_execstackoverflow);
++iesp;
ref_assign_inline(iesp, &token);
esp = iesp;
osp = iosp;
code = gs_scan_handle_refill(i_ctx_p, &sstate, true,
ztokenexec_continue);
scan_cont:
iosp = osp;
iesp = esp;
switch (code) {
case 0:
iesp--; /* don't push the file */
goto again; /* stacks are unchanged */
case o_push_estack:
esfile_clear_cache();
if (--(*ticks_left) > 0)
goto up;
goto slice;
}
/* must be an error */
iesp--; /* don't push the file */
return_with_code_iref();
case scan_Comment:
case scan_DSC_Comment: {
/* See scan_Refill above for comments. */
ref file_token;
store_state(iesp);
ref_assign_inline(&file_token, IREF);
if (iesp >= estop)
return_with_error_iref(gs_error_execstackoverflow);
++iesp;
ref_assign_inline(iesp, &file_token);
esp = iesp;
osp = iosp;
code = ztoken_handle_comment(i_ctx_p,
&sstate, &token,
code, true, true,
ztokenexec_continue);
}
goto scan_cont;
default: /* error */
ref_assign_inline(&token, IREF);
gs_scanner_error_object(i_ctx_p, &sstate, &token);
return_with_error(code, &token);
}
}
case exec(t_string):
{ /* Executable string. Read a token and interpret it. */
stream ss;
scanner_state sstate;
s_init(&ss, NULL);
sread_string(&ss, IREF->value.bytes, r_size(IREF));
gs_scanner_init_stream_options(&sstate, &ss, SCAN_FROM_STRING);
osp = iosp; /* gs_scan_token uses ostack */
code = gs_scan_token(i_ctx_p, &token, &sstate);
iosp = osp; /* ditto */
switch (code) {
case 0: /* read a token */
case scan_BOS: /* binary object sequence */
store_state(iesp);
/* If the updated string isn't empty, push it back */
/* on the e-stack. */
{
/* This is just the available buffer size, so
a signed int is plenty big
*/
int size = sbufavailable(&ss);
if (size > 0) {
if (iesp >= estop)
return_with_error_iref(gs_error_execstackoverflow);
++iesp;
iesp->tas.type_attrs = IREF->tas.type_attrs;
iesp->value.const_bytes = sbufptr(&ss);
r_set_size(iesp, size);
}
}
if (code == 0) {
SET_IREF(&token);
icount = 0;
goto top;
}
/* Handle BOS specially */
pvalue = &token;
goto pr;
case scan_EOF: /* end of string */
goto bot;
case scan_Refill: /* error */
code = gs_note_error(gs_error_syntaxerror);
/* fall through */
default: /* error */
ref_assign_inline(&token, IREF);
gs_scanner_error_object(i_ctx_p, &sstate, &token);
return_with_error(code, &token);
}
}
/* Handle packed arrays here by re-dispatching. */
/* This also picks up some anomalous cases of non-packed arrays. */
default:
{
uint index;
switch (*iref_packed >> r_packed_type_shift) {
case pt_full_ref:
case pt_full_ref + 1:
INCR(p_full);
if (iosp >= ostop)
return_with_stackoverflow_iref();
/* We know this can't be an executable object */
/* requiring special handling, so we just push it. */
++iosp;
/* We know that refs are properly aligned: */
/* see packed.h for details. */
ref_assign_inline(iosp, IREF);
next();
case pt_executable_operator:
index = *iref_packed & packed_value_mask;
if (--(*ticks_left) <= 0) { /* The following doesn't work, */
/* and I can't figure out why. */
/****** goto sst_short; ******/
}
if (!op_index_is_operator(index)) {
INCR(p_exec_oparray);
store_state_short(iesp);
opindex = index;
/* Call the operator procedure. */
index -= op_def_count;
pvalue = (ref *)
(index < r_size(&i_ctx_p->op_array_table_global.table) ?
i_ctx_p->op_array_table_global.table.value.const_refs +
index :
i_ctx_p->op_array_table_local.table.value.const_refs +
(index - r_size(&i_ctx_p->op_array_table_global.table)));
goto oppr;
}
INCR(p_exec_operator);
/* See the main plain_exec(t_operator) case */
/* for details of what happens here. */
#if PACKED_SPECIAL_OPS
/*
* We arranged in iinit.c that the special ops
* have operator indices starting at 1.
*
* The (int) cast in the next line is required
* because some compilers don't allow arithmetic
* involving two different enumerated types.
*/
# define case_xop(xop) case xop - (int)tx_op + 1
switch (index) {
case_xop(tx_op_add):goto x_add;
case_xop(tx_op_def):goto x_def;
case_xop(tx_op_dup):goto x_dup;
case_xop(tx_op_exch):goto x_exch;
case_xop(tx_op_if):goto x_if;
case_xop(tx_op_ifelse):goto x_ifelse;
case_xop(tx_op_index):goto x_index;
case_xop(tx_op_pop):goto x_pop;
case_xop(tx_op_roll):goto x_roll;
case_xop(tx_op_sub):goto x_sub;
case 0: /* for dumb compilers */
default:
;
}
# undef case_xop
#endif
INCR(p_exec_non_x_operator);
esp = iesp;
osp = iosp;
switch (code = call_operator(op_index_proc(index), i_ctx_p)) {
case 0:
case 1:
iosp = osp;
next_short();
case o_push_estack:
store_state_short(iesp);
goto opush;
case o_pop_estack:
iosp = osp;
if (esp == iesp) {
next_short();
}
iesp = esp;
goto up;
case o_reschedule:
store_state_short(iesp);
goto res;
case gs_error_Remap_Color:
store_state_short(iesp);
goto remap;
}
iosp = osp;
iesp = esp;
return_with_code_iref();
case pt_integer:
INCR(p_integer);
if (iosp >= ostop)
return_with_stackoverflow_iref();
++iosp;
make_int(iosp,
((int)*iref_packed & packed_int_mask) +
packed_min_intval);
next_short();
case pt_literal_name:
INCR(p_lit_name);
{
uint nidx = *iref_packed & packed_value_mask;
if (iosp >= ostop)
return_with_stackoverflow_iref();
++iosp;
name_index_ref_inline(int_nt, nidx, iosp);
next_short();
}
case pt_executable_name:
INCR(p_exec_name);
{
uint nidx = *iref_packed & packed_value_mask;
pvalue = name_index_ptr_inline(int_nt, nidx)->pvalue;
if (!pv_valid(pvalue)) {
uint htemp = 0;
INCR(p_find_name);
if ((pvalue = dict_find_name_by_index_inline(nidx, htemp)) == 0) {
names_index_ref(int_nt, nidx, &token);
return_with_error(gs_error_undefined, &token);
}
}
if (r_has_masked_attrs(pvalue, a_execute, a_execute + a_executable)) { /* Literal, push it. */
INCR(p_name_lit);
if (iosp >= ostop)
return_with_stackoverflow_iref();
++iosp;
ref_assign_inline(iosp, pvalue);
next_short();
}
if (r_is_proc(pvalue)) { /* This is an executable procedure, */
/* execute it. */
INCR(p_name_proc);
store_state_short(iesp);
goto pr;
}
/* Not a literal or procedure, reinterpret it. */
store_state_short(iesp);
icount = 0;
SET_IREF(pvalue);
goto top;
}
/* default can't happen here */
}
}
}
/* Literal type, just push it. */
if (iosp >= ostop)
return_with_stackoverflow_iref();
++iosp;
ref_assign_inline(iosp, IREF);
bot:next();
out: /* At most 1 more token in the current procedure. */
/* (We already decremented icount.) */
if (!icount) {
/* Pop the execution stack for tail recursion. */
iesp--;
iref_packed = IREF_NEXT(iref_packed);
goto top;
}
up:if (--(*ticks_left) < 0)
goto slice;
/* See if there is anything left on the execution stack. */
if (!r_is_proc(iesp)) {
SET_IREF(iesp--);
icount = 0;
goto top;
}
SET_IREF(iesp->value.refs); /* next element of array */
icount = r_size(iesp) - 1;
if (icount <= 0) { /* <= 1 more elements */
iesp--; /* pop, or tail recursion */
if (icount < 0)
goto up;
}
goto top;
res:
/* Some operator has asked for context rescheduling. */
/* We've done a store_state. */
*pi_ctx_p = i_ctx_p;
code = (*i_ctx_p->reschedule_proc)(pi_ctx_p);
i_ctx_p = *pi_ctx_p;
sched: /* We've just called a scheduling procedure. */
/* The interpreter state is in memory; iref is not current. */
if (code < 0) {
set_error(code);
/*
* We need a real object to return as the error object.
* (It only has to last long enough to store in
* *perror_object.)
*/
make_null_proc(&ierror.full);
SET_IREF(ierror.obj = &ierror.full);
goto error_exit;
}
/* Reload state information from memory. */
iosp = osp;
iesp = esp;
goto up;
#if 0 /****** ****** ***** */
sst: /* Time-slice, but push the current object first. */
store_state(iesp);
if (iesp >= estop)
return_with_error_iref(gs_error_execstackoverflow);
iesp++;
ref_assign_inline(iesp, iref);
#endif /****** ****** ***** */
slice: /* It's time to time-slice or garbage collect. */
/* iref is not live, so we don't need to do a store_state. */
osp = iosp;
esp = iesp;
/* If *ticks_left <= -100, we need to GC now. */
if ((*ticks_left) <= -100) { /* We need to garbage collect now. */
*pi_ctx_p = i_ctx_p;
code = interp_reclaim(pi_ctx_p, -1);
i_ctx_p = *pi_ctx_p;
} else if (i_ctx_p->time_slice_proc != NULL) {
*pi_ctx_p = i_ctx_p;
code = (*i_ctx_p->time_slice_proc)(pi_ctx_p);
i_ctx_p = *pi_ctx_p;
} else
code = 0;
*ticks_left = i_ctx_p->time_slice_ticks;
set_code_on_interrupt(imemory, &code);
goto sched;
/* Error exits. */
rweci:
ierror.code = code;
rwei:
ierror.obj = IREF;
rwe:
if (!r_is_packed(iref_packed))
store_state(iesp);
else {
/*
* We need a real object to return as the error object.
* (It only has to last long enough to store in *perror_object.)
*/
packed_get(imemory, (const ref_packed *)ierror.obj, &ierror.full);
store_state_short(iesp);
if (IREF == ierror.obj)
SET_IREF(&ierror.full);
ierror.obj = &ierror.full;
}
error_exit:
if (GS_ERROR_IS_INTERRUPT(ierror.code)) { /* We must push the current object being interpreted */
/* back on the e-stack so it will be re-executed. */
/* Currently, this is always an executable operator, */
/* but it might be something else someday if we check */
/* for interrupts in the interpreter loop itself. */
if (iesp >= estop)
ierror.code = gs_error_execstackoverflow;
else {
iesp++;
ref_assign_inline(iesp, IREF);
}
}
esp = iesp;
osp = iosp;
ref_assign_inline(perror_object, ierror.obj);
#ifdef DEBUG
if (ierror.code == gs_error_InterpreterExit) {
/* Do not call gs_log_error to reduce the noise. */
return gs_error_InterpreterExit;
}
#endif
return gs_log_error(ierror.code, __FILE__, ierror.line);
} | 0 | []
| ghostpdl | a6807394bd94b708be24758287b606154daaaed9 | 338,514,046,006,828,080,000,000,000,000,000,000,000 | 1,022 | For hidden operators, pass a name object to error handler.
In normal operation, Postscript error handlers are passed the object which
triggered the error: this is invariably an operator object.
The issue arises when an error is triggered by an operator which is for internal
use only, and that operator is then passed to the error handler, meaning it
becomes visible to the error handler code.
By converting to a name object, the error message is still valid, but we no
longer expose internal use only operators.
The change in gs_dps1.ps is related to the above: previously an error in
scheck would throw an error against .gcheck, but as .gcheck is now a hidden
operator, it resulted in a name object being passed to the error handler. As
scheck is a 'real' operator, it's better to use the real operator, rather than
the name of an internal, hidden one. |
static void ccp_sg_free(struct ccp_sg_workarea *wa)
{
if (wa->dma_count)
dma_unmap_sg(wa->dma_dev, wa->dma_sg_head, wa->nents, wa->dma_dir);
wa->dma_count = 0;
} | 0 | [
"CWE-703",
"CWE-401"
]
| linux | 505d9dcb0f7ddf9d075e729523a33d38642ae680 | 337,624,057,912,692,500,000,000,000,000,000,000,000 | 7 | crypto: ccp - fix resource leaks in ccp_run_aes_gcm_cmd()
There are three bugs in this code:
1) If ccp_init_data() fails for &src then we need to free aad.
Use goto e_aad instead of goto e_ctx.
2) The label to free the &final_wa was named incorrectly as "e_tag" but
it should have been "e_final_wa". One error path leaked &final_wa.
3) The &tag was leaked on one error path. In that case, I added a free
before the goto because the resource was local to that block.
Fixes: 36cf515b9bbe ("crypto: ccp - Enable support for AES GCM on v5 CCPs")
Reported-by: "minihanshen(沈明航)" <[email protected]>
Signed-off-by: Dan Carpenter <[email protected]>
Reviewed-by: John Allen <[email protected]>
Tested-by: John Allen <[email protected]>
Signed-off-by: Herbert Xu <[email protected]> |
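The fix pattern here is the classic goto-ladder: one label per acquired resource, unwinding in reverse order of acquisition, so every failure point frees exactly what was acquired before it. A minimal, self-contained C sketch of that shape — acquire()/release() are hypothetical stand-ins for ccp_init_data()/ccp_free_data(), not the actual driver code:
#include <stdio.h>
/* hypothetical stand-ins for ccp_init_data()/ccp_free_data() */
static int acquire(const char *name) { printf("acquire %s\n", name); return 0; }
static void release(const char *name) { printf("release %s\n", name); }
static int run_cmd(void)
{
    int ret;
    ret = acquire("aad");
    if (ret)
        return ret;
    ret = acquire("src");          /* bug 1: on failure, aad must still be freed */
    if (ret)
        goto e_aad;
    ret = acquire("final_wa");     /* bug 2: needs its own unwind label */
    if (ret)
        goto e_src;
    /* ... do the work ... */
    ret = 0;
    release("final_wa");
e_src:                             /* labels unwind in reverse order */
    release("src");
e_aad:
    release("aad");
    return ret;
}
int main(void) { return run_cmd(); }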
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
struct l2cap_ctrl *control)
{
int err = -EINVAL;
switch (control->sar) {
case L2CAP_SAR_UNSEGMENTED:
if (chan->sdu)
break;
err = chan->ops->recv(chan, skb);
break;
case L2CAP_SAR_START:
if (chan->sdu)
break;
if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
break;
chan->sdu_len = get_unaligned_le16(skb->data);
skb_pull(skb, L2CAP_SDULEN_SIZE);
if (chan->sdu_len > chan->imtu) {
err = -EMSGSIZE;
break;
}
if (skb->len >= chan->sdu_len)
break;
chan->sdu = skb;
chan->sdu_last_frag = skb;
skb = NULL;
err = 0;
break;
case L2CAP_SAR_CONTINUE:
if (!chan->sdu)
break;
append_skb_frag(chan->sdu, skb,
&chan->sdu_last_frag);
skb = NULL;
if (chan->sdu->len >= chan->sdu_len)
break;
err = 0;
break;
case L2CAP_SAR_END:
if (!chan->sdu)
break;
append_skb_frag(chan->sdu, skb,
&chan->sdu_last_frag);
skb = NULL;
if (chan->sdu->len != chan->sdu_len)
break;
err = chan->ops->recv(chan, chan->sdu);
if (!err) {
/* Reassembly complete */
chan->sdu = NULL;
chan->sdu_last_frag = NULL;
chan->sdu_len = 0;
}
break;
}
if (err) {
kfree_skb(skb);
kfree_skb(chan->sdu);
chan->sdu = NULL;
chan->sdu_last_frag = NULL;
chan->sdu_len = 0;
}
return err;
} | 0 | [
"CWE-787"
]
| linux | e860d2c904d1a9f38a24eb44c9f34b8f915a6ea3 | 273,368,084,356,612,460,000,000,000,000,000,000,000 | 84 | Bluetooth: Properly check L2CAP config option output buffer length
Validate the output buffer length for L2CAP config requests and responses
to avoid overflowing the stack buffer used for building the option blocks.
Cc: [email protected]
Signed-off-by: Ben Seri <[email protected]>
Signed-off-by: Marcel Holtmann <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
idprime_set_security_env(struct sc_card *card,
const struct sc_security_env *env, int se_num)
{
int r;
struct sc_security_env new_env;
if (card == NULL || env == NULL) {
return SC_ERROR_INVALID_ARGUMENTS;
}
SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE);
/* The card requires algorithm reference here */
new_env = *env;
new_env.flags |= SC_SEC_ENV_ALG_REF_PRESENT;
/* SHA-1 mechanisms are not allowed in the card I have available */
switch (env->operation) {
case SC_SEC_OPERATION_DECIPHER:
if (env->algorithm_flags & SC_ALGORITHM_RSA_PAD_OAEP) {
if (env->algorithm_flags & SC_ALGORITHM_RSA_HASH_SHA1) {
new_env.algorithm_ref = 0x1D;
} else if (env->algorithm_flags & SC_ALGORITHM_RSA_HASH_SHA256) {
new_env.algorithm_ref = 0x4D;
} else if (env->algorithm_flags & SC_ALGORITHM_RSA_HASH_SHA384) {
new_env.algorithm_ref = 0x5D;
} else if (env->algorithm_flags & SC_ALGORITHM_RSA_HASH_SHA512) {
new_env.algorithm_ref = 0x6D;
}
} else { /* RSA-PKCS without hashing */
new_env.algorithm_ref = 0x1A;
}
break;
case SC_SEC_OPERATION_SIGN:
if (env->algorithm_flags & SC_ALGORITHM_RSA_PAD_PSS) {
if (env->algorithm_flags & SC_ALGORITHM_RSA_HASH_SHA256) {
new_env.algorithm_ref = 0x45;
} else if (env->algorithm_flags & SC_ALGORITHM_RSA_HASH_SHA384) {
new_env.algorithm_ref = 0x55;
} else if (env->algorithm_flags & SC_ALGORITHM_RSA_HASH_SHA512) {
new_env.algorithm_ref = 0x65;
}
} else { /* RSA-PKCS */
if (env->algorithm_flags & SC_ALGORITHM_RSA_HASH_SHA256) {
new_env.algorithm_ref = 0x42;
} else if (env->algorithm_flags & SC_ALGORITHM_RSA_HASH_SHA384) {
new_env.algorithm_ref = 0x52;
} else if (env->algorithm_flags & SC_ALGORITHM_RSA_HASH_SHA512) {
new_env.algorithm_ref = 0x62;
} else { /* RSA-PKCS without hashing */
new_env.algorithm_ref = 0x02;
}
}
break;
default:
return SC_ERROR_INVALID_ARGUMENTS;
}
r = iso_ops->set_security_env(card,
(const struct sc_security_env *) &new_env, se_num);
LOG_FUNC_RETURN(card->ctx, r);
} | 0 | []
| OpenSC | f015746d22d249642c19674298a18ad824db0ed7 | 95,166,865,598,061,830,000,000,000,000,000,000,000 | 61 | idprime: Use temporary variable instead of messing up the passed one
Thanks oss-fuzz
https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=28185 |
set_lenIV(const char* line)
{
char *p = strstr(line, "/lenIV ");
/* Allow lenIV to be negative. Thanks to Tom Kacvinsky <[email protected]> */
if (p && (isdigit((unsigned char) p[7]) || p[7] == '+' || p[7] == '-')) {
lenIV = atoi(p + 7);
}
} | 0 | [
"CWE-119",
"CWE-787"
]
| t1utils | 6b9d1aafcb61a3663c883663eb19ccdbfcde8d33 | 181,024,463,207,763,660,000,000,000,000,000,000,000 | 9 | Security fixes.
- Don't overflow the small cs_start buffer (reported by Niels
Thykier via the debian tracker (Jakub Wilk), found with a
fuzzer ("American fuzzy lop")).
- Cast arguments to <ctype.h> functions to unsigned char. |
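The <ctype.h> point generalizes: isdigit() and friends take an int that must be either EOF or a value representable as unsigned char, so passing a plain (possibly signed) char with the high bit set is undefined behavior. A minimal sketch of the safe idiom — the same cast set_lenIV() above already applies:
#include <ctype.h>
#include <stdio.h>
int main(void)
{
    char c = (char)0xE9;   /* e.g. Latin-1 e-acute; negative where char is signed */
    /* isdigit(c) would be undefined behavior on signed-char platforms;
       convert through unsigned char first: */
    if (isdigit((unsigned char)c))
        puts("digit");
    else
        puts("not a digit");
    return 0;
}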
void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type)
{
struct tcp_sock *tp = tcp_sk(sk);
/* If there are multiple conditions worthy of tracking in a
* chronograph then the highest priority enum takes precedence
* over the other conditions. So that if something "more interesting"
* starts happening, stop the previous chrono and start a new one.
*/
if (type > tp->chrono_type)
tcp_chrono_set(tp, type);
} | 0 | [
"CWE-190"
]
| net | 3b4929f65b0d8249f19a50245cd88ed1a2f78cff | 73,382,811,098,056,890,000,000,000,000,000,000,000 | 12 | tcp: limit payload size of sacked skbs
Jonathan Looney reported that TCP can trigger the following crash
in tcp_shifted_skb():
BUG_ON(tcp_skb_pcount(skb) < pcount);
This can happen if the remote peer has advertised the smallest
MSS that linux TCP accepts: 48
An skb can hold 17 fragments, and each fragment can hold 32KB
on x86, or 64KB on PowerPC.
This means that the 16-bit width of TCP_SKB_CB(skb)->tcp_gso_segs
can overflow.
Note that tcp_sendmsg() builds skbs with less than 64KB
of payload, so this problem needs SACK to be enabled.
SACK blocks allow TCP to coalesce multiple skbs in the retransmit
queue, thus filling the 17 fragments to maximal capacity.
CVE-2019-11477 -- u16 overflow of TCP_SKB_CB(skb)->tcp_gso_segs
Fixes: 832d11c5cd07 ("tcp: Try to restore large SKBs while SACK processing")
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: Jonathan Looney <[email protected]>
Acked-by: Neal Cardwell <[email protected]>
Reviewed-by: Tyler Hicks <[email protected]>
Cc: Yuchung Cheng <[email protected]>
Cc: Bruce Curtis <[email protected]>
Cc: Jonathan Lemon <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
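Spelling the numbers out: 17 fragments of 64KB is 1,114,112 bytes, which at the minimum 48-byte MSS is about 23,211 segments per skb — and SACK-driven coalescing can sum several such counts into a u16 that tops out at 65,535. A stand-alone C illustration (numbers only; the kernel's pcount bookkeeping is more involved):
#include <stdint.h>
#include <stdio.h>
int main(void)
{
    const uint32_t frag_cap = 17u * 64u * 1024u; /* 17 frags x 64KB (PowerPC) */
    const uint32_t mss = 48;                     /* smallest MSS Linux accepts */
    uint32_t per_skb = (frag_cap + mss - 1) / mss;
    uint16_t gso_segs = 0;                       /* same width as tcp_gso_segs */
    int i;
    printf("segments per maximally filled skb: %u\n", (unsigned)per_skb);
    for (i = 0; i < 3; i++)                      /* coalesce three such skbs */
        gso_segs += (uint16_t)per_skb;
    printf("u16 accumulator afterwards: %u (wrapped)\n", (unsigned)gso_segs);
    return 0;
}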
void Val(mongo::OID& v) const {
v = OID();
} | 0 | [
"CWE-613"
]
| mongo | e55d6e2292e5dbe2f97153251d8193d1cc89f5d7 | 68,121,603,032,361,840,000,000,000,000,000,000,000 | 3 | SERVER-38984 Validate unique User ID on UserCache hit |
static inline int check_mnt(struct mount *mnt)
{
return mnt->mnt_ns == current->nsproxy->mnt_ns;
} | 0 | [
"CWE-269"
]
| user-namespace | a6138db815df5ee542d848318e5dae681590fccd | 182,282,151,125,182,740,000,000,000,000,000,000,000 | 4 | mnt: Only change user settable mount flags in remount
Kenton Varda <[email protected]> discovered that by remounting a
read-only bind mount read-only in a user namespace the
MNT_LOCK_READONLY bit would be cleared, allowing an unprivileged user
to then remount a read-only mount read-write.
Correct this by replacing the mask of mount flags to preserve
with a mask of mount flags that may be changed, and preserve
all others. This ensures that any future bugs with this mask and
remount will fail in an easy-to-detect way where new mount flags
simply won't change.
Cc: [email protected]
Acked-by: Serge E. Hallyn <[email protected]>
Signed-off-by: "Eric W. Biederman" <[email protected]> |
TPMT_SYM_DEF_OBJECT_Marshal(TPMT_SYM_DEF_OBJECT *source, BYTE **buffer, INT32 *size)
{
UINT16 written = 0;
written += TPMI_ALG_SYM_OBJECT_Marshal(&source->algorithm, buffer, size);
written += TPMU_SYM_KEY_BITS_Marshal(&source->keyBits, buffer, size, source->algorithm);
written += TPMU_SYM_MODE_Marshal(&source->mode, buffer, size, source->algorithm);
return written;
} | 0 | [
"CWE-787"
]
| libtpms | 3ef9b26cb9f28bd64d738bff9505a20d4eb56acd | 163,057,697,811,178,610,000,000,000,000,000,000,000 | 9 | tpm2: Add maxSize parameter to TPM2B_Marshal for sanity checks
Add maxSize parameter to TPM2B_Marshal and assert on it checking
the size of the data intended to be marshaled versus the maximum
buffer size.
Signed-off-by: Stefan Berger <[email protected]> |
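A rough sketch of what such a guard looks like — the TPM2B type and tpm2b_marshal() helper here are hypothetical simplifications, not the actual libtpms marshaller:
#include <assert.h>
#include <stdint.h>
#include <string.h>
typedef struct { uint16_t size; uint8_t buffer[64]; } TPM2B;
/* Marshal length-prefixed bytes, refusing sizes beyond the declared capacity. */
static uint16_t tpm2b_marshal(const TPM2B *src, uint16_t max_size,
                              uint8_t **buf, int32_t *remaining)
{
    assert(src->size <= max_size);            /* the added sanity check */
    if (*remaining < (int32_t)(2 + src->size))
        return 0;                             /* caller's buffer too small */
    (*buf)[0] = (uint8_t)(src->size >> 8);    /* big-endian UINT16 length */
    (*buf)[1] = (uint8_t)(src->size & 0xff);
    memcpy(*buf + 2, src->buffer, src->size);
    *buf += 2 + src->size;
    *remaining -= 2 + src->size;
    return (uint16_t)(2 + src->size);
}
int main(void)
{
    TPM2B b = { 4, "key" };
    uint8_t out[16], *p = out;
    int32_t left = (int32_t)sizeof(out);
    return tpm2b_marshal(&b, sizeof(b.buffer), &p, &left) ? 0 : 1;
}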
void RGWGetObjLayout_ObjStore_S3::send_response()
{
if (op_ret)
set_req_state_err(s, op_ret);
dump_errno(s);
end_header(s, this, "application/json");
JSONFormatter f;
if (op_ret < 0) {
return;
}
f.open_object_section("result");
::encode_json("head", head_obj, &f);
::encode_json("manifest", *manifest, &f);
f.open_array_section("data_location");
for (auto miter = manifest->obj_begin(); miter != manifest->obj_end(); ++miter) {
f.open_object_section("obj");
rgw_raw_obj raw_loc = miter.get_location().get_raw_obj(store->getRados());
uint64_t ofs = miter.get_ofs();
uint64_t left = manifest->get_obj_size() - ofs;
::encode_json("ofs", miter.get_ofs(), &f);
::encode_json("loc", raw_loc, &f);
::encode_json("loc_ofs", miter.location_ofs(), &f);
uint64_t loc_size = miter.get_stripe_size();
if (loc_size > left) {
loc_size = left;
}
::encode_json("loc_size", loc_size, &f);
f.close_section();
rgw_flush_formatter(s, &f);
}
f.close_section();
f.close_section();
rgw_flush_formatter(s, &f);
} | 0 | [
"CWE-79"
]
| ceph | 8f90658c731499722d5f4393c8ad70b971d05f77 | 174,624,644,710,123,240,000,000,000,000,000,000,000 | 37 | rgw: reject unauthenticated response-header actions
Signed-off-by: Matt Benjamin <[email protected]>
Reviewed-by: Casey Bodley <[email protected]>
(cherry picked from commit d8dd5e513c0c62bbd7d3044d7e2eddcd897bd400) |
unsigned long netdev_fix_features(unsigned long features, const char *name)
{
/* Fix illegal SG+CSUM combinations. */
if ((features & NETIF_F_SG) &&
!(features & NETIF_F_ALL_CSUM)) {
if (name)
printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
"checksum feature.\n", name);
features &= ~NETIF_F_SG;
}
/* TSO requires that SG is present as well. */
if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
if (name)
printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
"SG feature.\n", name);
features &= ~NETIF_F_TSO;
}
if (features & NETIF_F_UFO) {
/* maybe split UFO into V4 and V6? */
if (!((features & NETIF_F_GEN_CSUM) ||
(features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
== (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
if (name)
printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
"since no checksum offload features.\n",
name);
features &= ~NETIF_F_UFO;
}
if (!(features & NETIF_F_SG)) {
if (name)
printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
"since no NETIF_F_SG feature.\n", name);
features &= ~NETIF_F_UFO;
}
}
return features;
} | 0 | [
"CWE-264"
]
| linux | 8909c9ad8ff03611c9c96c9a92656213e4bb495b | 320,112,637,193,339,900,000,000,000,000,000,000,000 | 41 | net: don't allow CAP_NET_ADMIN to load non-netdev kernel modules
Since a8f80e8ff94ecba629542d9b4b5f5a8ee3eb565c any process with
CAP_NET_ADMIN may load any module from /lib/modules/. This doesn't mean
that CAP_NET_ADMIN is a superset of CAP_SYS_MODULE as modules are
limited to /lib/modules/**. However, the CAP_NET_ADMIN capability shouldn't
allow anybody to load any module not related to networking.
This patch restricts the ability to autoload modules to netdev modules
with explicit aliases. This fixes CVE-2011-1019.
Arnd Bergmann suggested leaving untouched the old pre-v2.6.32 behavior
of loading netdev modules by name (without any prefix) for processes
with CAP_SYS_MODULE, to maintain compatibility with network scripts
that autoload netdev modules by aliases like "eth0", "wlan0".
Currently there are only three users of the feature in the upstream
kernel: ipip, ip_gre and sit.
root@albatros:~# capsh --drop=$(seq -s, 0 11),$(seq -s, 13 34) --
root@albatros:~# grep Cap /proc/$$/status
CapInh: 0000000000000000
CapPrm: fffffff800001000
CapEff: fffffff800001000
CapBnd: fffffff800001000
root@albatros:~# modprobe xfs
FATAL: Error inserting xfs
(/lib/modules/2.6.38-rc6-00001-g2bf4ca3/kernel/fs/xfs/xfs.ko): Operation not permitted
root@albatros:~# lsmod | grep xfs
root@albatros:~# ifconfig xfs
xfs: error fetching interface information: Device not found
root@albatros:~# lsmod | grep xfs
root@albatros:~# lsmod | grep sit
root@albatros:~# ifconfig sit
sit: error fetching interface information: Device not found
root@albatros:~# lsmod | grep sit
root@albatros:~# ifconfig sit0
sit0 Link encap:IPv6-in-IPv4
NOARP MTU:1480 Metric:1
root@albatros:~# lsmod | grep sit
sit 10457 0
tunnel4 2957 1 sit
For CAP_SYS_MODULE module loading is still relaxed:
root@albatros:~# grep Cap /proc/$$/status
CapInh: 0000000000000000
CapPrm: ffffffffffffffff
CapEff: ffffffffffffffff
CapBnd: ffffffffffffffff
root@albatros:~# ifconfig xfs
xfs: error fetching interface information: Device not found
root@albatros:~# lsmod | grep xfs
xfs 745319 0
Reference: https://lkml.org/lkml/2011/2/24/203
Signed-off-by: Vasiliy Kulikov <[email protected]>
Signed-off-by: Michael Tokarev <[email protected]>
Acked-by: David S. Miller <[email protected]>
Acked-by: Kees Cook <[email protected]>
Signed-off-by: James Morris <[email protected]> |
udisks_log (UDisksLogLevel level,
const gchar *function,
const gchar *location,
const gchar *format,
...)
{
va_list var_args;
gchar *message;
va_start (var_args, format);
message = g_strdup_vprintf (format, var_args);
va_end (var_args);
#if GLIB_CHECK_VERSION(2, 50, 0)
g_log_structured ("udisks", (GLogLevelFlags) level,
"MESSAGE", "%s", message, "THREAD_ID", "%d", (gint) syscall (SYS_gettid),
"CODE_FUNC", function, "CODE_FILE", location);
#else
g_log ("udisks", level, "[%d]: %s [%s, %s()]", (gint) syscall (SYS_gettid), message, location, function);
#endif
g_free (message);
} | 0 | [
"CWE-134"
]
| udisks | e369a9b4b08e9373c814c05328b366c938284eb5 | 87,218,934,448,487,830,000,000,000,000,000,000,000 | 23 | Fix string format vulnerability
If the message in g_log_structured itself
contained format sequences like %d or %n, they
were applied again, leading to leaked stack contents
and possibly memory corruption. It can be triggered
e.g. by a volume label containing format sequences.
Print the message argument itself into a "%s" string
to avoid interpreting format sequences.
https://github.com/storaged-project/udisks/issues/578 |
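The generic fix for this bug class is to never let attacker-influenced text reach the format argument; funnel it through a constant "%s" instead. A tiny plain-C illustration, with printf standing in for g_log:
#include <stdio.h>
int main(void)
{
    /* imagine this came from a volume label */
    const char *label = "evil %n %x %x";
    /* WRONG: the label would be interpreted as a format string:
     *     printf(label);
     */
    /* RIGHT: the format is a constant, the label is just data */
    printf("%s\n", label);
    return 0;
}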
static void sock_def_write_space(struct sock *sk)
{
struct socket_wq *wq;
rcu_read_lock();
/* Do not wake up a writer until he can make "significant"
* progress. --DaveM
*/
if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= READ_ONCE(sk->sk_sndbuf)) {
wq = rcu_dereference(sk->sk_wq);
if (skwq_has_sleeper(wq))
wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
EPOLLWRNORM | EPOLLWRBAND);
/* Should agree with poll, otherwise some programs break */
if (sock_writeable(sk))
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
}
rcu_read_unlock();
} | 0 | []
| net | 35306eb23814444bd4021f8a1c3047d3cb0c8b2b | 327,259,554,568,882,500,000,000,000,000,000,000,000 | 22 | af_unix: fix races in sk_peer_pid and sk_peer_cred accesses
Jann Horn reported that SO_PEERCRED and SO_PEERGROUPS implementations
are racy, as af_unix can concurrently change sk_peer_pid and sk_peer_cred.
In order to fix this issue, this patch adds a new spinlock that needs
to be used whenever these fields are read or written.
Jann also pointed out that l2cap_sock_get_peer_pid_cb() is currently
reading sk->sk_peer_pid which makes no sense, as this field
is only possibly set by AF_UNIX sockets.
We will have to clean this in a separate patch.
This could be done by reverting b48596d1dc25 "Bluetooth: L2CAP: Add get_peer_pid callback"
or implementing what was truly expected.
Fixes: 109f6e39fa07 ("af_unix: Allow SO_PEERCRED to work across namespaces.")
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: Jann Horn <[email protected]>
Cc: Eric W. Biederman <[email protected]>
Cc: Luiz Augusto von Dentz <[email protected]>
Cc: Marcel Holtmann <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
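A userspace caricature of the locking shape: one dedicated lock serializing every reader and writer of the two peer fields. A POSIX spinlock and two long fields stand in for the kernel spinlock and the struct pid / struct cred pointers:
#include <pthread.h>
#include <stdio.h>
struct peer_state {
    pthread_spinlock_t peer_lock;  /* the new lock */
    long peer_pid;
    long peer_cred;
};
static void set_peer(struct peer_state *s, long pid, long cred)
{
    pthread_spin_lock(&s->peer_lock);   /* writers take the lock... */
    s->peer_pid = pid;
    s->peer_cred = cred;
    pthread_spin_unlock(&s->peer_lock);
}
static void get_peer(struct peer_state *s, long *pid, long *cred)
{
    pthread_spin_lock(&s->peer_lock);   /* ...and so do readers, so the
                                           pid/cred pair is always coherent */
    *pid = s->peer_pid;
    *cred = s->peer_cred;
    pthread_spin_unlock(&s->peer_lock);
}
int main(void)
{
    struct peer_state s;
    long pid, cred;
    pthread_spin_init(&s.peer_lock, PTHREAD_PROCESS_PRIVATE);
    set_peer(&s, 42, 1000);
    get_peer(&s, &pid, &cred);
    printf("pid=%ld cred=%ld\n", pid, cred);
    pthread_spin_destroy(&s.peer_lock);
    return 0;
}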
gint32 ves_icall_System_Threading_Interlocked_Exchange_Int (gint32 *location, gint32 value)
{
MONO_ARCH_SAVE_REGS;
return InterlockedExchange(location, value);
} | 0 | [
"CWE-399",
"CWE-264"
]
| mono | 722f9890f09aadfc37ae479e7d946d5fc5ef7b91 | 148,470,598,372,963,330,000,000,000,000,000,000,000 | 6 | Fix access to freed members of a dead thread
* threads.c: Fix access to freed members of a dead thread. Found
and fixed by Rodrigo Kumpera <[email protected]>
Ref: CVE-2011-0992 |
static int ntop_list_http_hosts(lua_State* vm) {
NetworkInterface *ntop_interface = getCurrentInterface(vm);
char *key;
ntop->getTrace()->traceEvent(TRACE_DEBUG, "%s() called", __FUNCTION__);
if(!ntop_interface) return(CONST_LUA_ERROR);
if(lua_type(vm, 1) != LUA_TSTRING) /* Optional */
key = NULL;
else
key = (char*)lua_tostring(vm, 1);
ntop_interface->listHTTPHosts(vm, key);
return(CONST_LUA_OK);
} | 0 | [
"CWE-476"
]
| ntopng | 01f47e04fd7c8d54399c9e465f823f0017069f8f | 189,937,549,844,498,280,000,000,000,000,000,000,000 | 17 | Security fix: prevents empty host from being used |
fsobj_error(int *a_eno, struct archive_string *a_estr,
int err, const char *errstr, const char *path)
{
if (a_eno)
*a_eno = err;
if (a_estr)
archive_string_sprintf(a_estr, "%s%s", errstr, path);
} | 0 | [
"CWE-59",
"CWE-269"
]
| libarchive | b41daecb5ccb4c8e3b2c53fd6147109fc12c3043 | 75,732,856,503,056,600,000,000,000,000,000,000,000 | 8 | Do not follow symlinks when processing the fixup list
Use lchmod() instead of chmod() and tell the remaining functions that the
real file to be modified is a symbolic link.
Fixes #1566 |
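Where no true lchmod() exists (Linux generally lacks one, and fchmodat() with AT_SYMLINK_NOFOLLOW commonly fails with ENOTSUP there), the defensive idiom is to check with lstat() and skip symlinks outright. A sketch — note a TOCTOU window remains between lstat() and chmod(), which is why production code prefers fd-based operations:
#include <sys/stat.h>
/* Apply a mode only if the path is not a symlink. */
static int chmod_nofollow(const char *path, mode_t mode)
{
    struct stat st;
    if (lstat(path, &st) != 0)
        return -1;
    if (S_ISLNK(st.st_mode))
        return 0;                 /* never follow a (possibly planted) link */
    return chmod(path, mode);     /* racy vs. lstat(); sketch only */
}
int main(int argc, char **argv)
{
    if (argc < 2)
        return 1;
    return chmod_nofollow(argv[1], 0644) ? 1 : 0;
}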
GF_Err dfla_box_dump(GF_Box *a, FILE * trace)
{
GF_FLACConfigBox *ptr = (GF_FLACConfigBox *)a;
gf_isom_box_dump_start(a, "FLACSpecificBox", trace);
gf_fprintf(trace, " dataSize=\"%d\">\n", ptr->dataSize);
gf_isom_box_dump_done("FLACSpecificBox", a, trace);
return GF_OK;
} | 0 | [
"CWE-787"
]
| gpac | ea1eca00fd92fa17f0e25ac25652622924a9a6a0 | 216,495,899,546,224,680,000,000,000,000,000,000,000 | 8 | fixed #2138 |
birthday_populate (EContact *contact,
gchar **values)
{
if (values[0]) {
EContactDate *dt = e_contact_date_from_string (values[0]);
e_contact_set (contact, E_CONTACT_BIRTH_DATE, dt);
e_contact_date_free (dt);
}
} | 0 | []
| evolution-data-server | 34bad61738e2127736947ac50e0c7969cc944972 | 63,042,928,208,104,960,000,000,000,000,000,000,000 | 9 | Bug 796174 - strcat() considered unsafe for buffer overflow |
int sldns_str2wire_dname_buf_origin(const char* str, uint8_t* buf, size_t* len,
uint8_t* origin, size_t origin_len)
{
size_t dlen = *len;
int rel = 0;
int s = sldns_str2wire_dname_buf_rel(str, buf, &dlen, &rel);
if(s) return s;
if(rel && origin && dlen > 0) {
if((unsigned)dlen >= 0x00ffffffU ||
(unsigned)origin_len >= 0x00ffffffU)
/* guard against integer overflow in addition */
return RET_ERR(LDNS_WIREPARSE_ERR_GENERAL, *len);
if(dlen + origin_len - 1 > LDNS_MAX_DOMAINLEN)
return RET_ERR(LDNS_WIREPARSE_ERR_DOMAINNAME_OVERFLOW,
LDNS_MAX_DOMAINLEN);
if(dlen + origin_len - 1 > *len)
return RET_ERR(LDNS_WIREPARSE_ERR_BUFFER_TOO_SMALL,
*len);
memmove(buf+dlen-1, origin, origin_len);
*len = dlen + origin_len - 1;
} else
*len = dlen;
return LDNS_WIREPARSE_ERR_OK;
} | 0 | []
| unbound | 3f3cadd416d6efa92ff2d548ac090f42cd79fee9 | 265,728,198,206,763,070,000,000,000,000,000,000,000 | 25 | - Fix Out of Bounds Write in sldns_str2wire_str_buf(),
reported by X41 D-Sec. |
static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
unsigned long addr, unsigned long end,
swp_entry_t entry, struct page *page)
{
pmd_t *pmd;
unsigned long next;
int ret;
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
if (unlikely(pmd_trans_huge(*pmd)))
continue;
if (pmd_none_or_clear_bad(pmd))
continue;
ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
if (ret)
return ret;
} while (pmd++, addr = next, addr != end);
return 0;
} | 1 | [
"CWE-264"
]
| linux-2.6 | 1a5a9906d4e8d1976b701f889d8f35d54b928f25 | 205,910,742,923,901,300,000,000,000,000,000,000,000 | 21 | mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode
In some cases it may happen that pmd_none_or_clear_bad() is called with
the mmap_sem held in read mode. In those cases the huge page faults can
allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a
false positive from pmd_bad() that will not like to see a pmd
materializing as trans huge.
It's not khugepaged causing the problem: khugepaged holds the mmap_sem
in write mode (and all those sites must hold the mmap_sem in read mode
to prevent pagetables from going away from under them; during code review it
seems vm86 mode on 32bit kernels requires that too unless it's
restricted to 1 thread per process or UP builds). The race is only with
the huge pagefaults that can convert a pmd_none() into a
pmd_trans_huge().
Effectively all these pmd_none_or_clear_bad() sites running with
mmap_sem in read mode are somewhat speculative with the page faults, and
the result is always undefined when they run simultaneously. This is
probably why it wasn't common to run into this. For example if the
madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page
fault, the hugepage will not be zapped, if the page fault runs first it
will be zapped.
Altering pmd_bad() not to error out if it finds hugepmds won't be enough
to fix this, because zap_pmd_range would then proceed to call
zap_pte_range (which would be incorrect if the pmd become a
pmd_trans_huge()).
The simplest way to fix this is to read the pmd in the local stack
(regardless of what we read, no need of actual CPU barriers, only
compiler barrier needed), and be sure it is not changing under the code
that computes its value. Even if the real pmd is changing under the
value we hold on the stack, we don't care. If we actually end up in
zap_pte_range it means the pmd was not none already and it was not huge,
and it can't become huge from under us (khugepaged locking explained
above).
All we need is to enforce that there is no way anymore that in a code
path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad
can run into a hugepmd. The overhead of a barrier() is just a compiler
tweak and should not be measurable (I only added it for THP builds). I
don't exclude different compiler versions may have prevented the race
too by caching the value of *pmd on the stack (that hasn't been
verified, but it wouldn't be impossible considering
pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines
and there's no external function called in between pmd_trans_huge and
pmd_none_or_clear_bad).
if (pmd_trans_huge(*pmd)) {
if (next-addr != HPAGE_PMD_SIZE) {
VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
split_huge_page_pmd(vma->vm_mm, pmd);
} else if (zap_huge_pmd(tlb, vma, pmd, addr))
continue;
/* fall through */
}
if (pmd_none_or_clear_bad(pmd))
Because this race condition could be exercised without special
privileges this was reported in CVE-2012-1179.
The race was identified and fully explained by Ulrich who debugged it.
I'm quoting his accurate explanation below, for reference.
====== start quote =======
mapcount 0 page_mapcount 1
kernel BUG at mm/huge_memory.c:1384!
At some point prior to the panic, a "bad pmd ..." message similar to the
following is logged on the console:
mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7).
The "bad pmd ..." message is logged by pmd_clear_bad() before it clears
the page's PMD table entry.
143 void pmd_clear_bad(pmd_t *pmd)
144 {
-> 145 pmd_ERROR(*pmd);
146 pmd_clear(pmd);
147 }
After the PMD table entry has been cleared, there is an inconsistency
between the actual number of PMD table entries that are mapping the page
and the page's map count (_mapcount field in struct page). When the page
is subsequently reclaimed, __split_huge_page() detects this inconsistency.
1381 if (mapcount != page_mapcount(page))
1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n",
1383 mapcount, page_mapcount(page));
-> 1384 BUG_ON(mapcount != page_mapcount(page));
The root cause of the problem is a race of two threads in a multithreaded
process. Thread B incurs a page fault on a virtual address that has never
been accessed (PMD entry is zero) while Thread A is executing an madvise()
system call on a virtual address within the same 2 MB (huge page) range.
virtual address space
.---------------------.
| |
| |
.-|---------------------|
| | |
| | |<-- B(fault)
| | |
2 MB | |/////////////////////|-.
huge < |/////////////////////| > A(range)
page | |/////////////////////|-'
| | |
| | |
'-|---------------------|
| |
| |
'---------------------'
- Thread A is executing an madvise(..., MADV_DONTNEED) system call
on the virtual address range "A(range)" shown in the picture.
sys_madvise
// Acquire the semaphore in shared mode.
down_read(¤t->mm->mmap_sem)
...
madvise_vma
switch (behavior)
case MADV_DONTNEED:
madvise_dontneed
zap_page_range
unmap_vmas
unmap_page_range
zap_pud_range
zap_pmd_range
//
// Assume that this huge page has never been accessed.
// I.e. content of the PMD entry is zero (not mapped).
//
if (pmd_trans_huge(*pmd)) {
// We don't get here due to the above assumption.
}
//
// Assume that Thread B incurred a page fault and
.---------> // sneaks in here as shown below.
| //
| if (pmd_none_or_clear_bad(pmd))
| {
| if (unlikely(pmd_bad(*pmd)))
| pmd_clear_bad
| {
| pmd_ERROR
| // Log "bad pmd ..." message here.
| pmd_clear
| // Clear the page's PMD entry.
| // Thread B incremented the map count
| // in page_add_new_anon_rmap(), but
| // now the page is no longer mapped
| // by a PMD entry (-> inconsistency).
| }
| }
|
v
- Thread B is handling a page fault on virtual address "B(fault)" shown
in the picture.
...
do_page_fault
__do_page_fault
// Acquire the semaphore in shared mode.
down_read_trylock(&mm->mmap_sem)
...
handle_mm_fault
if (pmd_none(*pmd) && transparent_hugepage_enabled(vma))
// We get here due to the above assumption (PMD entry is zero).
do_huge_pmd_anonymous_page
alloc_hugepage_vma
// Allocate a new transparent huge page here.
...
__do_huge_pmd_anonymous_page
...
spin_lock(&mm->page_table_lock)
...
page_add_new_anon_rmap
// Here we increment the page's map count (starts at -1).
atomic_set(&page->_mapcount, 0)
set_pmd_at
// Here we set the page's PMD entry which will be cleared
// when Thread A calls pmd_clear_bad().
...
spin_unlock(&mm->page_table_lock)
The mmap_sem does not prevent the race because both threads are acquiring
it in shared mode (down_read). Thread B holds the page_table_lock while
the page's map count and PMD table entry are updated. However, Thread A
does not synchronize on that lock.
====== end quote =======
[[email protected]: checkpatch fixes]
Reported-by: Ulrich Obergfell <[email protected]>
Signed-off-by: Andrea Arcangeli <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Dave Jones <[email protected]>
Acked-by: Larry Woodman <[email protected]>
Acked-by: Rik van Riel <[email protected]>
Cc: <[email protected]> [2.6.38+]
Cc: Mark Salter <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
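The essence of the fix: snapshot *pmd into a local once, insert a compiler barrier, then make every decision on the snapshot, so a concurrent huge-page fault cannot change the answer between checks. A userspace caricature with toy pmd_t and predicates (the real helper this became is pmd_none_or_trans_huge_or_clear_bad(); barrier() below is the GCC/Clang idiom):
#include <stdio.h>
typedef struct { unsigned long val; } pmd_t;
#define barrier() __asm__ __volatile__("" ::: "memory")
static int pmd_none(pmd_t p)       { return p.val == 0; }
static int pmd_trans_huge(pmd_t p) { return p.val & 0x1; }
/* All tests run against one snapshot, so a concurrent writer flipping
 * *pmd cannot make the individual checks disagree with each other. */
static int pmd_none_or_trans_huge(pmd_t *pmd)
{
    pmd_t pmdval = *pmd;
    barrier();                     /* don't let the compiler re-read *pmd */
    return pmd_none(pmdval) || pmd_trans_huge(pmdval);
}
int main(void)
{
    pmd_t p = { 0 };
    printf("%d\n", pmd_none_or_trans_huge(&p));
    return 0;
}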
static struct binder_thread *binder_get_txn_from_and_acq_inner(
struct binder_transaction *t)
{
struct binder_thread *from;
from = binder_get_txn_from(t);
if (!from)
return NULL;
binder_inner_proc_lock(from->proc);
if (t->from) {
BUG_ON(from != t->from);
return from;
}
binder_inner_proc_unlock(from->proc);
binder_thread_dec_tmpref(from);
return NULL;
} | 0 | [
"CWE-416"
]
| linux | 7bada55ab50697861eee6bb7d60b41e68a961a9c | 64,698,462,756,021,850,000,000,000,000,000,000,000 | 17 | binder: fix race that allows malicious free of live buffer
Malicious code can attempt to free buffers using the BC_FREE_BUFFER
ioctl to binder. There are protections against a user freeing a buffer
while in use by the kernel; however, there was a window where
BC_FREE_BUFFER could be used to free a recently allocated buffer that
was not completely initialized. This resulted in a use-after-free
detected by KASAN with a malicious test program.
This window is closed by setting the buffer's allow_user_free attribute
to 0 when the buffer is allocated or when the user has previously freed
it instead of waiting for the caller to set it. The problem was that
when the struct buffer was recycled, allow_user_free was stale and set
to 1, allowing a free to go through.
Signed-off-by: Todd Kjos <[email protected]>
Acked-by: Arve Hjønnevåg <[email protected]>
Cc: stable <[email protected]> # 4.14
Signed-off-by: Greg Kroah-Hartman <[email protected]> |
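The recycling hazard generalizes: any permission flag left over from a pooled object's previous life must be reset at allocation time, not lazily by a later consumer. A toy sketch with a hypothetical one-slot buffer pool (not the binder allocator):
#include <stdio.h>
#include <string.h>
struct buffer {
    int allow_user_free;   /* must stay 0 until the transaction completes */
    char data[64];
};
static struct buffer pool[1];
static struct buffer *buffer_alloc(void)
{
    struct buffer *b = &pool[0];          /* toy allocator: recycles slot 0 */
    memset(b->data, 0, sizeof(b->data));
    b->allow_user_free = 0;               /* the fix: clear stale state here */
    return b;
}
static int buffer_user_free(struct buffer *b)
{
    if (!b->allow_user_free)
        return -1;                        /* reject frees of live buffers */
    b->allow_user_free = 0;
    return 0;
}
int main(void)
{
    struct buffer *b = buffer_alloc();
    printf("early free rejected: %s\n",
           buffer_user_free(b) < 0 ? "yes" : "no");
    return 0;
}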
static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
struct ipv6_txoptions *opt_to_free = NULL;
struct ipv6_txoptions opt_space;
DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
struct in6_addr *daddr, *final_p, final;
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct raw6_sock *rp = raw6_sk(sk);
struct ipv6_txoptions *opt = NULL;
struct ip6_flowlabel *flowlabel = NULL;
struct dst_entry *dst = NULL;
struct raw6_frag_vec rfv;
struct flowi6 fl6;
int addr_len = msg->msg_namelen;
int hlimit = -1;
int tclass = -1;
int dontfrag = -1;
u16 proto;
int err;
/* Rough check on arithmetic overflow,
better check is made in ip6_append_data().
*/
if (len > INT_MAX)
return -EMSGSIZE;
/* Mirror BSD error message compatibility */
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
/*
* Get and verify the address.
*/
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_mark = sk->sk_mark;
if (sin6) {
if (addr_len < SIN6_LEN_RFC2133)
return -EINVAL;
if (sin6->sin6_family && sin6->sin6_family != AF_INET6)
return -EAFNOSUPPORT;
/* port is the proto value [0..255] carried in nexthdr */
proto = ntohs(sin6->sin6_port);
if (!proto)
proto = inet->inet_num;
else if (proto != inet->inet_num)
return -EINVAL;
if (proto > 255)
return -EINVAL;
daddr = &sin6->sin6_addr;
if (np->sndflow) {
fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (!flowlabel)
return -EINVAL;
}
}
/*
* Otherwise it will be difficult to maintain
* sk->sk_dst_cache.
*/
if (sk->sk_state == TCP_ESTABLISHED &&
ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
daddr = &sk->sk_v6_daddr;
if (addr_len >= sizeof(struct sockaddr_in6) &&
sin6->sin6_scope_id &&
__ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
fl6.flowi6_oif = sin6->sin6_scope_id;
} else {
if (sk->sk_state != TCP_ESTABLISHED)
return -EDESTADDRREQ;
proto = inet->inet_num;
daddr = &sk->sk_v6_daddr;
fl6.flowlabel = np->flow_label;
}
if (fl6.flowi6_oif == 0)
fl6.flowi6_oif = sk->sk_bound_dev_if;
if (msg->msg_controllen) {
opt = &opt_space;
memset(opt, 0, sizeof(struct ipv6_txoptions));
opt->tot_len = sizeof(struct ipv6_txoptions);
err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
&hlimit, &tclass, &dontfrag);
if (err < 0) {
fl6_sock_release(flowlabel);
return err;
}
if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (!flowlabel)
return -EINVAL;
}
if (!(opt->opt_nflen|opt->opt_flen))
opt = NULL;
}
if (!opt) {
opt = txopt_get(np);
opt_to_free = opt;
}
if (flowlabel)
opt = fl6_merge_options(&opt_space, flowlabel, opt);
opt = ipv6_fixup_options(&opt_space, opt);
fl6.flowi6_proto = proto;
rfv.msg = msg;
rfv.hlen = 0;
err = rawv6_probe_proto_opt(&rfv, &fl6);
if (err)
goto out;
if (!ipv6_addr_any(daddr))
fl6.daddr = *daddr;
else
fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
fl6.saddr = np->saddr;
final_p = fl6_update_dst(&fl6, opt, &final);
if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
fl6.flowi6_oif = np->mcast_oif;
else if (!fl6.flowi6_oif)
fl6.flowi6_oif = np->ucast_oif;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
if (inet->hdrincl)
fl6.flowi6_flags |= FLOWI_FLAG_KNOWN_NH;
dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
goto out;
}
if (hlimit < 0)
hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
if (tclass < 0)
tclass = np->tclass;
if (dontfrag < 0)
dontfrag = np->dontfrag;
if (msg->msg_flags&MSG_CONFIRM)
goto do_confirm;
back_from_confirm:
if (inet->hdrincl)
err = rawv6_send_hdrinc(sk, msg, len, &fl6, &dst, msg->msg_flags);
else {
lock_sock(sk);
err = ip6_append_data(sk, raw6_getfrag, &rfv,
len, 0, hlimit, tclass, opt, &fl6, (struct rt6_info *)dst,
msg->msg_flags, dontfrag);
if (err)
ip6_flush_pending_frames(sk);
else if (!(msg->msg_flags & MSG_MORE))
err = rawv6_push_pending_frames(sk, &fl6, rp);
release_sock(sk);
}
done:
dst_release(dst);
out:
fl6_sock_release(flowlabel);
txopt_put(opt_to_free);
return err < 0 ? err : len;
do_confirm:
dst_confirm(dst);
if (!(msg->msg_flags & MSG_PROBE) || len)
goto back_from_confirm;
err = 0;
goto done;
} | 0 | [
"CWE-416",
"CWE-284",
"CWE-264"
]
| linux | 45f6fad84cc305103b28d73482b344d7f5b76f39 | 84,114,743,361,403,110,000,000,000,000,000,000,000 | 187 | ipv6: add complete rcu protection around np->opt
This patch addresses multiple problems:
UDP/RAW sendmsg() needs to get a stable struct ipv6_txoptions
while the socket is not locked: other threads can change np->opt
concurrently. Dmitry posted a syzkaller
(http://github.com/google/syzkaller) program demonstrating a
use-after-free.
Starting with TCP/DCCP lockless listeners, tcp_v6_syn_recv_sock()
and dccp_v6_request_recv_sock() also need to use RCU protection
to dereference np->opt once (before calling ipv6_dup_options())
This patch adds full RCU protection to np->opt
Reported-by: Dmitry Vyukov <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Acked-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
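A sketch of the get/put discipline the patch builds around np->opt, using C11 atomics and a hypothetical txoptions blob. Note the load-then-increment window in txopt_get() below is exactly the gap that rcu_read_lock()/rcu_dereference() close in the kernel; this only illustrates the reference-holding shape:
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
struct txoptions {
    atomic_int refcnt;
    int hop_limit;
};
static _Atomic(struct txoptions *) np_opt;
static struct txoptions *txopt_get(void)
{
    struct txoptions *opt = atomic_load(&np_opt);
    if (opt)
        atomic_fetch_add(&opt->refcnt, 1);   /* pin a stable snapshot */
    return opt;
}
static void txopt_put(struct txoptions *opt)
{
    if (opt && atomic_fetch_sub(&opt->refcnt, 1) == 1)
        free(opt);                           /* last reference frees */
}
int main(void)
{
    struct txoptions *o = malloc(sizeof(*o));
    struct txoptions *snap;
    atomic_init(&o->refcnt, 1);
    o->hop_limit = 64;
    atomic_store(&np_opt, o);
    snap = txopt_get();                      /* sendmsg() path */
    printf("hop_limit=%d\n", snap->hop_limit);
    txopt_put(snap);                         /* done with the snapshot */
    txopt_put(atomic_load(&np_opt));         /* drop the owner's reference */
    return 0;
}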
ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags)
{
loff_t isize, left;
int ret;
isize = i_size_read(in->f_mapping->host);
if (unlikely(*ppos >= isize))
return 0;
left = isize - *ppos;
if (unlikely(left < len))
len = left;
ret = __generic_file_splice_read(in, ppos, pipe, len, flags);
if (ret > 0) {
*ppos += ret;
file_accessed(in);
}
return ret;
} | 0 | [
"CWE-284",
"CWE-264"
]
| linux | 8d0207652cbe27d1f962050737848e5ad4671958 | 220,431,777,718,407,000,000,000,000,000,000,000,000 | 23 | ->splice_write() via ->write_iter()
iter_file_splice_write() - a ->splice_write() instance that gathers the
pipe buffers, builds a bio_vec-based iov_iter covering those and feeds
it to ->write_iter(). A bunch of simple cases coverted to that...
[AV: fixed the braino spotted by Cyrill]
Signed-off-by: Al Viro <[email protected]> |
router_dir_info_changed(void)
{
need_to_update_have_min_dir_info = 1;
rend_hsdir_routers_changed();
} | 0 | [
"CWE-399"
]
| tor | 308f6dad20675c42b29862f4269ad1fbfb00dc9a | 91,331,866,059,817,300,000,000,000,000,000,000,000 | 5 | Mitigate a side-channel leak of which relays Tor chooses for a circuit
Tor's and OpenSSL's current designs guarantee that there are other leaks,
but this one is likely to be more easily exploitable, and is easy to fix. |
load_vector_int(BitStream& bit_stream, int nitems, std::vector<T>& vec,
int bits_wanted, int_type T::*field)
{
bool append = vec.empty();
// nitems times, read bits_wanted from the given bit stream,
// storing results in the ith vector entry.
for (int i = 0; i < nitems; ++i)
{
if (append)
{
vec.push_back(T());
}
vec.at(i).*field = bit_stream.getBits(bits_wanted);
}
if (static_cast<int>(vec.size()) != nitems)
{
throw std::logic_error("vector has wrong size in load_vector_int");
}
// The PDF spec says that each hint table starts at a byte
// boundary. Each "row" actually must start on a byte boundary.
bit_stream.skipToNextByte();
} | 1 | [
"CWE-787"
]
| qpdf | d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e | 77,745,159,151,214,640,000,000,000,000,000,000,000 | 23 | Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition. |
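The helpers the message alludes to boil down to a range check ahead of every narrowing cast. A generic C sketch of the idea (qpdf's own QIntC helpers are C++ templates that throw; this is not that code):
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
/* Convert size_t -> int, aborting loudly instead of silently wrapping. */
static int to_int(size_t v)
{
    if (v > (size_t)INT_MAX) {
        fprintf(stderr, "integer conversion out of range: %zu\n", v);
        abort();
    }
    return (int)v;
}
int main(void)
{
    printf("%d\n", to_int(12345));               /* fine */
    /* to_int((size_t)INT_MAX + 1) would abort, not overflow */
    return 0;
}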
static bool r_parse_pointer(RParsedPointer *ptr, ut64 decorated_addr, RKernelCacheObj *obj) {
/*
* Logic taken from:
* https://github.com/Synacktiv/kernelcache-laundering/blob/master/ios12_kernel_cache_helper.py
*/
if ((decorated_addr & 0x4000000000000000LL) == 0 && obj->rebase_info) {
if (decorated_addr & 0x8000000000000000LL) {
ptr->address = obj->rebase_info->kernel_base + (decorated_addr & 0xFFFFFFFFLL);
} else {
ptr->address = ((decorated_addr << 13) & 0xFF00000000000000LL) | (decorated_addr & 0x7ffffffffffLL);
if (decorated_addr & 0x40000000000LL) {
ptr->address |= 0xfffc0000000000LL;
}
}
} else {
ptr->address = decorated_addr;
}
return true;
} | 0 | [
"CWE-476"
]
| radare2 | feaa4e7f7399c51ee6f52deb84dc3f795b4035d6 | 62,318,195,815,103,970,000,000,000,000,000,000,000 | 21 | Fix null deref in xnu.kernelcache ##crash
* Reported by @xshad3 via huntr.dev |
e1000e_build_rx_metadata(E1000ECore *core,
struct NetRxPkt *pkt,
bool is_eop,
const E1000E_RSSInfo *rss_info,
uint32_t *rss, uint32_t *mrq,
uint32_t *status_flags,
uint16_t *ip_id,
uint16_t *vlan_tag)
{
struct virtio_net_hdr *vhdr;
bool isip4, isip6, istcp, isudp;
uint32_t pkt_type;
*status_flags = E1000_RXD_STAT_DD;
/* No additional metadata needed for non-EOP descriptors */
if (!is_eop) {
goto func_exit;
}
*status_flags |= E1000_RXD_STAT_EOP;
net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);
trace_e1000e_rx_metadata_protocols(isip4, isip6, isudp, istcp);
/* VLAN state */
if (net_rx_pkt_is_vlan_stripped(pkt)) {
*status_flags |= E1000_RXD_STAT_VP;
*vlan_tag = cpu_to_le16(net_rx_pkt_get_vlan_tag(pkt));
trace_e1000e_rx_metadata_vlan(*vlan_tag);
}
/* Packet parsing results */
if ((core->mac[RXCSUM] & E1000_RXCSUM_PCSD) != 0) {
if (rss_info->enabled) {
*rss = cpu_to_le32(rss_info->hash);
*mrq = cpu_to_le32(rss_info->type | (rss_info->queue << 8));
trace_e1000e_rx_metadata_rss(*rss, *mrq);
}
} else if (isip4) {
*status_flags |= E1000_RXD_STAT_IPIDV;
*ip_id = cpu_to_le16(net_rx_pkt_get_ip_id(pkt));
trace_e1000e_rx_metadata_ip_id(*ip_id);
}
if (istcp && e1000e_is_tcp_ack(core, pkt)) {
*status_flags |= E1000_RXD_STAT_ACK;
trace_e1000e_rx_metadata_ack();
}
if (isip6 && (core->mac[RFCTL] & E1000_RFCTL_IPV6_DIS)) {
trace_e1000e_rx_metadata_ipv6_filtering_disabled();
pkt_type = E1000_RXD_PKT_MAC;
} else if (istcp || isudp) {
pkt_type = isip4 ? E1000_RXD_PKT_IP4_XDP : E1000_RXD_PKT_IP6_XDP;
} else if (isip4 || isip6) {
pkt_type = isip4 ? E1000_RXD_PKT_IP4 : E1000_RXD_PKT_IP6;
} else {
pkt_type = E1000_RXD_PKT_MAC;
}
*status_flags |= E1000_RXD_PKT_TYPE(pkt_type);
trace_e1000e_rx_metadata_pkt_type(pkt_type);
/* RX CSO information */
if (isip6 && (core->mac[RFCTL] & E1000_RFCTL_IPV6_XSUM_DIS)) {
trace_e1000e_rx_metadata_ipv6_sum_disabled();
goto func_exit;
}
if (!net_rx_pkt_has_virt_hdr(pkt)) {
trace_e1000e_rx_metadata_no_virthdr();
e1000e_verify_csum_in_sw(core, pkt, status_flags, istcp, isudp);
goto func_exit;
}
vhdr = net_rx_pkt_get_vhdr(pkt);
if (!(vhdr->flags & VIRTIO_NET_HDR_F_DATA_VALID) &&
!(vhdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) {
trace_e1000e_rx_metadata_virthdr_no_csum_info();
e1000e_verify_csum_in_sw(core, pkt, status_flags, istcp, isudp);
goto func_exit;
}
if (e1000e_rx_l3_cso_enabled(core)) {
*status_flags |= isip4 ? E1000_RXD_STAT_IPCS : 0;
} else {
trace_e1000e_rx_metadata_l3_cso_disabled();
}
if (e1000e_rx_l4_cso_enabled(core)) {
if (istcp) {
*status_flags |= E1000_RXD_STAT_TCPCS;
} else if (isudp) {
*status_flags |= E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS;
}
} else {
trace_e1000e_rx_metadata_l4_cso_disabled();
}
trace_e1000e_rx_metadata_status_flags(*status_flags);
func_exit:
*status_flags = cpu_to_le32(*status_flags);
} | 0 | [
"CWE-835"
]
| qemu | 4154c7e03fa55b4cf52509a83d50d6c09d743b77 | 281,772,676,525,651,960,000,000,000,000,000,000,000 | 106 | net: e1000e: fix an infinite loop issue
This issue is like the issue in the e1000 network card addressed in
this commit:
e1000: eliminate infinite loops on out-of-bounds transfer start.
Signed-off-by: Li Qiang <[email protected]>
Reviewed-by: Dmitry Fleytman <[email protected]>
Signed-off-by: Jason Wang <[email protected]> |
static bool toneport_has_led(struct usb_line6_toneport *toneport)
{
switch (toneport->type) {
case LINE6_GUITARPORT:
case LINE6_TONEPORT_GX:
/* add your device here if you are missing support for the LEDs */
return true;
default:
return false;
}
} | 0 | [
"CWE-476"
]
| linux | 0b074ab7fc0d575247b9cc9f93bb7e007ca38840 | 200,266,606,423,086,100,000,000,000,000,000,000,000 | 12 | ALSA: line6: Assure canceling delayed work at disconnection
The current code cancels a delayed work item only at a late stage of
the disconnection procedure, which may lead to accesses to already
cleared state.
This patch ensures that cancel_delayed_work_sync() is called at the
beginning of the disconnection procedure to avoid that race. The delayed
work object is now assigned in the common line6 object instead of its
derivative, so that we can call cancel_delayed_work_sync().
Along with the change, the startup function is called via the new
callback instead. This will make it easier to port other LINE6
drivers to use the delayed work for startup in later patches.
Reported-by: [email protected]
Fixes: 7f84ff68be05 ("ALSA: line6: toneport: Fix broken usage of timer for delayed execution")
Cc: <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]> |
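The ordering rule is generic: synchronize against deferred work before freeing the state it touches. A userspace analogue with pthreads, where join-before-free plays the role of cancel_delayed_work_sync():
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
struct dev {
    pthread_t worker;   /* the "delayed work" */
    int *state;
};
static void *startup_work(void *arg)
{
    struct dev *d = arg;
    *d->state = 1;      /* would be a use-after-free if the state were
                           freed before this thread is joined */
    return NULL;
}
static void disconnect(struct dev *d)
{
    pthread_join(d->worker, NULL);  /* cancel_delayed_work_sync() analogue:
                                       first wait out the deferred work */
    free(d->state);                 /* only then tear the state down */
}
int main(void)
{
    struct dev d = { .state = malloc(sizeof(int)) };
    pthread_create(&d.worker, NULL, startup_work, &d);
    disconnect(&d);
    puts("teardown ordered correctly");
    return 0;
}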
read_rsrc_short (const SD2_RSRC *prsrc, int offset)
{ const unsigned char * data = prsrc->rsrc_data ;
if (offset < 0 || offset + 1 >= prsrc->rsrc_len)
return 0 ;
return (data [offset] << 8) + data [offset + 1] ;
} /* read_rsrc_short */ | 0 | [
"CWE-119",
"CWE-787"
]
| libsndfile | dbe14f00030af5d3577f4cabbf9861db59e9c378 | 129,682,064,665,722,580,000,000,000,000,000,000,000 | 6 | src/sd2.c : Fix two potential buffer read overflows.
Closes: https://github.com/erikd/libsndfile/issues/93 |
int ip6_forward(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
struct ipv6hdr *hdr = ipv6_hdr(skb);
struct inet6_skb_parm *opt = IP6CB(skb);
struct net *net = dev_net(dst->dev);
u32 mtu;
if (net->ipv6.devconf_all->forwarding == 0)
goto error;
if (skb_warn_if_lro(skb))
goto drop;
if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
goto drop;
}
if (skb->pkt_type != PACKET_HOST)
goto drop;
skb_forward_csum(skb);
/*
* We DO NOT make any processing on
* RA packets, pushing them to user level AS IS
* without ane WARRANTY that application will be able
* to interpret them. The reason is that we
* cannot make anything clever here.
*
* We are not end-node, so that if packet contains
* AH/ESP, we cannot make anything.
* Defragmentation also would be mistake, RA packets
* cannot be fragmented, because there is no warranty
* that different fragments will go along one path. --ANK
*/
if (opt->ra) {
u8 *ptr = skb_network_header(skb) + opt->ra;
if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
return 0;
}
/*
* check and decrement ttl
*/
if (hdr->hop_limit <= 1) {
/* Force OUTPUT device used as source address */
skb->dev = dst->dev;
icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
IP6_INC_STATS_BH(net,
ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);
kfree_skb(skb);
return -ETIMEDOUT;
}
/* XXX: idev->cnf.proxy_ndp? */
if (net->ipv6.devconf_all->proxy_ndp &&
pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
int proxied = ip6_forward_proxy_check(skb);
if (proxied > 0)
return ip6_input(skb);
else if (proxied < 0) {
IP6_INC_STATS(net, ip6_dst_idev(dst),
IPSTATS_MIB_INDISCARDS);
goto drop;
}
}
if (!xfrm6_route_forward(skb)) {
IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
goto drop;
}
dst = skb_dst(skb);
/* IPv6 specs say nothing about it, but it is clear that we cannot
send redirects to source routed frames.
We don't send redirects to frames decapsulated from IPsec.
*/
if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
struct in6_addr *target = NULL;
struct inet_peer *peer;
struct rt6_info *rt;
/*
* incoming and outgoing devices are the same
* send a redirect.
*/
rt = (struct rt6_info *) dst;
if (rt->rt6i_flags & RTF_GATEWAY)
target = &rt->rt6i_gateway;
else
target = &hdr->daddr;
peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
/* Limit redirects both by destination (here)
and by source (inside ndisc_send_redirect)
*/
if (inet_peer_xrlim_allow(peer, 1*HZ))
ndisc_send_redirect(skb, target);
if (peer)
inet_putpeer(peer);
} else {
int addrtype = ipv6_addr_type(&hdr->saddr);
/* This check is security critical. */
if (addrtype == IPV6_ADDR_ANY ||
addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
goto error;
if (addrtype & IPV6_ADDR_LINKLOCAL) {
icmpv6_send(skb, ICMPV6_DEST_UNREACH,
ICMPV6_NOT_NEIGHBOUR, 0);
goto error;
}
}
mtu = dst_mtu(dst);
if (mtu < IPV6_MIN_MTU)
mtu = IPV6_MIN_MTU;
if ((!skb->local_df && skb->len > mtu && !skb_is_gso(skb)) ||
(IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)) {
/* Again, force OUTPUT device used as source address */
skb->dev = dst->dev;
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
IP6_INC_STATS_BH(net,
ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
IP6_INC_STATS_BH(net,
ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS);
kfree_skb(skb);
return -EMSGSIZE;
}
if (skb_cow(skb, dst->dev->hard_header_len)) {
IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
goto drop;
}
hdr = ipv6_hdr(skb);
/* Mangling hops number delayed to point after skb COW */
hdr->hop_limit--;
IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
ip6_forward_finish);
error:
IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
drop:
kfree_skb(skb);
return -EINVAL;
} | 0 | [
"CWE-399"
]
| linux | 75a493e60ac4bbe2e977e7129d6d8cbb0dd236be | 186,644,210,773,340,370,000,000,000,000,000,000,000 | 158 | ipv6: ip6_append_data_mtu did not care about pmtudisc and frag_size
If the socket had an IPV6_MTU value set, ip6_append_data_mtu lost track
of this when appending the second frame on a corked socket. This results
in the following splat:
[37598.993962] ------------[ cut here ]------------
[37598.994008] kernel BUG at net/core/skbuff.c:2064!
[37598.994008] invalid opcode: 0000 [#1] SMP
[37598.994008] Modules linked in: tcp_lp uvcvideo videobuf2_vmalloc videobuf2_memops videobuf2_core videodev media vfat fat usb_storage fuse ebtable_nat xt_CHECKSUM bridge stp llc ipt_MASQUERADE nf_conntrack_netbios_ns nf_conntrack_broadcast ip6table_mangle ip6t_REJECT nf_conntrack_ipv6 nf_defrag_ipv6 iptable_nat
+nf_nat_ipv4 nf_nat iptable_mangle nf_conntrack_ipv4 nf_defrag_ipv4 xt_conntrack nf_conntrack ebtable_filter ebtables ip6table_filter ip6_tables be2iscsi iscsi_boot_sysfs bnx2i cnic uio cxgb4i cxgb4 cxgb3i cxgb3 mdio libcxgbi ib_iser rdma_cm ib_addr iw_cm ib_cm ib_sa ib_mad ib_core iscsi_tcp libiscsi_tcp libiscsi
+scsi_transport_iscsi rfcomm bnep iTCO_wdt iTCO_vendor_support snd_hda_codec_conexant arc4 iwldvm mac80211 snd_hda_intel acpi_cpufreq mperf coretemp snd_hda_codec microcode cdc_wdm cdc_acm
[37598.994008] snd_hwdep cdc_ether snd_seq snd_seq_device usbnet mii joydev btusb snd_pcm bluetooth i2c_i801 e1000e lpc_ich mfd_core ptp iwlwifi pps_core snd_page_alloc mei cfg80211 snd_timer thinkpad_acpi snd tpm_tis soundcore rfkill tpm tpm_bios vhost_net tun macvtap macvlan kvm_intel kvm uinput binfmt_misc
+dm_crypt i915 i2c_algo_bit drm_kms_helper drm i2c_core wmi video
[37598.994008] CPU 0
[37598.994008] Pid: 27320, comm: t2 Not tainted 3.9.6-200.fc18.x86_64 #1 LENOVO 27744PG/27744PG
[37598.994008] RIP: 0010:[<ffffffff815443a5>] [<ffffffff815443a5>] skb_copy_and_csum_bits+0x325/0x330
[37598.994008] RSP: 0018:ffff88003670da18 EFLAGS: 00010202
[37598.994008] RAX: ffff88018105c018 RBX: 0000000000000004 RCX: 00000000000006c0
[37598.994008] RDX: ffff88018105a6c0 RSI: ffff88018105a000 RDI: ffff8801e1b0aa00
[37598.994008] RBP: ffff88003670da78 R08: 0000000000000000 R09: ffff88018105c040
[37598.994008] R10: ffff8801e1b0aa00 R11: 0000000000000000 R12: 000000000000fff8
[37598.994008] R13: 00000000000004fc R14: 00000000ffff0504 R15: 0000000000000000
[37598.994008] FS: 00007f28eea59740(0000) GS:ffff88023bc00000(0000) knlGS:0000000000000000
[37598.994008] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b
[37598.994008] CR2: 0000003d935789e0 CR3: 00000000365cb000 CR4: 00000000000407f0
[37598.994008] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[37598.994008] DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
[37598.994008] Process t2 (pid: 27320, threadinfo ffff88003670c000, task ffff88022c162ee0)
[37598.994008] Stack:
[37598.994008] ffff88022e098a00 ffff88020f973fc0 0000000000000008 00000000000004c8
[37598.994008] ffff88020f973fc0 00000000000004c4 ffff88003670da78 ffff8801e1b0a200
[37598.994008] 0000000000000018 00000000000004c8 ffff88020f973fc0 00000000000004c4
[37598.994008] Call Trace:
[37598.994008] [<ffffffff815fc21f>] ip6_append_data+0xccf/0xfe0
[37598.994008] [<ffffffff8158d9f0>] ? ip_copy_metadata+0x1a0/0x1a0
[37598.994008] [<ffffffff81661f66>] ? _raw_spin_lock_bh+0x16/0x40
[37598.994008] [<ffffffff8161548d>] udpv6_sendmsg+0x1ed/0xc10
[37598.994008] [<ffffffff812a2845>] ? sock_has_perm+0x75/0x90
[37598.994008] [<ffffffff815c3693>] inet_sendmsg+0x63/0xb0
[37598.994008] [<ffffffff812a2973>] ? selinux_socket_sendmsg+0x23/0x30
[37598.994008] [<ffffffff8153a450>] sock_sendmsg+0xb0/0xe0
[37598.994008] [<ffffffff810135d1>] ? __switch_to+0x181/0x4a0
[37598.994008] [<ffffffff8153d97d>] sys_sendto+0x12d/0x180
[37598.994008] [<ffffffff810dfb64>] ? __audit_syscall_entry+0x94/0xf0
[37598.994008] [<ffffffff81020ed1>] ? syscall_trace_enter+0x231/0x240
[37598.994008] [<ffffffff8166a7e7>] tracesys+0xdd/0xe2
[37598.994008] Code: fe 07 00 00 48 c7 c7 04 28 a6 81 89 45 a0 4c 89 4d b8 44 89 5d a8 e8 1b ac b1 ff 44 8b 5d a8 4c 8b 4d b8 8b 45 a0 e9 cf fe ff ff <0f> 0b 66 0f 1f 84 00 00 00 00 00 66 66 66 66 90 55 48 89 e5 48
[37598.994008] RIP [<ffffffff815443a5>] skb_copy_and_csum_bits+0x325/0x330
[37598.994008] RSP <ffff88003670da18>
[37599.007323] ---[ end trace d69f6a17f8ac8eee ]---
While there, also check if path mtu discovery is activated for this
socket. The logic was adapted from ip6_append_data when first writing
on the corked socket.
This bug was introduced with commit
0c1833797a5a6ec23ea9261d979aa18078720b74 ("ipv6: fix incorrect ipsec
fragment").
v2:
a) Replace IPV6_PMTU_DISC_DO with IPV6_PMTUDISC_PROBE.
b) Don't pass ipv6_pinfo to ip6_append_data_mtu (suggestion by Gao
feng, thanks!).
c) Change mtu to unsigned int, else we get a warning about
non-matching types because of the min()-macro type-check.
Acked-by: Gao feng <[email protected]>
Cc: YOSHIFUJI Hideaki <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
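A self-contained model of the bookkeeping the fix restores; the names are illustrative, not the kernel's. For fragments after the first on a corked socket, the usable MTU is clamped to the smaller of the socket's configured MTU and the path MTU (the probing case, which the patch routes through the device MTU, is omitted here).
/* Pick the MTU for a non-first fragment on a corked socket. */
static unsigned int corked_fragment_mtu(unsigned int sock_mtu,
                                        unsigned int path_mtu)
{
    return sock_mtu < path_mtu ? sock_mtu : path_mtu;
}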
PHPAPI void php_session_reset_id(TSRMLS_D) /* {{{ */
{
int module_number = PS(module_number);
if (!PS(id)) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Cannot set session ID - session ID is not initialized");
return;
}
if (PS(use_cookies) && PS(send_cookie)) {
php_session_send_cookie(TSRMLS_C);
PS(send_cookie) = 0;
}
/* if the SID constant exists, destroy it. */
zend_hash_del(EG(zend_constants), "sid", sizeof("sid"));
if (PS(define_sid)) {
smart_str var = {0};
smart_str_appends(&var, PS(session_name));
smart_str_appendc(&var, '=');
smart_str_appends(&var, PS(id));
smart_str_0(&var);
REGISTER_STRINGL_CONSTANT("SID", var.c, var.len, 0);
} else {
REGISTER_STRINGL_CONSTANT("SID", STR_EMPTY_ALLOC(), 0, 0);
}
if (PS(apply_trans_sid)) {
php_url_scanner_reset_vars(TSRMLS_C);
php_url_scanner_add_var(PS(session_name), strlen(PS(session_name)), PS(id), strlen(PS(id)), 1 TSRMLS_CC);
}
} | 0 | [
"CWE-264"
]
| php-src | 25e8fcc88fa20dc9d4c47184471003f436927cde | 108,124,407,647,991,200,000,000,000,000,000,000,000 | 34 | Strict session |
BOOL update_write_opaque_rect_order(wStream* s, ORDER_INFO* orderInfo,
const OPAQUE_RECT_ORDER* opaque_rect)
{
BYTE byte;
int inf = update_approximate_opaque_rect_order(orderInfo, opaque_rect);
if (!Stream_EnsureRemainingCapacity(s, inf))
return FALSE;
// TODO: Color format conversion
orderInfo->fieldFlags = 0;
orderInfo->fieldFlags |= ORDER_FIELD_01;
update_write_coord(s, opaque_rect->nLeftRect);
orderInfo->fieldFlags |= ORDER_FIELD_02;
update_write_coord(s, opaque_rect->nTopRect);
orderInfo->fieldFlags |= ORDER_FIELD_03;
update_write_coord(s, opaque_rect->nWidth);
orderInfo->fieldFlags |= ORDER_FIELD_04;
update_write_coord(s, opaque_rect->nHeight);
orderInfo->fieldFlags |= ORDER_FIELD_05;
byte = opaque_rect->color & 0x000000FF;
Stream_Write_UINT8(s, byte);
orderInfo->fieldFlags |= ORDER_FIELD_06;
byte = (opaque_rect->color & 0x0000FF00) >> 8;
Stream_Write_UINT8(s, byte);
orderInfo->fieldFlags |= ORDER_FIELD_07;
byte = (opaque_rect->color & 0x00FF0000) >> 16;
Stream_Write_UINT8(s, byte);
return TRUE;
} | 0 | [
"CWE-415"
]
| FreeRDP | 67c2aa52b2ae0341d469071d1bc8aab91f8d2ed8 | 272,596,382,856,367,420,000,000,000,000,000,000,000 | 30 | Fixed #6013: Check new length is > 0 |
static int bt_seq_show(struct seq_file *seq, void *v)
{
struct bt_seq_state *s = seq->private;
struct bt_sock_list *l = s->l;
if (v == SEQ_START_TOKEN) {
seq_puts(seq, "sk RefCnt Rmem Wmem User Inode Parent");
if (l->custom_seq_show) {
seq_putc(seq, ' ');
l->custom_seq_show(seq, v);
}
seq_putc(seq, '\n');
} else {
struct sock *sk = sk_entry(v);
struct bt_sock *bt = bt_sk(sk);
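/* %pK honours kptr_restrict, hiding the raw pointer value from
unprivileged readers of this seq file. */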
seq_printf(seq,
"%pK %-6d %-6u %-6u %-6u %-6lu %-6lu",
sk,
atomic_read(&sk->sk_refcnt),
sk_rmem_alloc_get(sk),
sk_wmem_alloc_get(sk),
from_kuid(seq_user_ns(seq), sock_i_uid(sk)),
sock_i_ino(sk),
bt->parent ? sock_i_ino(bt->parent) : 0LU);
if (l->custom_seq_show) {
seq_putc(seq, ' ');
l->custom_seq_show(seq, v);
}
seq_putc(seq, '\n');
}
return 0;
} | 0 | [
"CWE-20",
"CWE-269"
]
| linux | f3d3342602f8bcbf37d7c46641cb9bca7618eb1c | 124,600,892,396,178,430,000,000,000,000,000,000,000 | 37 | net: rework recvmsg handler msg_name and msg_namelen logic
This patch now always passes msg->msg_namelen as 0. recvmsg handlers must
set msg_namelen to the proper size <= sizeof(struct sockaddr_storage)
to return msg_name to the user.
This prevents numerous uninitialized memory leaks we had in the
recvmsg handlers and makes it harder for new code to accidentally leak
uninitialized memory.
Optimize for the case recvfrom is called with NULL as address. We don't
need to copy the address at all, so set it to NULL before invoking the
recvmsg handler. We can do so, because all the recvmsg handlers must
cope with the case a plain read() is called on them. read() also sets
msg_name to NULL.
Also document these changes in include/linux/net.h as suggested by David
Miller.
Changes since RFC:
Set msg->msg_name = NULL if user specified a NULL in msg_name but had a
non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't
affect sendto as it would bail out earlier while trying to copy-in the
address. It also more naturally reflects the logic by the callers of
verify_iovec.
With this change in place I could remove "
if (!uaddr || msg_sys->msg_namelen == 0)
msg->msg_name = NULL
".
This change does not alter the user visible error logic as we ignore
msg_namelen as long as msg_name is NULL.
Also remove two unnecessary curly brackets in ___sys_recvmsg and change
comments to netdev style.
Cc: David Miller <[email protected]>
Suggested-by: Eric Dumazet <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
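The user-visible side of the optimization above can be shown with plain POSIX calls, no invented APIs: passing NULL for the source address tells the kernel not to copy msg_name at all.
#include <sys/types.h>
#include <sys/socket.h>
/* Read one datagram, ignoring the sender's address entirely. */
static ssize_t read_datagram(int fd, void *buf, size_t len)
{
    return recvfrom(fd, buf, len, 0, NULL, NULL);
}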
static void vmxnet3_get_tx_stats_from_file(QEMUFile *f,
struct UPT1_TxStats *tx_stat)
{
tx_stat->TSOPktsTxOK = qemu_get_be64(f);
tx_stat->TSOBytesTxOK = qemu_get_be64(f);
tx_stat->ucastPktsTxOK = qemu_get_be64(f);
tx_stat->ucastBytesTxOK = qemu_get_be64(f);
tx_stat->mcastPktsTxOK = qemu_get_be64(f);
tx_stat->mcastBytesTxOK = qemu_get_be64(f);
tx_stat->bcastPktsTxOK = qemu_get_be64(f);
tx_stat->bcastBytesTxOK = qemu_get_be64(f);
tx_stat->pktsTxError = qemu_get_be64(f);
tx_stat->pktsTxDiscard = qemu_get_be64(f);
} | 0 | [
"CWE-20"
]
| qemu | a7278b36fcab9af469563bd7b9dadebe2ae25e48 | 101,091,731,663,647,300,000,000,000,000,000,000,000 | 14 | net/vmxnet3: Refine l2 header validation
Validation of the L2 header length assumed a minimal packet size of
eth_header + 2 * vlan_header, regardless of the actual protocol.
This caused a crash for valid non-IP packets shorter than 22 bytes, as
'tx_pkt->packet_type' hasn't been assigned for such packets and
'vmxnet3_on_tx_done_update_stats()' expects it to be properly set.
Refine the header length validation in 'vmxnet_tx_pkt_parse_headers'
and check its return value in the packet processing flow.
As a side effect, corrupt packets are now dropped when IPv4 or IPv6
header validation fails.
Signed-off-by: Dana Rubin <[email protected]>
Signed-off-by: Shmulik Ladkani <[email protected]>
Signed-off-by: Jason Wang <[email protected]> |
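A self-contained model of protocol-aware L2 length validation; the constants and names are illustrative, not QEMU's. Instead of demanding eth_header + 2 * vlan_header from every frame, only VLAN tags actually present extend the required header length, so a valid 14-byte non-IP frame is no longer mis-handled.
#include <stddef.h>
#include <stdint.h>
#define ETH_HLEN   14
#define VLAN_HLEN  4
#define ETH_P_VLAN 0x8100
/* Return the L2 header length, or 0 if the frame is too short. */
static size_t l2_header_len(const uint8_t *pkt, size_t len)
{
    size_t hlen = ETH_HLEN;
    if (len < hlen)
        return 0;
    /* Peel only the VLAN tags actually present; each adds 4 bytes. */
    while (len >= hlen + VLAN_HLEN &&
           ((pkt[hlen - 2] << 8) | pkt[hlen - 1]) == ETH_P_VLAN)
        hlen += VLAN_HLEN;
    return hlen;
}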