| instruction (stringclasses, 1 value) | input (stringlengths, 222 to 112k) | output (stringlengths, 21 to 113k) | __index_level_0__ (int64, 15 to 30k) |
---|---|---|---|
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: krb5_gss_process_context_token(minor_status, context_handle,
token_buffer)
OM_uint32 *minor_status;
gss_ctx_id_t context_handle;
gss_buffer_t token_buffer;
{
krb5_gss_ctx_id_rec *ctx;
OM_uint32 majerr;
ctx = (krb5_gss_ctx_id_t) context_handle;
if (! ctx->established) {
*minor_status = KG_CTX_INCOMPLETE;
return(GSS_S_NO_CONTEXT);
}
/* "unseal" the token */
if (GSS_ERROR(majerr = kg_unseal(minor_status, context_handle,
token_buffer,
GSS_C_NO_BUFFER, NULL, NULL,
KG_TOK_DEL_CTX)))
return(majerr);
/* that's it. delete the context */
return(krb5_gss_delete_sec_context(minor_status, &context_handle,
GSS_C_NO_BUFFER));
}
Commit Message: Fix gss_process_context_token() [CVE-2014-5352]
[MITKRB5-SA-2015-001] The krb5 gss_process_context_token() should not
actually delete the context; that leaves the caller with a dangling
pointer and no way to know that it is invalid. Instead, mark the
context as terminated, and check for terminated contexts in the GSS
functions which expect established contexts. Also add checks in
export_sec_context and pseudo_random, and adjust t_prf.c for the
pseudo_random check.
ticket: 8055 (new)
target_version: 1.13.1
tags: pullup
CWE ID: | krb5_gss_process_context_token(minor_status, context_handle,
token_buffer)
OM_uint32 *minor_status;
gss_ctx_id_t context_handle;
gss_buffer_t token_buffer;
{
krb5_gss_ctx_id_rec *ctx;
OM_uint32 majerr;
ctx = (krb5_gss_ctx_id_t) context_handle;
if (ctx->terminated || !ctx->established) {
*minor_status = KG_CTX_INCOMPLETE;
return(GSS_S_NO_CONTEXT);
}
/* We only support context deletion tokens for now, and RFC 4121 does not
* define a context deletion token. */
if (ctx->proto) {
*minor_status = 0;
return(GSS_S_DEFECTIVE_TOKEN);
}
/* "unseal" the token */
if (GSS_ERROR(majerr = kg_unseal(minor_status, context_handle,
token_buffer,
GSS_C_NO_BUFFER, NULL, NULL,
KG_TOK_DEL_CTX)))
return(majerr);
/* Mark the context as terminated, but do not delete it (as that would
* leave the caller with a dangling context handle). */
ctx->terminated = 1;
return(GSS_S_COMPLETE);
}
| 329 |
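The krb5 fix above replaces context deletion with a terminated flag so the caller is never left holding a freed handle. A minimal C sketch of that pattern is given below; the handle_t type and function names are hypothetical, not the krb5 GSS API.

```c
#include <stdlib.h>

/* Hypothetical handle type: operations check a 'terminated' flag instead of
 * the object being freed out from under the caller. */
typedef struct {
    int established;
    int terminated;
    /* ...protocol state... */
} handle_t;

/* Processing a peer's "delete" token only marks the handle terminated;
 * the memory stays valid until the owner explicitly deletes it. */
int handle_process_delete_token(handle_t *h)
{
    if (h == NULL || h->terminated || !h->established)
        return -1;               /* no usable context */
    h->terminated = 1;           /* do NOT free here */
    return 0;
}

/* Every operation that needs an established context also rejects
 * terminated ones, so a stale handle fails cleanly instead of
 * touching freed memory. */
int handle_do_work(handle_t *h)
{
    if (h == NULL || h->terminated || !h->established)
        return -1;
    /* ...do protected work... */
    return 0;
}

/* Only the owner's explicit delete call releases the memory. */
void handle_delete(handle_t **hp)
{
    if (hp == NULL || *hp == NULL)
        return;
    free(*hp);
    *hp = NULL;                  /* caller's pointer is cleared, not dangling */
}
```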
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static void destroy_server_connect(SERVER_CONNECT_REC *conn)
{
IRC_SERVER_CONNECT_REC *ircconn;
ircconn = IRC_SERVER_CONNECT(conn);
if (ircconn == NULL)
return;
g_free_not_null(ircconn->usermode);
g_free_not_null(ircconn->alternate_nick);
}
Commit Message: Merge pull request #1058 from ailin-nemui/sasl-reconnect
copy sasl username and password values
CWE ID: CWE-416 | static void destroy_server_connect(SERVER_CONNECT_REC *conn)
{
IRC_SERVER_CONNECT_REC *ircconn;
ircconn = IRC_SERVER_CONNECT(conn);
if (ircconn == NULL)
return;
g_free_not_null(ircconn->usermode);
g_free_not_null(ircconn->alternate_nick);
g_free_not_null(ircconn->sasl_username);
g_free_not_null(ircconn->sasl_password);
}
| 19,999 |
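The CWE-416 entry above pairs copied SASL credentials with matching frees in the destroy path. As a rough illustration of that ownership rule (hypothetical struct and function names, not irssi's API), each record keeps its own strdup'd copies, so destroying one record can never free memory another record still references:

```c
#include <stdlib.h>
#include <string.h>

typedef struct {
    char *sasl_username;
    char *sasl_password;
} connect_rec;

/* Deep copy: the new record owns independent heap copies. */
static connect_rec *connect_rec_copy(const connect_rec *src)
{
    connect_rec *dst = calloc(1, sizeof(*dst));
    if (dst == NULL)
        return NULL;
    if (src->sasl_username != NULL)
        dst->sasl_username = strdup(src->sasl_username);
    if (src->sasl_password != NULL)
        dst->sasl_password = strdup(src->sasl_password);
    return dst;
}

/* Destroy frees exactly what this record owns; safe to call on every
 * record because no two records share the same string pointers. */
static void connect_rec_destroy(connect_rec *rec)
{
    if (rec == NULL)
        return;
    free(rec->sasl_username);
    free(rec->sasl_password);
    free(rec);
}
```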
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: ieee802_15_4_if_print(netdissect_options *ndo,
const struct pcap_pkthdr *h, const u_char *p)
{
u_int caplen = h->caplen;
u_int hdrlen;
uint16_t fc;
uint8_t seq;
uint16_t panid = 0;
if (caplen < 3) {
ND_PRINT((ndo, "[|802.15.4]"));
return caplen;
}
hdrlen = 3;
fc = EXTRACT_LE_16BITS(p);
seq = EXTRACT_LE_8BITS(p + 2);
p += 3;
caplen -= 3;
ND_PRINT((ndo,"IEEE 802.15.4 %s packet ", ftypes[FC_FRAME_TYPE(fc)]));
if (ndo->ndo_vflag)
ND_PRINT((ndo,"seq %02x ", seq));
/*
* Destination address and PAN ID, if present.
*/
switch (FC_DEST_ADDRESSING_MODE(fc)) {
case FC_ADDRESSING_MODE_NONE:
if (fc & FC_PAN_ID_COMPRESSION) {
/*
* PAN ID compression; this requires that both
* the source and destination addresses be present,
* but the destination address is missing.
*/
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
if (ndo->ndo_vflag)
ND_PRINT((ndo,"none "));
break;
case FC_ADDRESSING_MODE_RESERVED:
if (ndo->ndo_vflag)
ND_PRINT((ndo,"reserved destination addressing mode"));
return hdrlen;
case FC_ADDRESSING_MODE_SHORT:
if (caplen < 2) {
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
panid = EXTRACT_LE_16BITS(p);
p += 2;
caplen -= 2;
hdrlen += 2;
if (caplen < 2) {
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
if (ndo->ndo_vflag)
ND_PRINT((ndo,"%04x:%04x ", panid, EXTRACT_LE_16BITS(p + 2)));
p += 2;
caplen -= 2;
hdrlen += 2;
break;
case FC_ADDRESSING_MODE_LONG:
if (caplen < 2) {
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
panid = EXTRACT_LE_16BITS(p);
p += 2;
caplen -= 2;
hdrlen += 2;
if (caplen < 8) {
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
if (ndo->ndo_vflag)
ND_PRINT((ndo,"%04x:%s ", panid, le64addr_string(ndo, p + 2)));
p += 8;
caplen -= 8;
hdrlen += 8;
break;
}
if (ndo->ndo_vflag)
ND_PRINT((ndo,"< "));
/*
* Source address and PAN ID, if present.
*/
switch (FC_SRC_ADDRESSING_MODE(fc)) {
case FC_ADDRESSING_MODE_NONE:
if (ndo->ndo_vflag)
ND_PRINT((ndo,"none "));
break;
case FC_ADDRESSING_MODE_RESERVED:
if (ndo->ndo_vflag)
ND_PRINT((ndo,"reserved source addressing mode"));
return 0;
case FC_ADDRESSING_MODE_SHORT:
if (!(fc & FC_PAN_ID_COMPRESSION)) {
/*
* The source PAN ID is not compressed out, so
* fetch it. (Otherwise, we'll use the destination
* PAN ID, fetched above.)
*/
if (caplen < 2) {
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
panid = EXTRACT_LE_16BITS(p);
p += 2;
caplen -= 2;
hdrlen += 2;
}
if (caplen < 2) {
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
if (ndo->ndo_vflag)
ND_PRINT((ndo,"%04x:%04x ", panid, EXTRACT_LE_16BITS(p)));
p += 2;
caplen -= 2;
hdrlen += 2;
break;
case FC_ADDRESSING_MODE_LONG:
if (!(fc & FC_PAN_ID_COMPRESSION)) {
/*
* The source PAN ID is not compressed out, so
* fetch it. (Otherwise, we'll use the destination
* PAN ID, fetched above.)
*/
if (caplen < 2) {
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
panid = EXTRACT_LE_16BITS(p);
p += 2;
caplen -= 2;
hdrlen += 2;
}
if (caplen < 8) {
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
if (ndo->ndo_vflag)
ND_PRINT((ndo,"%04x:%s ", panid, le64addr_string(ndo, p)));
p += 8;
caplen -= 8;
hdrlen += 8;
break;
}
if (!ndo->ndo_suppress_default_print)
ND_DEFAULTPRINT(p, caplen);
return hdrlen;
}
Commit Message: CVE-2017-13000/IEEE 802.15.4: Fix bug introduced by previous fix.
We've already advanced the pointer past the PAN ID, if present; it now
points to the address, so don't add 2 to it.
This fixes a buffer over-read discovered by Forcepoint's security
researchers Otto Airamo & Antti Levomäki.
Add a test using the capture file supplied by the reporter(s).
CWE ID: CWE-125 | ieee802_15_4_if_print(netdissect_options *ndo,
const struct pcap_pkthdr *h, const u_char *p)
{
u_int caplen = h->caplen;
u_int hdrlen;
uint16_t fc;
uint8_t seq;
uint16_t panid = 0;
if (caplen < 3) {
ND_PRINT((ndo, "[|802.15.4]"));
return caplen;
}
hdrlen = 3;
fc = EXTRACT_LE_16BITS(p);
seq = EXTRACT_LE_8BITS(p + 2);
p += 3;
caplen -= 3;
ND_PRINT((ndo,"IEEE 802.15.4 %s packet ", ftypes[FC_FRAME_TYPE(fc)]));
if (ndo->ndo_vflag)
ND_PRINT((ndo,"seq %02x ", seq));
/*
* Destination address and PAN ID, if present.
*/
switch (FC_DEST_ADDRESSING_MODE(fc)) {
case FC_ADDRESSING_MODE_NONE:
if (fc & FC_PAN_ID_COMPRESSION) {
/*
* PAN ID compression; this requires that both
* the source and destination addresses be present,
* but the destination address is missing.
*/
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
if (ndo->ndo_vflag)
ND_PRINT((ndo,"none "));
break;
case FC_ADDRESSING_MODE_RESERVED:
if (ndo->ndo_vflag)
ND_PRINT((ndo,"reserved destination addressing mode"));
return hdrlen;
case FC_ADDRESSING_MODE_SHORT:
if (caplen < 2) {
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
panid = EXTRACT_LE_16BITS(p);
p += 2;
caplen -= 2;
hdrlen += 2;
if (caplen < 2) {
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
if (ndo->ndo_vflag)
ND_PRINT((ndo,"%04x:%04x ", panid, EXTRACT_LE_16BITS(p + 2)));
p += 2;
caplen -= 2;
hdrlen += 2;
break;
case FC_ADDRESSING_MODE_LONG:
if (caplen < 2) {
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
panid = EXTRACT_LE_16BITS(p);
p += 2;
caplen -= 2;
hdrlen += 2;
if (caplen < 8) {
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
if (ndo->ndo_vflag)
ND_PRINT((ndo,"%04x:%s ", panid, le64addr_string(ndo, p)));
p += 8;
caplen -= 8;
hdrlen += 8;
break;
}
if (ndo->ndo_vflag)
ND_PRINT((ndo,"< "));
/*
* Source address and PAN ID, if present.
*/
switch (FC_SRC_ADDRESSING_MODE(fc)) {
case FC_ADDRESSING_MODE_NONE:
if (ndo->ndo_vflag)
ND_PRINT((ndo,"none "));
break;
case FC_ADDRESSING_MODE_RESERVED:
if (ndo->ndo_vflag)
ND_PRINT((ndo,"reserved source addressing mode"));
return 0;
case FC_ADDRESSING_MODE_SHORT:
if (!(fc & FC_PAN_ID_COMPRESSION)) {
/*
* The source PAN ID is not compressed out, so
* fetch it. (Otherwise, we'll use the destination
* PAN ID, fetched above.)
*/
if (caplen < 2) {
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
panid = EXTRACT_LE_16BITS(p);
p += 2;
caplen -= 2;
hdrlen += 2;
}
if (caplen < 2) {
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
if (ndo->ndo_vflag)
ND_PRINT((ndo,"%04x:%04x ", panid, EXTRACT_LE_16BITS(p)));
p += 2;
caplen -= 2;
hdrlen += 2;
break;
case FC_ADDRESSING_MODE_LONG:
if (!(fc & FC_PAN_ID_COMPRESSION)) {
/*
* The source PAN ID is not compressed out, so
* fetch it. (Otherwise, we'll use the destination
* PAN ID, fetched above.)
*/
if (caplen < 2) {
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
panid = EXTRACT_LE_16BITS(p);
p += 2;
caplen -= 2;
hdrlen += 2;
}
if (caplen < 8) {
ND_PRINT((ndo, "[|802.15.4]"));
return hdrlen;
}
if (ndo->ndo_vflag)
ND_PRINT((ndo,"%04x:%s ", panid, le64addr_string(ndo, p)));
p += 8;
caplen -= 8;
hdrlen += 8;
break;
}
if (!ndo->ndo_suppress_default_print)
ND_DEFAULTPRINT(p, caplen);
return hdrlen;
}
| 26,073 |
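The CVE-2017-13000 fix is purely about where the cursor points after each field: once p has been advanced past the PAN ID, the address begins at p, not p + 2. A small generic C sketch of the same cursor-plus-remaining-length discipline (hypothetical parser, not the tcpdump code) follows:

```c
#include <stdint.h>
#include <stddef.h>

/* Little-endian 16-bit read from a byte cursor. */
static uint16_t get_le16(const uint8_t *p)
{
    return (uint16_t)(p[0] | ((uint16_t)p[1] << 8));
}

/* Parse "[pan_id][short_addr]" from a captured buffer.  Every read is
 * checked against the bytes actually remaining, and the cursor is the
 * single source of truth: after consuming the PAN ID we read the
 * address at 'p', never at 'p + 2'. */
static int parse_short_dst(const uint8_t *p, size_t caplen,
                           uint16_t *pan_id, uint16_t *addr)
{
    if (caplen < 2)
        return -1;
    *pan_id = get_le16(p);
    p += 2;                 /* cursor now points at the address */
    caplen -= 2;

    if (caplen < 2)
        return -1;
    *addr = get_le16(p);    /* not p + 2: that would over-read */
    return 0;
}
```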
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int ext4_orphan_del(handle_t *handle, struct inode *inode)
{
struct list_head *prev;
struct ext4_inode_info *ei = EXT4_I(inode);
struct ext4_sb_info *sbi;
__u32 ino_next;
struct ext4_iloc iloc;
int err = 0;
if (!EXT4_SB(inode->i_sb)->s_journal)
return 0;
mutex_lock(&EXT4_SB(inode->i_sb)->s_orphan_lock);
if (list_empty(&ei->i_orphan))
goto out;
ino_next = NEXT_ORPHAN(inode);
prev = ei->i_orphan.prev;
sbi = EXT4_SB(inode->i_sb);
jbd_debug(4, "remove inode %lu from orphan list\n", inode->i_ino);
list_del_init(&ei->i_orphan);
/* If we're on an error path, we may not have a valid
* transaction handle with which to update the orphan list on
* disk, but we still need to remove the inode from the linked
* list in memory. */
if (!handle)
goto out;
err = ext4_reserve_inode_write(handle, inode, &iloc);
if (err)
goto out_err;
if (prev == &sbi->s_orphan) {
jbd_debug(4, "superblock will point to %u\n", ino_next);
BUFFER_TRACE(sbi->s_sbh, "get_write_access");
err = ext4_journal_get_write_access(handle, sbi->s_sbh);
if (err)
goto out_brelse;
sbi->s_es->s_last_orphan = cpu_to_le32(ino_next);
err = ext4_handle_dirty_super(handle, inode->i_sb);
} else {
struct ext4_iloc iloc2;
struct inode *i_prev =
&list_entry(prev, struct ext4_inode_info, i_orphan)->vfs_inode;
jbd_debug(4, "orphan inode %lu will point to %u\n",
i_prev->i_ino, ino_next);
err = ext4_reserve_inode_write(handle, i_prev, &iloc2);
if (err)
goto out_brelse;
NEXT_ORPHAN(i_prev) = ino_next;
err = ext4_mark_iloc_dirty(handle, i_prev, &iloc2);
}
if (err)
goto out_brelse;
NEXT_ORPHAN(inode) = 0;
err = ext4_mark_iloc_dirty(handle, inode, &iloc);
out_err:
ext4_std_error(inode->i_sb, err);
out:
mutex_unlock(&EXT4_SB(inode->i_sb)->s_orphan_lock);
return err;
out_brelse:
brelse(iloc.bh);
goto out_err;
}
Commit Message: ext4: avoid hang when mounting non-journal filesystems with orphan list
When trying to mount a file system which does not contain a journal,
but which does have an orphan list containing an inode which needs to
be truncated, the mount call will hang forever in
ext4_orphan_cleanup() because ext4_orphan_del() will return
immediately without removing the inode from the orphan list, leading
to an uninterruptible loop in kernel code which will busy out one of
the CPU's on the system.
This can be trivially reproduced by trying to mount the file system
found in tests/f_orphan_extents_inode/image.gz from the e2fsprogs
source tree. If a malicious user were to put this on a USB stick, and
mount it on a Linux desktop which has automatic mounts enabled, this
could be considered a potential denial of service attack. (Not a big
deal in practice, but professional paranoids worry about such things,
and have even been known to allocate CVE numbers for such problems.)
Signed-off-by: "Theodore Ts'o" <[email protected]>
Reviewed-by: Zheng Liu <[email protected]>
Cc: [email protected]
CWE ID: CWE-399 | int ext4_orphan_del(handle_t *handle, struct inode *inode)
{
struct list_head *prev;
struct ext4_inode_info *ei = EXT4_I(inode);
struct ext4_sb_info *sbi;
__u32 ino_next;
struct ext4_iloc iloc;
int err = 0;
if ((!EXT4_SB(inode->i_sb)->s_journal) &&
!(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS))
return 0;
mutex_lock(&EXT4_SB(inode->i_sb)->s_orphan_lock);
if (list_empty(&ei->i_orphan))
goto out;
ino_next = NEXT_ORPHAN(inode);
prev = ei->i_orphan.prev;
sbi = EXT4_SB(inode->i_sb);
jbd_debug(4, "remove inode %lu from orphan list\n", inode->i_ino);
list_del_init(&ei->i_orphan);
/* If we're on an error path, we may not have a valid
* transaction handle with which to update the orphan list on
* disk, but we still need to remove the inode from the linked
* list in memory. */
if (!handle)
goto out;
err = ext4_reserve_inode_write(handle, inode, &iloc);
if (err)
goto out_err;
if (prev == &sbi->s_orphan) {
jbd_debug(4, "superblock will point to %u\n", ino_next);
BUFFER_TRACE(sbi->s_sbh, "get_write_access");
err = ext4_journal_get_write_access(handle, sbi->s_sbh);
if (err)
goto out_brelse;
sbi->s_es->s_last_orphan = cpu_to_le32(ino_next);
err = ext4_handle_dirty_super(handle, inode->i_sb);
} else {
struct ext4_iloc iloc2;
struct inode *i_prev =
&list_entry(prev, struct ext4_inode_info, i_orphan)->vfs_inode;
jbd_debug(4, "orphan inode %lu will point to %u\n",
i_prev->i_ino, ino_next);
err = ext4_reserve_inode_write(handle, i_prev, &iloc2);
if (err)
goto out_brelse;
NEXT_ORPHAN(i_prev) = ino_next;
err = ext4_mark_iloc_dirty(handle, i_prev, &iloc2);
}
if (err)
goto out_brelse;
NEXT_ORPHAN(inode) = 0;
err = ext4_mark_iloc_dirty(handle, inode, &iloc);
out_err:
ext4_std_error(inode->i_sb, err);
out:
mutex_unlock(&EXT4_SB(inode->i_sb)->s_orphan_lock);
return err;
out_brelse:
brelse(iloc.bh);
goto out_err;
}
| 20,528 |
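The hang described above comes from a cleanup loop whose per-inode handler could return without unlinking the in-memory list entry, so the loop never made progress. A toy C illustration (simple singly linked list, not ext4 code): the handler always unlinks the node, even on the early "nothing to do on disk" path, which is what guarantees the caller's loop terminates.

```c
#include <stdlib.h>

struct node {
    struct node *next;
    int needs_disk_update;
};

/* Always unlink the node from the in-memory list, even when there is
 * nothing to write back; returning early *before* unlinking is what
 * turns the caller's cleanup loop into an infinite loop. */
static void orphan_del(struct node **head, struct node *n)
{
    struct node **pp;

    for (pp = head; *pp != NULL; pp = &(*pp)->next) {
        if (*pp == n) {
            *pp = n->next;      /* unlink first ... */
            break;
        }
    }
    if (!n->needs_disk_update)
        return;                 /* ... then it is safe to skip disk work */
    /* ...journal/disk update would go here... */
}

/* Caller's cleanup loop: it only makes progress because orphan_del()
 * removes the head entry on every call. */
static void orphan_cleanup(struct node **head)
{
    while (*head != NULL) {
        struct node *n = *head;
        orphan_del(head, n);
        free(n);                /* toy example: reclaim the node */
    }
}
```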
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: Track::Track(
Segment* pSegment,
long long element_start,
long long element_size) :
m_pSegment(pSegment),
m_element_start(element_start),
m_element_size(element_size),
content_encoding_entries_(NULL),
content_encoding_entries_end_(NULL)
{
}
Commit Message: libwebm: Pull from upstream
Rolling mkvparser from upstream. Primarily for fixing a bug on parsing
failures with certain Opus WebM files.
Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae
The diff is so huge because there were some style clean ups upstream.
But it was ensured that there were no breaking changes when the style
clean ups was done upstream.
Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039
CWE ID: CWE-119 | Track::Track(Segment* pSegment, long long element_start, long long element_size)
: m_pSegment(pSegment),
m_element_start(element_start),
m_element_size(element_size),
content_encoding_entries_(NULL),
content_encoding_entries_end_(NULL) {}
Track::~Track() {
Info& info = const_cast<Info&>(m_info);
info.Clear();
ContentEncoding** i = content_encoding_entries_;
ContentEncoding** const j = content_encoding_entries_end_;
while (i != j) {
ContentEncoding* const encoding = *i++;
delete encoding;
}
delete[] content_encoding_entries_;
}
| 9,954 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static int airspy_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct airspy *s;
int ret;
u8 u8tmp, buf[BUF_SIZE];
s = kzalloc(sizeof(struct airspy), GFP_KERNEL);
if (s == NULL) {
dev_err(&intf->dev, "Could not allocate memory for state\n");
return -ENOMEM;
}
mutex_init(&s->v4l2_lock);
mutex_init(&s->vb_queue_lock);
spin_lock_init(&s->queued_bufs_lock);
INIT_LIST_HEAD(&s->queued_bufs);
s->dev = &intf->dev;
s->udev = interface_to_usbdev(intf);
s->f_adc = bands[0].rangelow;
s->f_rf = bands_rf[0].rangelow;
s->pixelformat = formats[0].pixelformat;
s->buffersize = formats[0].buffersize;
/* Detect device */
ret = airspy_ctrl_msg(s, CMD_BOARD_ID_READ, 0, 0, &u8tmp, 1);
if (ret == 0)
ret = airspy_ctrl_msg(s, CMD_VERSION_STRING_READ, 0, 0,
buf, BUF_SIZE);
if (ret) {
dev_err(s->dev, "Could not detect board\n");
goto err_free_mem;
}
buf[BUF_SIZE - 1] = '\0';
dev_info(s->dev, "Board ID: %02x\n", u8tmp);
dev_info(s->dev, "Firmware version: %s\n", buf);
/* Init videobuf2 queue structure */
s->vb_queue.type = V4L2_BUF_TYPE_SDR_CAPTURE;
s->vb_queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
s->vb_queue.drv_priv = s;
s->vb_queue.buf_struct_size = sizeof(struct airspy_frame_buf);
s->vb_queue.ops = &airspy_vb2_ops;
s->vb_queue.mem_ops = &vb2_vmalloc_memops;
s->vb_queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
ret = vb2_queue_init(&s->vb_queue);
if (ret) {
dev_err(s->dev, "Could not initialize vb2 queue\n");
goto err_free_mem;
}
/* Init video_device structure */
s->vdev = airspy_template;
s->vdev.queue = &s->vb_queue;
s->vdev.queue->lock = &s->vb_queue_lock;
video_set_drvdata(&s->vdev, s);
/* Register the v4l2_device structure */
s->v4l2_dev.release = airspy_video_release;
ret = v4l2_device_register(&intf->dev, &s->v4l2_dev);
if (ret) {
dev_err(s->dev, "Failed to register v4l2-device (%d)\n", ret);
goto err_free_mem;
}
/* Register controls */
v4l2_ctrl_handler_init(&s->hdl, 5);
s->lna_gain_auto = v4l2_ctrl_new_std(&s->hdl, &airspy_ctrl_ops,
V4L2_CID_RF_TUNER_LNA_GAIN_AUTO, 0, 1, 1, 0);
s->lna_gain = v4l2_ctrl_new_std(&s->hdl, &airspy_ctrl_ops,
V4L2_CID_RF_TUNER_LNA_GAIN, 0, 14, 1, 8);
v4l2_ctrl_auto_cluster(2, &s->lna_gain_auto, 0, false);
s->mixer_gain_auto = v4l2_ctrl_new_std(&s->hdl, &airspy_ctrl_ops,
V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO, 0, 1, 1, 0);
s->mixer_gain = v4l2_ctrl_new_std(&s->hdl, &airspy_ctrl_ops,
V4L2_CID_RF_TUNER_MIXER_GAIN, 0, 15, 1, 8);
v4l2_ctrl_auto_cluster(2, &s->mixer_gain_auto, 0, false);
s->if_gain = v4l2_ctrl_new_std(&s->hdl, &airspy_ctrl_ops,
V4L2_CID_RF_TUNER_IF_GAIN, 0, 15, 1, 0);
if (s->hdl.error) {
ret = s->hdl.error;
dev_err(s->dev, "Could not initialize controls\n");
goto err_free_controls;
}
v4l2_ctrl_handler_setup(&s->hdl);
s->v4l2_dev.ctrl_handler = &s->hdl;
s->vdev.v4l2_dev = &s->v4l2_dev;
s->vdev.lock = &s->v4l2_lock;
ret = video_register_device(&s->vdev, VFL_TYPE_SDR, -1);
if (ret) {
dev_err(s->dev, "Failed to register as video device (%d)\n",
ret);
goto err_unregister_v4l2_dev;
}
dev_info(s->dev, "Registered as %s\n",
video_device_node_name(&s->vdev));
dev_notice(s->dev, "SDR API is still slightly experimental and functionality changes may follow\n");
return 0;
err_free_controls:
v4l2_ctrl_handler_free(&s->hdl);
err_unregister_v4l2_dev:
v4l2_device_unregister(&s->v4l2_dev);
err_free_mem:
kfree(s);
return ret;
}
Commit Message: media: fix airspy usb probe error path
Fix a memory leak on probe error of the airspy usb device driver.
The problem is triggered when more than 64 usb devices register with
v4l2 of type VFL_TYPE_SDR or VFL_TYPE_SUBDEV.
The memory leak is caused by the probe function of the airspy driver
mishandeling errors and not freeing the corresponding control structures
when an error occours registering the device to v4l2 core.
A badusb device can emulate 64 of these devices, and then through
continual emulated connect/disconnect of the 65th device, cause the
kernel to run out of RAM and crash the kernel, thus causing a local DOS
vulnerability.
Fixes CVE-2016-5400
Signed-off-by: James Patrick-Evans <[email protected]>
Reviewed-by: Kees Cook <[email protected]>
Cc: [email protected] # 3.17+
Signed-off-by: Linus Torvalds <[email protected]>
CWE ID: CWE-119 | static int airspy_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct airspy *s;
int ret;
u8 u8tmp, buf[BUF_SIZE];
s = kzalloc(sizeof(struct airspy), GFP_KERNEL);
if (s == NULL) {
dev_err(&intf->dev, "Could not allocate memory for state\n");
return -ENOMEM;
}
mutex_init(&s->v4l2_lock);
mutex_init(&s->vb_queue_lock);
spin_lock_init(&s->queued_bufs_lock);
INIT_LIST_HEAD(&s->queued_bufs);
s->dev = &intf->dev;
s->udev = interface_to_usbdev(intf);
s->f_adc = bands[0].rangelow;
s->f_rf = bands_rf[0].rangelow;
s->pixelformat = formats[0].pixelformat;
s->buffersize = formats[0].buffersize;
/* Detect device */
ret = airspy_ctrl_msg(s, CMD_BOARD_ID_READ, 0, 0, &u8tmp, 1);
if (ret == 0)
ret = airspy_ctrl_msg(s, CMD_VERSION_STRING_READ, 0, 0,
buf, BUF_SIZE);
if (ret) {
dev_err(s->dev, "Could not detect board\n");
goto err_free_mem;
}
buf[BUF_SIZE - 1] = '\0';
dev_info(s->dev, "Board ID: %02x\n", u8tmp);
dev_info(s->dev, "Firmware version: %s\n", buf);
/* Init videobuf2 queue structure */
s->vb_queue.type = V4L2_BUF_TYPE_SDR_CAPTURE;
s->vb_queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
s->vb_queue.drv_priv = s;
s->vb_queue.buf_struct_size = sizeof(struct airspy_frame_buf);
s->vb_queue.ops = &airspy_vb2_ops;
s->vb_queue.mem_ops = &vb2_vmalloc_memops;
s->vb_queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
ret = vb2_queue_init(&s->vb_queue);
if (ret) {
dev_err(s->dev, "Could not initialize vb2 queue\n");
goto err_free_mem;
}
/* Init video_device structure */
s->vdev = airspy_template;
s->vdev.queue = &s->vb_queue;
s->vdev.queue->lock = &s->vb_queue_lock;
video_set_drvdata(&s->vdev, s);
/* Register the v4l2_device structure */
s->v4l2_dev.release = airspy_video_release;
ret = v4l2_device_register(&intf->dev, &s->v4l2_dev);
if (ret) {
dev_err(s->dev, "Failed to register v4l2-device (%d)\n", ret);
goto err_free_mem;
}
/* Register controls */
v4l2_ctrl_handler_init(&s->hdl, 5);
s->lna_gain_auto = v4l2_ctrl_new_std(&s->hdl, &airspy_ctrl_ops,
V4L2_CID_RF_TUNER_LNA_GAIN_AUTO, 0, 1, 1, 0);
s->lna_gain = v4l2_ctrl_new_std(&s->hdl, &airspy_ctrl_ops,
V4L2_CID_RF_TUNER_LNA_GAIN, 0, 14, 1, 8);
v4l2_ctrl_auto_cluster(2, &s->lna_gain_auto, 0, false);
s->mixer_gain_auto = v4l2_ctrl_new_std(&s->hdl, &airspy_ctrl_ops,
V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO, 0, 1, 1, 0);
s->mixer_gain = v4l2_ctrl_new_std(&s->hdl, &airspy_ctrl_ops,
V4L2_CID_RF_TUNER_MIXER_GAIN, 0, 15, 1, 8);
v4l2_ctrl_auto_cluster(2, &s->mixer_gain_auto, 0, false);
s->if_gain = v4l2_ctrl_new_std(&s->hdl, &airspy_ctrl_ops,
V4L2_CID_RF_TUNER_IF_GAIN, 0, 15, 1, 0);
if (s->hdl.error) {
ret = s->hdl.error;
dev_err(s->dev, "Could not initialize controls\n");
goto err_free_controls;
}
v4l2_ctrl_handler_setup(&s->hdl);
s->v4l2_dev.ctrl_handler = &s->hdl;
s->vdev.v4l2_dev = &s->v4l2_dev;
s->vdev.lock = &s->v4l2_lock;
ret = video_register_device(&s->vdev, VFL_TYPE_SDR, -1);
if (ret) {
dev_err(s->dev, "Failed to register as video device (%d)\n",
ret);
goto err_free_controls;
}
dev_info(s->dev, "Registered as %s\n",
video_device_node_name(&s->vdev));
dev_notice(s->dev, "SDR API is still slightly experimental and functionality changes may follow\n");
return 0;
err_free_controls:
v4l2_ctrl_handler_free(&s->hdl);
v4l2_device_unregister(&s->v4l2_dev);
err_free_mem:
kfree(s);
return ret;
}
| 28,967 |
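The CVE-2016-5400 fix is an error-unwind ordering problem: after v4l2_device_register() succeeds, a later failure has to branch to a label that also frees the control handler and unregisters the device. Below is a stripped-down, userspace C sketch of the staged goto-cleanup idiom (hypothetical resources, not the driver's API); jumping to a label that is too shallow is exactly the leak class the commit fixes.

```c
#include <stdio.h>
#include <stdlib.h>

/* Staged goto-unwind: each failure jumps to the deepest label that frees
 * everything acquired so far; the labels stack in reverse acquisition
 * order.  Jumping to err_free_state instead of err_free_buffer below
 * would silently leak 'buffer'. */
static int probe_example(const char *path)
{
    char *state, *buffer;
    FILE *f;

    state = malloc(256);
    if (state == NULL)
        return -1;

    buffer = malloc(4096);
    if (buffer == NULL)
        goto err_free_state;

    f = fopen(path, "rb");
    if (f == NULL)
        goto err_free_buffer;   /* must undo buffer AND state */

    /* ...a real driver would register and keep these resources; the toy
     * example just cleans up and reports success... */
    fclose(f);
    free(buffer);
    free(state);
    return 0;

err_free_buffer:
    free(buffer);
err_free_state:
    free(state);
    return -1;
}
```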
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void HostCache::Set(const Key& key,
const Entry& entry,
base::TimeTicks now,
base::TimeDelta ttl) {
TRACE_EVENT0(kNetTracingCategory, "HostCache::Set");
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
if (caching_is_disabled())
return;
auto it = entries_.find(key);
if (it != entries_.end()) {
bool is_stale = it->second.IsStale(now, network_changes_);
RecordSet(is_stale ? SET_UPDATE_STALE : SET_UPDATE_VALID, now, &it->second,
entry);
entries_.erase(it);
} else {
if (size() == max_entries_)
EvictOneEntry(now);
RecordSet(SET_INSERT, now, nullptr, entry);
}
AddEntry(Key(key), Entry(entry, now, ttl, network_changes_));
}
Commit Message: Add PersistenceDelegate to HostCache
PersistenceDelegate is a new interface for persisting the contents of
the HostCache. This commit includes the interface itself, the logic in
HostCache for interacting with it, and a mock implementation of the
interface for testing. It does not include support for immediate data
removal since that won't be needed for the currently planned use case.
BUG=605149
Review-Url: https://codereview.chromium.org/2943143002
Cr-Commit-Position: refs/heads/master@{#481015}
CWE ID: | void HostCache::Set(const Key& key,
const Entry& entry,
base::TimeTicks now,
base::TimeDelta ttl) {
TRACE_EVENT0(kNetTracingCategory, "HostCache::Set");
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
if (caching_is_disabled())
return;
bool result_changed = false;
auto it = entries_.find(key);
if (it != entries_.end()) {
bool is_stale = it->second.IsStale(now, network_changes_);
AddressListDeltaType delta =
FindAddressListDeltaType(it->second.addresses(), entry.addresses());
RecordSet(is_stale ? SET_UPDATE_STALE : SET_UPDATE_VALID, now, &it->second,
entry, delta);
result_changed =
entry.error() == OK &&
(it->second.error() != entry.error() || delta != DELTA_IDENTICAL);
entries_.erase(it);
} else {
result_changed = true;
if (size() == max_entries_)
EvictOneEntry(now);
RecordSet(SET_INSERT, now, nullptr, entry, DELTA_DISJOINT);
}
AddEntry(Key(key), Entry(entry, now, ttl, network_changes_));
if (delegate_ && result_changed)
delegate_->ScheduleWrite();
}
| 27,469 |
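The HostCache change above only calls delegate_->ScheduleWrite() when the stored result actually changed. A toy C sketch of that "persist only on change" idea, using a hypothetical single-slot cache and callback rather than Chromium's API:

```c
#include <stdbool.h>
#include <stddef.h>

typedef void (*schedule_write_fn)(void *ctx);

struct cache_entry { int key; int value; bool valid; };

struct cache {
    struct cache_entry entry;       /* toy cache: a single slot */
    schedule_write_fn schedule_write;
    void *ctx;
};

/* Insert/update; persistence is only scheduled when the observable
 * result changed, so unchanged refreshes cause no write-back churn. */
void cache_set(struct cache *c, int key, int value)
{
    bool changed = !c->entry.valid ||
                   c->entry.key != key ||
                   c->entry.value != value;

    c->entry.key = key;
    c->entry.value = value;
    c->entry.valid = true;

    if (changed && c->schedule_write != NULL)
        c->schedule_write(c->ctx);
}
```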
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
{
struct fib* srbfib;
int status;
struct aac_srb *srbcmd = NULL;
struct user_aac_srb *user_srbcmd = NULL;
struct user_aac_srb __user *user_srb = arg;
struct aac_srb_reply __user *user_reply;
struct aac_srb_reply* reply;
u32 fibsize = 0;
u32 flags = 0;
s32 rcode = 0;
u32 data_dir;
void __user *sg_user[32];
void *sg_list[32];
u32 sg_indx = 0;
u32 byte_count = 0;
u32 actual_fibsize64, actual_fibsize = 0;
int i;
if (dev->in_reset) {
dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
return -EBUSY;
}
if (!capable(CAP_SYS_ADMIN)){
dprintk((KERN_DEBUG"aacraid: No permission to send raw srb\n"));
return -EPERM;
}
/*
* Allocate and initialize a Fib then setup a SRB command
*/
if (!(srbfib = aac_fib_alloc(dev))) {
return -ENOMEM;
}
aac_fib_init(srbfib);
/* raw_srb FIB is not FastResponseCapable */
srbfib->hw_fib_va->header.XferState &= ~cpu_to_le32(FastResponseCapable);
srbcmd = (struct aac_srb*) fib_data(srbfib);
memset(sg_list, 0, sizeof(sg_list)); /* cleanup may take issue */
if(copy_from_user(&fibsize, &user_srb->count,sizeof(u32))){
dprintk((KERN_DEBUG"aacraid: Could not copy data size from user\n"));
rcode = -EFAULT;
goto cleanup;
}
if (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr))) {
rcode = -EINVAL;
goto cleanup;
}
user_srbcmd = kmalloc(fibsize, GFP_KERNEL);
if (!user_srbcmd) {
dprintk((KERN_DEBUG"aacraid: Could not make a copy of the srb\n"));
rcode = -ENOMEM;
goto cleanup;
}
if(copy_from_user(user_srbcmd, user_srb,fibsize)){
dprintk((KERN_DEBUG"aacraid: Could not copy srb from user\n"));
rcode = -EFAULT;
goto cleanup;
}
user_reply = arg+fibsize;
flags = user_srbcmd->flags; /* from user in cpu order */
srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); // Force this
srbcmd->channel = cpu_to_le32(user_srbcmd->channel);
srbcmd->id = cpu_to_le32(user_srbcmd->id);
srbcmd->lun = cpu_to_le32(user_srbcmd->lun);
srbcmd->timeout = cpu_to_le32(user_srbcmd->timeout);
srbcmd->flags = cpu_to_le32(flags);
srbcmd->retry_limit = 0; // Obsolete parameter
srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size);
memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb));
switch (flags & (SRB_DataIn | SRB_DataOut)) {
case SRB_DataOut:
data_dir = DMA_TO_DEVICE;
break;
case (SRB_DataIn | SRB_DataOut):
data_dir = DMA_BIDIRECTIONAL;
break;
case SRB_DataIn:
data_dir = DMA_FROM_DEVICE;
break;
default:
data_dir = DMA_NONE;
}
if (user_srbcmd->sg.count > ARRAY_SIZE(sg_list)) {
dprintk((KERN_DEBUG"aacraid: too many sg entries %d\n",
le32_to_cpu(srbcmd->sg.count)));
rcode = -EINVAL;
goto cleanup;
}
actual_fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) +
((user_srbcmd->sg.count & 0xff) * sizeof(struct sgentry));
actual_fibsize64 = actual_fibsize + (user_srbcmd->sg.count & 0xff) *
(sizeof(struct sgentry64) - sizeof(struct sgentry));
/* User made a mistake - should not continue */
if ((actual_fibsize != fibsize) && (actual_fibsize64 != fibsize)) {
dprintk((KERN_DEBUG"aacraid: Bad Size specified in "
"Raw SRB command calculated fibsize=%lu;%lu "
"user_srbcmd->sg.count=%d aac_srb=%lu sgentry=%lu;%lu "
"issued fibsize=%d\n",
actual_fibsize, actual_fibsize64, user_srbcmd->sg.count,
sizeof(struct aac_srb), sizeof(struct sgentry),
sizeof(struct sgentry64), fibsize));
rcode = -EINVAL;
goto cleanup;
}
if ((data_dir == DMA_NONE) && user_srbcmd->sg.count) {
dprintk((KERN_DEBUG"aacraid: SG with no direction specified in Raw SRB command\n"));
rcode = -EINVAL;
goto cleanup;
}
byte_count = 0;
if (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64) {
struct user_sgmap64* upsg = (struct user_sgmap64*)&user_srbcmd->sg;
struct sgmap64* psg = (struct sgmap64*)&srbcmd->sg;
/*
* This should also catch if user used the 32 bit sgmap
*/
if (actual_fibsize64 == fibsize) {
actual_fibsize = actual_fibsize64;
for (i = 0; i < upsg->count; i++) {
u64 addr;
void* p;
if (upsg->sg[i].count >
((dev->adapter_info.options &
AAC_OPT_NEW_COMM) ?
(dev->scsi_host_ptr->max_sectors << 9) :
65536)) {
rcode = -EINVAL;
goto cleanup;
}
/* Does this really need to be GFP_DMA? */
p = kmalloc(upsg->sg[i].count,GFP_KERNEL|__GFP_DMA);
if(!p) {
dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
upsg->sg[i].count,i,upsg->count));
rcode = -ENOMEM;
goto cleanup;
}
addr = (u64)upsg->sg[i].addr[0];
addr += ((u64)upsg->sg[i].addr[1]) << 32;
sg_user[i] = (void __user *)(uintptr_t)addr;
sg_list[i] = p; // save so we can clean up later
sg_indx = i;
if (flags & SRB_DataOut) {
if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){
dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
rcode = -EFAULT;
goto cleanup;
}
}
addr = pci_map_single(dev->pdev, p, upsg->sg[i].count, data_dir);
psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
byte_count += upsg->sg[i].count;
psg->sg[i].count = cpu_to_le32(upsg->sg[i].count);
}
} else {
struct user_sgmap* usg;
usg = kmalloc(actual_fibsize - sizeof(struct aac_srb)
+ sizeof(struct sgmap), GFP_KERNEL);
if (!usg) {
dprintk((KERN_DEBUG"aacraid: Allocation error in Raw SRB command\n"));
rcode = -ENOMEM;
goto cleanup;
}
memcpy (usg, upsg, actual_fibsize - sizeof(struct aac_srb)
+ sizeof(struct sgmap));
actual_fibsize = actual_fibsize64;
for (i = 0; i < usg->count; i++) {
u64 addr;
void* p;
if (usg->sg[i].count >
((dev->adapter_info.options &
AAC_OPT_NEW_COMM) ?
(dev->scsi_host_ptr->max_sectors << 9) :
65536)) {
kfree(usg);
rcode = -EINVAL;
goto cleanup;
}
/* Does this really need to be GFP_DMA? */
p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
if(!p) {
dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
usg->sg[i].count,i,usg->count));
kfree(usg);
rcode = -ENOMEM;
goto cleanup;
}
sg_user[i] = (void __user *)(uintptr_t)usg->sg[i].addr;
sg_list[i] = p; // save so we can clean up later
sg_indx = i;
if (flags & SRB_DataOut) {
if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){
kfree (usg);
dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
rcode = -EFAULT;
goto cleanup;
}
}
addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir);
psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
byte_count += usg->sg[i].count;
psg->sg[i].count = cpu_to_le32(usg->sg[i].count);
}
kfree (usg);
}
srbcmd->count = cpu_to_le32(byte_count);
psg->count = cpu_to_le32(sg_indx+1);
status = aac_fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,NULL,NULL);
} else {
struct user_sgmap* upsg = &user_srbcmd->sg;
struct sgmap* psg = &srbcmd->sg;
if (actual_fibsize64 == fibsize) {
struct user_sgmap64* usg = (struct user_sgmap64 *)upsg;
for (i = 0; i < upsg->count; i++) {
uintptr_t addr;
void* p;
if (usg->sg[i].count >
((dev->adapter_info.options &
AAC_OPT_NEW_COMM) ?
(dev->scsi_host_ptr->max_sectors << 9) :
65536)) {
rcode = -EINVAL;
goto cleanup;
}
/* Does this really need to be GFP_DMA? */
p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
if(!p) {
dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
usg->sg[i].count,i,usg->count));
rcode = -ENOMEM;
goto cleanup;
}
addr = (u64)usg->sg[i].addr[0];
addr += ((u64)usg->sg[i].addr[1]) << 32;
sg_user[i] = (void __user *)addr;
sg_list[i] = p; // save so we can clean up later
sg_indx = i;
if (flags & SRB_DataOut) {
if(copy_from_user(p,sg_user[i],usg->sg[i].count)){
dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
rcode = -EFAULT;
goto cleanup;
}
}
addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir);
psg->sg[i].addr = cpu_to_le32(addr & 0xffffffff);
byte_count += usg->sg[i].count;
psg->sg[i].count = cpu_to_le32(usg->sg[i].count);
}
} else {
for (i = 0; i < upsg->count; i++) {
dma_addr_t addr;
void* p;
if (upsg->sg[i].count >
((dev->adapter_info.options &
AAC_OPT_NEW_COMM) ?
(dev->scsi_host_ptr->max_sectors << 9) :
65536)) {
rcode = -EINVAL;
goto cleanup;
}
p = kmalloc(upsg->sg[i].count, GFP_KERNEL);
if (!p) {
dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
upsg->sg[i].count, i, upsg->count));
rcode = -ENOMEM;
goto cleanup;
}
sg_user[i] = (void __user *)(uintptr_t)upsg->sg[i].addr;
sg_list[i] = p; // save so we can clean up later
sg_indx = i;
if (flags & SRB_DataOut) {
if(copy_from_user(p, sg_user[i],
upsg->sg[i].count)) {
dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
rcode = -EFAULT;
goto cleanup;
}
}
addr = pci_map_single(dev->pdev, p,
upsg->sg[i].count, data_dir);
psg->sg[i].addr = cpu_to_le32(addr);
byte_count += upsg->sg[i].count;
psg->sg[i].count = cpu_to_le32(upsg->sg[i].count);
}
}
srbcmd->count = cpu_to_le32(byte_count);
psg->count = cpu_to_le32(sg_indx+1);
status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
}
if (status == -ERESTARTSYS) {
rcode = -ERESTARTSYS;
goto cleanup;
}
if (status != 0){
dprintk((KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n"));
rcode = -ENXIO;
goto cleanup;
}
if (flags & SRB_DataIn) {
for(i = 0 ; i <= sg_indx; i++){
byte_count = le32_to_cpu(
(dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)
? ((struct sgmap64*)&srbcmd->sg)->sg[i].count
: srbcmd->sg.sg[i].count);
if(copy_to_user(sg_user[i], sg_list[i], byte_count)){
dprintk((KERN_DEBUG"aacraid: Could not copy sg data to user\n"));
rcode = -EFAULT;
goto cleanup;
}
}
}
reply = (struct aac_srb_reply *) fib_data(srbfib);
if(copy_to_user(user_reply,reply,sizeof(struct aac_srb_reply))){
dprintk((KERN_DEBUG"aacraid: Could not copy reply to user\n"));
rcode = -EFAULT;
goto cleanup;
}
cleanup:
kfree(user_srbcmd);
for(i=0; i <= sg_indx; i++){
kfree(sg_list[i]);
}
if (rcode != -ERESTARTSYS) {
aac_fib_complete(srbfib);
aac_fib_free(srbfib);
}
return rcode;
}
Commit Message: aacraid: prevent invalid pointer dereference
It appears that the driver runs into a problem here if fibsize is too small
because we allocate user_srbcmd with fibsize size only but later we
access it until user_srbcmd->sg.count to copy it over to srbcmd.
It is not correct to test (fibsize < sizeof(*user_srbcmd)) because this
structure already includes one sg element and this is not needed for
commands without data. So, we would recommend to add the following
(instead of test for fibsize == 0).
Signed-off-by: Mahesh Rajashekhara <[email protected]>
Reported-by: Nico Golde <[email protected]>
Reported-by: Fabian Yamaguchi <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
CWE ID: CWE-20 | static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
{
struct fib* srbfib;
int status;
struct aac_srb *srbcmd = NULL;
struct user_aac_srb *user_srbcmd = NULL;
struct user_aac_srb __user *user_srb = arg;
struct aac_srb_reply __user *user_reply;
struct aac_srb_reply* reply;
u32 fibsize = 0;
u32 flags = 0;
s32 rcode = 0;
u32 data_dir;
void __user *sg_user[32];
void *sg_list[32];
u32 sg_indx = 0;
u32 byte_count = 0;
u32 actual_fibsize64, actual_fibsize = 0;
int i;
if (dev->in_reset) {
dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
return -EBUSY;
}
if (!capable(CAP_SYS_ADMIN)){
dprintk((KERN_DEBUG"aacraid: No permission to send raw srb\n"));
return -EPERM;
}
/*
* Allocate and initialize a Fib then setup a SRB command
*/
if (!(srbfib = aac_fib_alloc(dev))) {
return -ENOMEM;
}
aac_fib_init(srbfib);
/* raw_srb FIB is not FastResponseCapable */
srbfib->hw_fib_va->header.XferState &= ~cpu_to_le32(FastResponseCapable);
srbcmd = (struct aac_srb*) fib_data(srbfib);
memset(sg_list, 0, sizeof(sg_list)); /* cleanup may take issue */
if(copy_from_user(&fibsize, &user_srb->count,sizeof(u32))){
dprintk((KERN_DEBUG"aacraid: Could not copy data size from user\n"));
rcode = -EFAULT;
goto cleanup;
}
if ((fibsize < (sizeof(struct user_aac_srb) - sizeof(struct user_sgentry))) ||
(fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))) {
rcode = -EINVAL;
goto cleanup;
}
user_srbcmd = kmalloc(fibsize, GFP_KERNEL);
if (!user_srbcmd) {
dprintk((KERN_DEBUG"aacraid: Could not make a copy of the srb\n"));
rcode = -ENOMEM;
goto cleanup;
}
if(copy_from_user(user_srbcmd, user_srb,fibsize)){
dprintk((KERN_DEBUG"aacraid: Could not copy srb from user\n"));
rcode = -EFAULT;
goto cleanup;
}
user_reply = arg+fibsize;
flags = user_srbcmd->flags; /* from user in cpu order */
srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); // Force this
srbcmd->channel = cpu_to_le32(user_srbcmd->channel);
srbcmd->id = cpu_to_le32(user_srbcmd->id);
srbcmd->lun = cpu_to_le32(user_srbcmd->lun);
srbcmd->timeout = cpu_to_le32(user_srbcmd->timeout);
srbcmd->flags = cpu_to_le32(flags);
srbcmd->retry_limit = 0; // Obsolete parameter
srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size);
memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb));
switch (flags & (SRB_DataIn | SRB_DataOut)) {
case SRB_DataOut:
data_dir = DMA_TO_DEVICE;
break;
case (SRB_DataIn | SRB_DataOut):
data_dir = DMA_BIDIRECTIONAL;
break;
case SRB_DataIn:
data_dir = DMA_FROM_DEVICE;
break;
default:
data_dir = DMA_NONE;
}
if (user_srbcmd->sg.count > ARRAY_SIZE(sg_list)) {
dprintk((KERN_DEBUG"aacraid: too many sg entries %d\n",
le32_to_cpu(srbcmd->sg.count)));
rcode = -EINVAL;
goto cleanup;
}
actual_fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) +
((user_srbcmd->sg.count & 0xff) * sizeof(struct sgentry));
actual_fibsize64 = actual_fibsize + (user_srbcmd->sg.count & 0xff) *
(sizeof(struct sgentry64) - sizeof(struct sgentry));
/* User made a mistake - should not continue */
if ((actual_fibsize != fibsize) && (actual_fibsize64 != fibsize)) {
dprintk((KERN_DEBUG"aacraid: Bad Size specified in "
"Raw SRB command calculated fibsize=%lu;%lu "
"user_srbcmd->sg.count=%d aac_srb=%lu sgentry=%lu;%lu "
"issued fibsize=%d\n",
actual_fibsize, actual_fibsize64, user_srbcmd->sg.count,
sizeof(struct aac_srb), sizeof(struct sgentry),
sizeof(struct sgentry64), fibsize));
rcode = -EINVAL;
goto cleanup;
}
if ((data_dir == DMA_NONE) && user_srbcmd->sg.count) {
dprintk((KERN_DEBUG"aacraid: SG with no direction specified in Raw SRB command\n"));
rcode = -EINVAL;
goto cleanup;
}
byte_count = 0;
if (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64) {
struct user_sgmap64* upsg = (struct user_sgmap64*)&user_srbcmd->sg;
struct sgmap64* psg = (struct sgmap64*)&srbcmd->sg;
/*
* This should also catch if user used the 32 bit sgmap
*/
if (actual_fibsize64 == fibsize) {
actual_fibsize = actual_fibsize64;
for (i = 0; i < upsg->count; i++) {
u64 addr;
void* p;
if (upsg->sg[i].count >
((dev->adapter_info.options &
AAC_OPT_NEW_COMM) ?
(dev->scsi_host_ptr->max_sectors << 9) :
65536)) {
rcode = -EINVAL;
goto cleanup;
}
/* Does this really need to be GFP_DMA? */
p = kmalloc(upsg->sg[i].count,GFP_KERNEL|__GFP_DMA);
if(!p) {
dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
upsg->sg[i].count,i,upsg->count));
rcode = -ENOMEM;
goto cleanup;
}
addr = (u64)upsg->sg[i].addr[0];
addr += ((u64)upsg->sg[i].addr[1]) << 32;
sg_user[i] = (void __user *)(uintptr_t)addr;
sg_list[i] = p; // save so we can clean up later
sg_indx = i;
if (flags & SRB_DataOut) {
if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){
dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
rcode = -EFAULT;
goto cleanup;
}
}
addr = pci_map_single(dev->pdev, p, upsg->sg[i].count, data_dir);
psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
byte_count += upsg->sg[i].count;
psg->sg[i].count = cpu_to_le32(upsg->sg[i].count);
}
} else {
struct user_sgmap* usg;
usg = kmalloc(actual_fibsize - sizeof(struct aac_srb)
+ sizeof(struct sgmap), GFP_KERNEL);
if (!usg) {
dprintk((KERN_DEBUG"aacraid: Allocation error in Raw SRB command\n"));
rcode = -ENOMEM;
goto cleanup;
}
memcpy (usg, upsg, actual_fibsize - sizeof(struct aac_srb)
+ sizeof(struct sgmap));
actual_fibsize = actual_fibsize64;
for (i = 0; i < usg->count; i++) {
u64 addr;
void* p;
if (usg->sg[i].count >
((dev->adapter_info.options &
AAC_OPT_NEW_COMM) ?
(dev->scsi_host_ptr->max_sectors << 9) :
65536)) {
kfree(usg);
rcode = -EINVAL;
goto cleanup;
}
/* Does this really need to be GFP_DMA? */
p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
if(!p) {
dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
usg->sg[i].count,i,usg->count));
kfree(usg);
rcode = -ENOMEM;
goto cleanup;
}
sg_user[i] = (void __user *)(uintptr_t)usg->sg[i].addr;
sg_list[i] = p; // save so we can clean up later
sg_indx = i;
if (flags & SRB_DataOut) {
if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){
kfree (usg);
dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
rcode = -EFAULT;
goto cleanup;
}
}
addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir);
psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
byte_count += usg->sg[i].count;
psg->sg[i].count = cpu_to_le32(usg->sg[i].count);
}
kfree (usg);
}
srbcmd->count = cpu_to_le32(byte_count);
psg->count = cpu_to_le32(sg_indx+1);
status = aac_fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,NULL,NULL);
} else {
struct user_sgmap* upsg = &user_srbcmd->sg;
struct sgmap* psg = &srbcmd->sg;
if (actual_fibsize64 == fibsize) {
struct user_sgmap64* usg = (struct user_sgmap64 *)upsg;
for (i = 0; i < upsg->count; i++) {
uintptr_t addr;
void* p;
if (usg->sg[i].count >
((dev->adapter_info.options &
AAC_OPT_NEW_COMM) ?
(dev->scsi_host_ptr->max_sectors << 9) :
65536)) {
rcode = -EINVAL;
goto cleanup;
}
/* Does this really need to be GFP_DMA? */
p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
if(!p) {
dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
usg->sg[i].count,i,usg->count));
rcode = -ENOMEM;
goto cleanup;
}
addr = (u64)usg->sg[i].addr[0];
addr += ((u64)usg->sg[i].addr[1]) << 32;
sg_user[i] = (void __user *)addr;
sg_list[i] = p; // save so we can clean up later
sg_indx = i;
if (flags & SRB_DataOut) {
if(copy_from_user(p,sg_user[i],usg->sg[i].count)){
dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
rcode = -EFAULT;
goto cleanup;
}
}
addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir);
psg->sg[i].addr = cpu_to_le32(addr & 0xffffffff);
byte_count += usg->sg[i].count;
psg->sg[i].count = cpu_to_le32(usg->sg[i].count);
}
} else {
for (i = 0; i < upsg->count; i++) {
dma_addr_t addr;
void* p;
if (upsg->sg[i].count >
((dev->adapter_info.options &
AAC_OPT_NEW_COMM) ?
(dev->scsi_host_ptr->max_sectors << 9) :
65536)) {
rcode = -EINVAL;
goto cleanup;
}
p = kmalloc(upsg->sg[i].count, GFP_KERNEL);
if (!p) {
dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
upsg->sg[i].count, i, upsg->count));
rcode = -ENOMEM;
goto cleanup;
}
sg_user[i] = (void __user *)(uintptr_t)upsg->sg[i].addr;
sg_list[i] = p; // save so we can clean up later
sg_indx = i;
if (flags & SRB_DataOut) {
if(copy_from_user(p, sg_user[i],
upsg->sg[i].count)) {
dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
rcode = -EFAULT;
goto cleanup;
}
}
addr = pci_map_single(dev->pdev, p,
upsg->sg[i].count, data_dir);
psg->sg[i].addr = cpu_to_le32(addr);
byte_count += upsg->sg[i].count;
psg->sg[i].count = cpu_to_le32(upsg->sg[i].count);
}
}
srbcmd->count = cpu_to_le32(byte_count);
psg->count = cpu_to_le32(sg_indx+1);
status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
}
if (status == -ERESTARTSYS) {
rcode = -ERESTARTSYS;
goto cleanup;
}
if (status != 0){
dprintk((KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n"));
rcode = -ENXIO;
goto cleanup;
}
if (flags & SRB_DataIn) {
for(i = 0 ; i <= sg_indx; i++){
byte_count = le32_to_cpu(
(dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)
? ((struct sgmap64*)&srbcmd->sg)->sg[i].count
: srbcmd->sg.sg[i].count);
if(copy_to_user(sg_user[i], sg_list[i], byte_count)){
dprintk((KERN_DEBUG"aacraid: Could not copy sg data to user\n"));
rcode = -EFAULT;
goto cleanup;
}
}
}
reply = (struct aac_srb_reply *) fib_data(srbfib);
if(copy_to_user(user_reply,reply,sizeof(struct aac_srb_reply))){
dprintk((KERN_DEBUG"aacraid: Could not copy reply to user\n"));
rcode = -EFAULT;
goto cleanup;
}
cleanup:
kfree(user_srbcmd);
for(i=0; i <= sg_indx; i++){
kfree(sg_list[i]);
}
if (rcode != -ERESTARTSYS) {
aac_fib_complete(srbfib);
aac_fib_free(srbfib);
}
return rcode;
}
| 16,050 |
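The aacraid fix adds a lower bound on the user-supplied fibsize before the structure is copied and its sg.count field is trusted. Below is a compact C sketch of validating a caller-supplied length against both bounds before allocating and copying; the message layout and limits are made up for illustration, not the aacraid FIB format.

```c
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Variable-length message: fixed header plus 'count' entries.  The header
 * already embeds one entry, so the minimum valid size is the struct size
 * minus one entry; checking only the upper bound lets a tiny 'size' slip
 * through, and the later msg->count read walks off the allocation. */
struct entry   { uint32_t addr, len; };
struct message { uint32_t count; struct entry sg[1]; };

#define MSG_MIN_SIZE (sizeof(struct message) - sizeof(struct entry))
#define MSG_MAX_SIZE 4096u

static struct message *copy_message(const void *user_buf, size_t size)
{
    struct message *msg;
    size_t expected;

    if (size < MSG_MIN_SIZE || size > MSG_MAX_SIZE)
        return NULL;                     /* reject before allocating */

    msg = malloc(size);
    if (msg == NULL)
        return NULL;
    memcpy(msg, user_buf, size);         /* stand-in for copy_from_user() */

    /* Bound the declared entry count, then require it to agree with the
     * number of bytes the caller actually supplied. */
    if (msg->count > (MSG_MAX_SIZE - MSG_MIN_SIZE) / sizeof(struct entry))
        goto bad;
    expected = MSG_MIN_SIZE + (size_t)msg->count * sizeof(struct entry);
    if (expected != size)
        goto bad;
    return msg;

bad:
    free(msg);
    return NULL;
}
```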
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void PageSerializer::addImageToResources(ImageResource* image, RenderObject* imageRenderer, const KURL& url)
{
if (!shouldAddURL(url))
return;
if (!image || !image->hasImage() || image->image() == Image::nullImage())
return;
RefPtr<SharedBuffer> data = imageRenderer ? image->imageForRenderer(imageRenderer)->data() : 0;
if (!data)
data = image->image()->data();
addToResources(image, data, url);
}
Commit Message: Revert 162155 "This review merges the two existing page serializ..."
Change r162155 broke the world even though it was landed using the CQ.
> This review merges the two existing page serializers, WebPageSerializerImpl and
> PageSerializer, into one, PageSerializer. In addition to this it moves all
> the old tests from WebPageNewSerializerTest and WebPageSerializerTest to the
> PageSerializerTest structure and splits out one test for MHTML into a new
> MHTMLTest file.
>
> Saving as 'Webpage, Complete', 'Webpage, HTML Only' and as MHTML when the
> 'Save Page as MHTML' flag is enabled now uses the same code, and should thus
> have the same feature set. Meaning that both modes now should be a bit better.
>
> Detailed list of changes:
>
> - PageSerializerTest: Prepare for more DTD test
> - PageSerializerTest: Remove now unneccesary input image test
> - PageSerializerTest: Remove unused WebPageSerializer/Impl code
> - PageSerializerTest: Move data URI morph test
> - PageSerializerTest: Move data URI test
> - PageSerializerTest: Move namespace test
> - PageSerializerTest: Move SVG Image test
> - MHTMLTest: Move MHTML specific test to own test file
> - PageSerializerTest: Delete duplicate XML header test
> - PageSerializerTest: Move blank frame test
> - PageSerializerTest: Move CSS test
> - PageSerializerTest: Add frameset/frame test
> - PageSerializerTest: Move old iframe test
> - PageSerializerTest: Move old elements test
> - Use PageSerizer for saving web pages
> - PageSerializerTest: Test for rewriting links
> - PageSerializer: Add rewrite link accumulator
> - PageSerializer: Serialize images in iframes/frames src
> - PageSerializer: XHTML fix for meta tags
> - PageSerializer: Add presentation CSS
> - PageSerializer: Rename out parameter
>
> BUG=
> [email protected]
>
> Review URL: https://codereview.chromium.org/68613003
[email protected]
Review URL: https://codereview.chromium.org/73673003
git-svn-id: svn://svn.chromium.org/blink/trunk@162156 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: CWE-119 | void PageSerializer::addImageToResources(ImageResource* image, RenderObject* imageRenderer, const KURL& url)
{
if (!shouldAddURL(url))
return;
if (!image || image->image() == Image::nullImage())
return;
RefPtr<SharedBuffer> data = imageRenderer ? image->imageForRenderer(imageRenderer)->data() : 0;
if (!data)
data = image->image()->data();
addToResources(image, data, url);
}
| 24,373 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static size_t WritePSDChannel(const PSDInfo *psd_info,
const ImageInfo *image_info,Image *image,Image *next_image,
const QuantumType quantum_type, unsigned char *compact_pixels,
MagickOffsetType size_offset,const MagickBooleanType separate,
ExceptionInfo *exception)
{
int
y;
MagickBooleanType
monochrome;
QuantumInfo
*quantum_info;
register const Quantum
*p;
register ssize_t
i;
size_t
count,
length;
unsigned char
*pixels;
#ifdef MAGICKCORE_ZLIB_DELEGATE
#define CHUNK 16384
int
flush,
level;
unsigned char
*compressed_pixels;
z_stream
stream;
compressed_pixels=(unsigned char *) NULL;
flush=Z_NO_FLUSH;
#endif
count=0;
if (separate != MagickFalse)
{
size_offset=TellBlob(image)+2;
count+=WriteCompressionStart(psd_info,image,next_image,1);
}
if (next_image->depth > 8)
next_image->depth=16;
monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
MagickTrue : MagickFalse;
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
return(0);
pixels=(unsigned char *) GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
if (next_image->compression == ZipCompression)
{
compressed_pixels=(unsigned char *) AcquireQuantumMemory(CHUNK,
sizeof(*compressed_pixels));
if (compressed_pixels == (unsigned char *) NULL)
{
quantum_info=DestroyQuantumInfo(quantum_info);
return(0);
}
ResetMagickMemory(&stream,0,sizeof(stream));
stream.data_type=Z_BINARY;
level=Z_DEFAULT_COMPRESSION;
if ((image_info->quality > 0 && image_info->quality < 10))
level=(int) image_info->quality;
if (deflateInit(&stream,level) != Z_OK)
{
quantum_info=DestroyQuantumInfo(quantum_info);
return(0);
}
}
#endif
for (y=0; y < (ssize_t) next_image->rows; y++)
{
p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
quantum_type,pixels,exception);
if (monochrome != MagickFalse)
for (i=0; i < (ssize_t) length; i++)
pixels[i]=(~pixels[i]);
if (next_image->compression == RLECompression)
{
length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels,
exception);
count+=WriteBlob(image,length,compact_pixels);
size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
else if (next_image->compression == ZipCompression)
{
stream.avail_in=(uInt) length;
stream.next_in=(Bytef *) pixels;
if (y == (ssize_t) next_image->rows-1)
flush=Z_FINISH;
do {
stream.avail_out=(uInt) CHUNK;
stream.next_out=(Bytef *) compressed_pixels;
if (deflate(&stream,flush) == Z_STREAM_ERROR)
break;
length=(size_t) CHUNK-stream.avail_out;
if (length > 0)
count+=WriteBlob(image,length,compressed_pixels);
} while (stream.avail_out == 0);
}
#endif
else
count+=WriteBlob(image,length,pixels);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
if (next_image->compression == ZipCompression)
{
(void) deflateEnd(&stream);
compressed_pixels=(unsigned char *) RelinquishMagickMemory(
compressed_pixels);
}
#endif
quantum_info=DestroyQuantumInfo(quantum_info);
return(count);
}
Commit Message: https://github.com/ImageMagick/ImageMagick/issues/348
CWE ID: CWE-787 | static size_t WritePSDChannel(const PSDInfo *psd_info,
const ImageInfo *image_info,Image *image,Image *next_image,
const QuantumType quantum_type, unsigned char *compact_pixels,
MagickOffsetType size_offset,const MagickBooleanType separate,
ExceptionInfo *exception)
{
int
y;
MagickBooleanType
monochrome;
QuantumInfo
*quantum_info;
register const Quantum
*p;
register ssize_t
i;
size_t
count,
length;
unsigned char
*pixels;
#ifdef MAGICKCORE_ZLIB_DELEGATE
#define CHUNK 16384
int
flush,
level;
unsigned char
*compressed_pixels;
z_stream
stream;
compressed_pixels=(unsigned char *) NULL;
flush=Z_NO_FLUSH;
#endif
count=0;
if (separate != MagickFalse)
{
size_offset=TellBlob(image)+2;
count+=WriteCompressionStart(psd_info,image,next_image,1);
}
if (next_image->depth > 8)
next_image->depth=16;
monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
MagickTrue : MagickFalse;
quantum_info=AcquireQuantumInfo(image_info,next_image);
if (quantum_info == (QuantumInfo *) NULL)
return(0);
pixels=(unsigned char *) GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
if (next_image->compression == ZipCompression)
{
compressed_pixels=(unsigned char *) AcquireQuantumMemory(CHUNK,
sizeof(*compressed_pixels));
if (compressed_pixels == (unsigned char *) NULL)
{
quantum_info=DestroyQuantumInfo(quantum_info);
return(0);
}
ResetMagickMemory(&stream,0,sizeof(stream));
stream.data_type=Z_BINARY;
level=Z_DEFAULT_COMPRESSION;
if ((image_info->quality > 0 && image_info->quality < 10))
level=(int) image_info->quality;
if (deflateInit(&stream,level) != Z_OK)
{
quantum_info=DestroyQuantumInfo(quantum_info);
return(0);
}
}
#endif
for (y=0; y < (ssize_t) next_image->rows; y++)
{
p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
quantum_type,pixels,exception);
if (monochrome != MagickFalse)
for (i=0; i < (ssize_t) length; i++)
pixels[i]=(~pixels[i]);
if (next_image->compression == RLECompression)
{
length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels,
exception);
count+=WriteBlob(image,length,compact_pixels);
size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
else if (next_image->compression == ZipCompression)
{
stream.avail_in=(uInt) length;
stream.next_in=(Bytef *) pixels;
if (y == (ssize_t) next_image->rows-1)
flush=Z_FINISH;
do {
stream.avail_out=(uInt) CHUNK;
stream.next_out=(Bytef *) compressed_pixels;
if (deflate(&stream,flush) == Z_STREAM_ERROR)
break;
length=(size_t) CHUNK-stream.avail_out;
if (length > 0)
count+=WriteBlob(image,length,compressed_pixels);
} while (stream.avail_out == 0);
}
#endif
else
count+=WriteBlob(image,length,pixels);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
if (next_image->compression == ZipCompression)
{
(void) deflateEnd(&stream);
compressed_pixels=(unsigned char *) RelinquishMagickMemory(
compressed_pixels);
}
#endif
quantum_info=DestroyQuantumInfo(quantum_info);
return(count);
}
| 4,059 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: mkvparser::IMkvReader::~IMkvReader()
{
//// Disable MSVC warnings that suggest making code non-portable.
}
Commit Message: libwebm: Pull from upstream
Rolling mkvparser from upstream. Primarily for fixing a bug on parsing
failures with certain Opus WebM files.
Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae
The diff is so huge because there were some style clean ups upstream.
But it was ensured that there were no breaking changes when the style
clean ups were done upstream.
Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039
CWE ID: CWE-119 | mkvparser::IMkvReader::~IMkvReader()
#ifdef _MSC_VER
//// Disable MSVC warnings that suggest making code non-portable.
#pragma warning(disable : 4996)
#endif
mkvparser::IMkvReader::~IMkvReader() {}
void mkvparser::GetVersion(int& major, int& minor, int& build, int& revision) {
major = 1;
minor = 0;
build = 0;
revision = 28;
}
| 25,470 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static int atusb_get_and_show_revision(struct atusb *atusb)
{
struct usb_device *usb_dev = atusb->usb_dev;
unsigned char buffer[3];
int ret;
/* Get a couple of the ATMega Firmware values */
ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
ATUSB_ID, ATUSB_REQ_FROM_DEV, 0, 0,
buffer, 3, 1000);
if (ret >= 0) {
atusb->fw_ver_maj = buffer[0];
atusb->fw_ver_min = buffer[1];
atusb->fw_hw_type = buffer[2];
dev_info(&usb_dev->dev,
"Firmware: major: %u, minor: %u, hardware type: %u\n",
atusb->fw_ver_maj, atusb->fw_ver_min, atusb->fw_hw_type);
}
if (atusb->fw_ver_maj == 0 && atusb->fw_ver_min < 2) {
dev_info(&usb_dev->dev,
"Firmware version (%u.%u) predates our first public release.",
atusb->fw_ver_maj, atusb->fw_ver_min);
dev_info(&usb_dev->dev, "Please update to version 0.2 or newer");
}
return ret;
}
Commit Message: ieee802154: atusb: do not use the stack for buffers to make them DMA able
From 4.9 on we should really avoid using the stack here, as it will not be
DMA-able on various platforms. This changes the buffers already present at
the time 4.9 was released. This should go into stable as well.
Reported-by: Dan Carpenter <[email protected]>
Cc: [email protected]
Signed-off-by: Stefan Schmidt <[email protected]>
Signed-off-by: Marcel Holtmann <[email protected]>
CWE ID: CWE-119 | static int atusb_get_and_show_revision(struct atusb *atusb)
{
struct usb_device *usb_dev = atusb->usb_dev;
unsigned char *buffer;
int ret;
buffer = kmalloc(3, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
/* Get a couple of the ATMega Firmware values */
ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
ATUSB_ID, ATUSB_REQ_FROM_DEV, 0, 0,
buffer, 3, 1000);
if (ret >= 0) {
atusb->fw_ver_maj = buffer[0];
atusb->fw_ver_min = buffer[1];
atusb->fw_hw_type = buffer[2];
dev_info(&usb_dev->dev,
"Firmware: major: %u, minor: %u, hardware type: %u\n",
atusb->fw_ver_maj, atusb->fw_ver_min, atusb->fw_hw_type);
}
if (atusb->fw_ver_maj == 0 && atusb->fw_ver_min < 2) {
dev_info(&usb_dev->dev,
"Firmware version (%u.%u) predates our first public release.",
atusb->fw_ver_maj, atusb->fw_ver_min);
dev_info(&usb_dev->dev, "Please update to version 0.2 or newer");
}
kfree(buffer);
return ret;
}
| 29,791 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void PageInfo::OnChangePasswordButtonPressed(
content::WebContents* web_contents) {
#if defined(FULL_SAFE_BROWSING)
DCHECK(password_protection_service_);
DCHECK(safe_browsing_status_ == SAFE_BROWSING_STATUS_SIGN_IN_PASSWORD_REUSE ||
safe_browsing_status_ ==
SAFE_BROWSING_STATUS_ENTERPRISE_PASSWORD_REUSE);
password_protection_service_->OnUserAction(
web_contents,
safe_browsing_status_ == SAFE_BROWSING_STATUS_SIGN_IN_PASSWORD_REUSE
? PasswordReuseEvent::SIGN_IN_PASSWORD
: PasswordReuseEvent::ENTERPRISE_PASSWORD,
safe_browsing::WarningUIType::PAGE_INFO,
safe_browsing::WarningAction::CHANGE_PASSWORD);
#endif
}
Commit Message: Revert "PageInfo: decouple safe browsing and TLS statii."
This reverts commit ee95bc44021230127c7e6e9a8cf9d3820760f77c.
Reason for revert: suspect causing unit_tests failure on Linux MSAN Tests:
https://ci.chromium.org/p/chromium/builders/ci/Linux%20MSan%20Tests/17649
PageInfoBubbleViewTest.ChangingFlashSettingForSiteIsRemembered
PageInfoBubbleViewTest.EnsureCloseCallback
PageInfoBubbleViewTest.NotificationPermissionRevokeUkm
PageInfoBubbleViewTest.OpenPageInfoBubbleAfterNavigationStart
PageInfoBubbleViewTest.SetPermissionInfo
PageInfoBubbleViewTest.SetPermissionInfoForUsbGuard
PageInfoBubbleViewTest.SetPermissionInfoWithPolicyUsbDevices
PageInfoBubbleViewTest.SetPermissionInfoWithUsbDevice
PageInfoBubbleViewTest.SetPermissionInfoWithUserAndPolicyUsbDevices
PageInfoBubbleViewTest.UpdatingSiteDataRetainsLayout
https://logs.chromium.org/logs/chromium/buildbucket/cr-buildbucket.appspot.com/8909718923797040064/+/steps/unit_tests/0/logs/Deterministic_failure:_PageInfoBubbleViewTest.ChangingFlashSettingForSiteIsRemembered__status_CRASH_/0
[ RUN ] PageInfoBubbleViewTest.ChangingFlashSettingForSiteIsRemembered
==9056==WARNING: MemorySanitizer: use-of-uninitialized-value
#0 0x561baaab15ec in PageInfoUI::GetSecurityDescription(PageInfoUI::IdentityInfo const&) const ./../../chrome/browser/ui/page_info/page_info_ui.cc:250:3
#1 0x561bab6a1548 in PageInfoBubbleView::SetIdentityInfo(PageInfoUI::IdentityInfo const&) ./../../chrome/browser/ui/views/page_info/page_info_bubble_view.cc:802:7
#2 0x561baaaab3bb in PageInfo::PresentSiteIdentity() ./../../chrome/browser/ui/page_info/page_info.cc:969:8
#3 0x561baaaa0a21 in PageInfo::PageInfo(PageInfoUI*, Profile*, TabSpecificContentSettings*, content::WebContents*, GURL const&, security_state::SecurityLevel, security_state::VisibleSecurityState const&) ./../../chrome/browser/ui/page_info/page_info.cc:344:3
#4 0x561bab69b6dd in PageInfoBubbleView::PageInfoBubbleView(views::View*, gfx::Rect const&, aura::Window*, Profile*, content::WebContents*, GURL const&, security_state::SecurityLevel, security_state::VisibleSecurityState const&, base::OnceCallback<void (views::Widget::ClosedReason, bool)>) ./../../chrome/browser/ui/views/page_info/page_info_bubble_view.cc:576:24
...
Original change's description:
> PageInfo: decouple safe browsing and TLS statii.
>
> Previously, the Page Info bubble maintained a single variable to
> identify all reasons that a page might have a non-standard status. This
> lead to the display logic making assumptions about, for instance, the
> validity of a certificate when the page was flagged by Safe Browsing.
>
> This CL separates out the Safe Browsing status from the site identity
> status so that the page info bubble can inform the user that the site's
> certificate is invalid, even if it's also flagged by Safe Browsing.
>
> Bug: 869925
> Change-Id: I34107225b4206c8f32771ccd75e9367668d0a72b
> Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1662537
> Reviewed-by: Mustafa Emre Acer <[email protected]>
> Reviewed-by: Bret Sepulveda <[email protected]>
> Auto-Submit: Joe DeBlasio <[email protected]>
> Commit-Queue: Joe DeBlasio <[email protected]>
> Cr-Commit-Position: refs/heads/master@{#671847}
[email protected],[email protected],[email protected]
Change-Id: I8be652952e7276bcc9266124693352e467159cc4
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: 869925
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1673985
Reviewed-by: Takashi Sakamoto <[email protected]>
Commit-Queue: Takashi Sakamoto <[email protected]>
Cr-Commit-Position: refs/heads/master@{#671932}
CWE ID: CWE-311 | void PageInfo::OnChangePasswordButtonPressed(
content::WebContents* web_contents) {
#if defined(FULL_SAFE_BROWSING)
DCHECK(password_protection_service_);
DCHECK(site_identity_status_ == SITE_IDENTITY_STATUS_SIGN_IN_PASSWORD_REUSE ||
site_identity_status_ ==
SITE_IDENTITY_STATUS_ENTERPRISE_PASSWORD_REUSE);
password_protection_service_->OnUserAction(
web_contents,
site_identity_status_ == SITE_IDENTITY_STATUS_SIGN_IN_PASSWORD_REUSE
? PasswordReuseEvent::SIGN_IN_PASSWORD
: PasswordReuseEvent::ENTERPRISE_PASSWORD,
safe_browsing::WarningUIType::PAGE_INFO,
safe_browsing::WarningAction::CHANGE_PASSWORD);
#endif
}
| 8,540 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static void sas_revalidate_domain(struct work_struct *work)
{
int res = 0;
struct sas_discovery_event *ev = to_sas_discovery_event(work);
struct asd_sas_port *port = ev->port;
struct sas_ha_struct *ha = port->ha;
struct domain_device *ddev = port->port_dev;
/* prevent revalidation from finding sata links in recovery */
mutex_lock(&ha->disco_mutex);
if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) {
SAS_DPRINTK("REVALIDATION DEFERRED on port %d, pid:%d\n",
port->id, task_pid_nr(current));
goto out;
}
clear_bit(DISCE_REVALIDATE_DOMAIN, &port->disc.pending);
SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id,
task_pid_nr(current));
if (ddev && (ddev->dev_type == SAS_FANOUT_EXPANDER_DEVICE ||
ddev->dev_type == SAS_EDGE_EXPANDER_DEVICE))
res = sas_ex_revalidate_domain(ddev);
SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n",
port->id, task_pid_nr(current), res);
out:
mutex_unlock(&ha->disco_mutex);
}
Commit Message: scsi: libsas: direct call probe and destruct
Commit 87c8331fcf72 ("[SCSI] libsas: prevent domain rediscovery
competing with ata error handling") introduced the disco mutex to prevent
rediscovery competing with ata error handling and put the whole
revalidation inside the mutex. But the rphy add/remove needs to wait for the
error handling, which also grabs the disco mutex. This may lead to a
deadlock. So the probe and destruct events were introduced to do the rphy
add/remove asynchronously and outside the lock.
The asynchronously processed workers make the whole discovery process
non-atomic, so other events may interrupt the process. For example,
if a loss-of-signal event is inserted before the probe event,
sas_deform_port() is called and the port will be deleted.
And sas_port_delete() may run before the destruct event, but the
port-x:x is the top parent of the end device or expander. This leads to
a kernel WARNING such as:
[ 82.042979] sysfs group 'power' not found for kobject 'phy-1:0:22'
[ 82.042983] ------------[ cut here ]------------
[ 82.042986] WARNING: CPU: 54 PID: 1714 at fs/sysfs/group.c:237
sysfs_remove_group+0x94/0xa0
[ 82.043059] Call trace:
[ 82.043082] [<ffff0000082e7624>] sysfs_remove_group+0x94/0xa0
[ 82.043085] [<ffff00000864e320>] dpm_sysfs_remove+0x60/0x70
[ 82.043086] [<ffff00000863ee10>] device_del+0x138/0x308
[ 82.043089] [<ffff00000869a2d0>] sas_phy_delete+0x38/0x60
[ 82.043091] [<ffff00000869a86c>] do_sas_phy_delete+0x6c/0x80
[ 82.043093] [<ffff00000863dc20>] device_for_each_child+0x58/0xa0
[ 82.043095] [<ffff000008696f80>] sas_remove_children+0x40/0x50
[ 82.043100] [<ffff00000869d1bc>] sas_destruct_devices+0x64/0xa0
[ 82.043102] [<ffff0000080e93bc>] process_one_work+0x1fc/0x4b0
[ 82.043104] [<ffff0000080e96c0>] worker_thread+0x50/0x490
[ 82.043105] [<ffff0000080f0364>] kthread+0xfc/0x128
[ 82.043107] [<ffff0000080836c0>] ret_from_fork+0x10/0x50
Make probe and destruct a direct call in the disco and revalidate function,
but put them outside the lock. The whole discovery or revalidate won't
be interrupted by other events. And the DISCE_PROBE and DISCE_DESTRUCT
event are deleted as a result of the direct call.
Introduce a new list to destruct the sas_port and put the port delete after
the destruct. This makes sure the right order of destroying the sysfs
kobject and fix the warning above.
In sas_ex_revalidate_domain() have a loop to find all broadcasted
device, and sometimes we have a chance to find the same expander twice.
Because the sas_port will be deleted at the end of the whole revalidate
process, sas_port with the same name cannot be added before this.
Otherwise the sysfs will complain of creating duplicate filename. Since
the LLDD will send broadcast for every device change, we can only
process one expander's revalidation.
[mkp: kbuild test robot warning]
Signed-off-by: Jason Yan <[email protected]>
CC: John Garry <[email protected]>
CC: Johannes Thumshirn <[email protected]>
CC: Ewan Milne <[email protected]>
CC: Christoph Hellwig <[email protected]>
CC: Tomas Henzl <[email protected]>
CC: Dan Williams <[email protected]>
Reviewed-by: Hannes Reinecke <[email protected]>
Signed-off-by: Martin K. Petersen <[email protected]>
CWE ID: | static void sas_revalidate_domain(struct work_struct *work)
{
int res = 0;
struct sas_discovery_event *ev = to_sas_discovery_event(work);
struct asd_sas_port *port = ev->port;
struct sas_ha_struct *ha = port->ha;
struct domain_device *ddev = port->port_dev;
/* prevent revalidation from finding sata links in recovery */
mutex_lock(&ha->disco_mutex);
if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) {
SAS_DPRINTK("REVALIDATION DEFERRED on port %d, pid:%d\n",
port->id, task_pid_nr(current));
goto out;
}
clear_bit(DISCE_REVALIDATE_DOMAIN, &port->disc.pending);
SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id,
task_pid_nr(current));
if (ddev && (ddev->dev_type == SAS_FANOUT_EXPANDER_DEVICE ||
ddev->dev_type == SAS_EDGE_EXPANDER_DEVICE))
res = sas_ex_revalidate_domain(ddev);
SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n",
port->id, task_pid_nr(current), res);
out:
mutex_unlock(&ha->disco_mutex);
sas_destruct_devices(port);
sas_destruct_ports(port);
sas_probe_devices(port);
}
| 4,224 |
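The fix above calls sas_destruct_devices(), sas_destruct_ports() and sas_probe_devices() only after disco_mutex has been released, so teardown can no longer deadlock against work that also needs that mutex. The general shape of the pattern - unlink doomed objects onto a private list while holding the lock, then destroy them after unlocking - is sketched below in plain userspace C with pthreads; all names are invented and this is not the libsas code (compile with -pthread).
/* deferred_destruction_sketch.c: unlink under the lock, destroy after unlocking. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
struct node {
    int id;
    int dead;                    /* marked for removal */
    struct node *next;
};
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;
static void destroy_node(struct node *n)
{
    /* In real code this may sleep or take other locks, which is exactly
     * why it must not run while list_lock is held. */
    printf("destroying node %d\n", n->id);
    free(n);
}
static void reap_dead_nodes(void)
{
    struct node **pp, *n, *doomed = NULL;
    pthread_mutex_lock(&list_lock);
    for (pp = &head; (n = *pp) != NULL; ) {
        if (n->dead) {
            *pp = n->next;       /* unlink while holding the lock */
            n->next = doomed;    /* park it on a private list */
            doomed = n;
        } else {
            pp = &n->next;
        }
    }
    pthread_mutex_unlock(&list_lock);
    while (doomed != NULL) {     /* destruction happens outside the lock */
        n = doomed;
        doomed = doomed->next;
        destroy_node(n);
    }
}
int main(void)
{
    for (int i = 0; i < 3; i++) {
        struct node *n = calloc(1, sizeof(*n));
        if (n == NULL)
            return 1;
        n->id = i;
        n->dead = (i != 1);      /* nodes 0 and 2 are due for removal */
        n->next = head;
        head = n;
    }
    reap_dead_nodes();
    while (head != NULL) {       /* tidy up the survivors on exit */
        struct node *n = head;
        head = head->next;
        free(n);
    }
    return 0;
}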
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static void hugetlbfs_put_super(struct super_block *sb)
{
struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);
if (sbi) {
sb->s_fs_info = NULL;
kfree(sbi);
}
}
Commit Message: hugepages: fix use after free bug in "quota" handling
hugetlbfs_{get,put}_quota() are badly named. They don't interact with the
general quota handling code, and they don't much resemble its behaviour.
Rather than being about maintaining limits on on-disk block usage by
particular users, they are instead about maintaining limits on in-memory
page usage (including anonymous MAP_PRIVATE copied-on-write pages)
associated with a particular hugetlbfs filesystem instance.
Worse, they work by having callbacks to the hugetlbfs filesystem code from
the low-level page handling code, in particular from free_huge_page().
This is a layering violation of itself, but more importantly, if the
kernel does a get_user_pages() on hugepages (which can happen from KVM
amongst others), then the free_huge_page() can be delayed until after the
associated inode has already been freed. If an unmount occurs at the
wrong time, even the hugetlbfs superblock where the "quota" limits are
stored may have been freed.
Andrew Barry proposed a patch to fix this by having hugepages, instead of
storing a pointer to their address_space and reaching the superblock from
there, store pointers directly to the superblock,
bumping the reference count as appropriate to avoid it being freed.
Andrew Morton rejected that version, however, on the grounds that it made
the existing layering violation worse.
This is a reworked version of Andrew's patch, which removes the extra, and
some of the existing, layering violation. It works by introducing the
concept of a hugepage "subpool" at the lower hugepage mm layer - that is a
finite logical pool of hugepages to allocate from. hugetlbfs now creates
a subpool for each filesystem instance with a page limit set, and a
pointer to the subpool gets added to each allocated hugepage, instead of
the address_space pointer used now. The subpool has its own lifetime and
is only freed once all pages in it _and_ all other references to it (i.e.
superblocks) are gone.
subpools are optional - a NULL subpool pointer is taken by the code to
mean that no subpool limits are in effect.
Previous discussion of this bug can be found in: "Fix refcounting in hugetlbfs
quota handling.". See: https://lkml.org/lkml/2011/8/11/28 or
http://marc.info/?l=linux-mm&m=126928970510627&w=1
v2: Fixed a bug spotted by Hillf Danton, and removed the extra parameter to
alloc_huge_page() - since it already takes the vma, it is not necessary.
Signed-off-by: Andrew Barry <[email protected]>
Signed-off-by: David Gibson <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Minchan Kim <[email protected]>
Cc: Hillf Danton <[email protected]>
Cc: Paul Mackerras <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
CWE ID: CWE-399 | static void hugetlbfs_put_super(struct super_block *sb)
{
struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);
if (sbi) {
sb->s_fs_info = NULL;
if (sbi->spool)
hugepage_put_subpool(sbi->spool);
kfree(sbi);
}
}
| 23,158 |
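The commit above gives each hugetlbfs mount a "subpool" object with its own lifetime, freed only when the superblock and every in-flight page have dropped their references. A bare-bones, single-threaded sketch of that get/put reference-counting idea follows; the names and fields are invented, and the real kernel code protects the count with a lock.
/* subpool_refcount_sketch.c: an object that lives until the last user puts it. */
#include <stdio.h>
#include <stdlib.h>
struct subpool {
    long refs;           /* one for the "mount", one per in-flight user */
    long max_pages;      /* the limit this pool enforces (example field) */
};
static struct subpool *subpool_create(long max_pages)
{
    struct subpool *sp = calloc(1, sizeof(*sp));
    if (sp == NULL)
        return NULL;
    sp->refs = 1;                    /* creator's reference */
    sp->max_pages = max_pages;
    return sp;
}
static struct subpool *subpool_get(struct subpool *sp)
{
    sp->refs++;                      /* a page now points at this pool */
    return sp;
}
static void subpool_put(struct subpool *sp)
{
    if (--sp->refs == 0) {           /* last reference gone: safe to free */
        printf("freeing subpool (limit %ld)\n", sp->max_pages);
        free(sp);
    }
}
int main(void)
{
    struct subpool *sp = subpool_create(1024);
    if (sp == NULL)
        return 1;
    struct subpool *page_ref = subpool_get(sp);  /* a page is allocated */
    subpool_put(sp);        /* "unmount": pool must survive, page still live */
    subpool_put(page_ref);  /* page freed last: pool goes away only here */
    return 0;
}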
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static BOOL gdi_Bitmap_Decompress(rdpContext* context, rdpBitmap* bitmap,
const BYTE* pSrcData, UINT32 DstWidth, UINT32 DstHeight,
UINT32 bpp, UINT32 length, BOOL compressed,
UINT32 codecId)
{
UINT32 SrcSize = length;
rdpGdi* gdi = context->gdi;
bitmap->compressed = FALSE;
bitmap->format = gdi->dstFormat;
bitmap->length = DstWidth * DstHeight * GetBytesPerPixel(bitmap->format);
bitmap->data = (BYTE*) _aligned_malloc(bitmap->length, 16);
if (!bitmap->data)
return FALSE;
if (compressed)
{
if (bpp < 32)
{
if (!interleaved_decompress(context->codecs->interleaved,
pSrcData, SrcSize,
DstWidth, DstHeight,
bpp,
bitmap->data, bitmap->format,
0, 0, 0, DstWidth, DstHeight,
&gdi->palette))
return FALSE;
}
else
{
if (!planar_decompress(context->codecs->planar, pSrcData, SrcSize,
DstWidth, DstHeight,
bitmap->data, bitmap->format, 0, 0, 0,
DstWidth, DstHeight, TRUE))
return FALSE;
}
}
else
{
const UINT32 SrcFormat = gdi_get_pixel_format(bpp);
const size_t sbpp = GetBytesPerPixel(SrcFormat);
const size_t dbpp = GetBytesPerPixel(bitmap->format);
if ((sbpp == 0) || (dbpp == 0))
return FALSE;
else
{
const size_t dstSize = SrcSize * dbpp / sbpp;
if (dstSize < bitmap->length)
return FALSE;
}
if (!freerdp_image_copy(bitmap->data, bitmap->format, 0, 0, 0,
DstWidth, DstHeight, pSrcData, SrcFormat,
0, 0, 0, &gdi->palette, FREERDP_FLIP_VERTICAL))
return FALSE;
}
return TRUE;
}
Commit Message: Fixed CVE-2018-8787
Thanks to Eyal Itkin from Check Point Software Technologies.
CWE ID: CWE-190 | static BOOL gdi_Bitmap_Decompress(rdpContext* context, rdpBitmap* bitmap,
const BYTE* pSrcData, UINT32 DstWidth, UINT32 DstHeight,
UINT32 bpp, UINT32 length, BOOL compressed,
UINT32 codecId)
{
UINT32 SrcSize = length;
rdpGdi* gdi = context->gdi;
UINT32 size = DstWidth * DstHeight;
bitmap->compressed = FALSE;
bitmap->format = gdi->dstFormat;
if ((GetBytesPerPixel(bitmap->format) == 0) ||
(DstWidth == 0) || (DstHeight == 0) || (DstWidth > UINT32_MAX / DstHeight) ||
(size > (UINT32_MAX / GetBytesPerPixel(bitmap->format))))
return FALSE;
size *= GetBytesPerPixel(bitmap->format);
bitmap->length = size;
bitmap->data = (BYTE*) _aligned_malloc(bitmap->length, 16);
if (!bitmap->data)
return FALSE;
if (compressed)
{
if (bpp < 32)
{
if (!interleaved_decompress(context->codecs->interleaved,
pSrcData, SrcSize,
DstWidth, DstHeight,
bpp,
bitmap->data, bitmap->format,
0, 0, 0, DstWidth, DstHeight,
&gdi->palette))
return FALSE;
}
else
{
if (!planar_decompress(context->codecs->planar, pSrcData, SrcSize,
DstWidth, DstHeight,
bitmap->data, bitmap->format, 0, 0, 0,
DstWidth, DstHeight, TRUE))
return FALSE;
}
}
else
{
const UINT32 SrcFormat = gdi_get_pixel_format(bpp);
const size_t sbpp = GetBytesPerPixel(SrcFormat);
const size_t dbpp = GetBytesPerPixel(bitmap->format);
if ((sbpp == 0) || (dbpp == 0))
return FALSE;
else
{
const size_t dstSize = SrcSize * dbpp / sbpp;
if (dstSize < bitmap->length)
return FALSE;
}
if (!freerdp_image_copy(bitmap->data, bitmap->format, 0, 0, 0,
DstWidth, DstHeight, pSrcData, SrcFormat,
0, 0, 0, &gdi->palette, FREERDP_FLIP_VERTICAL))
return FALSE;
}
return TRUE;
}
| 15,567 |
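The CVE-2018-8787 fix above rejects width/height/bytes-per-pixel combinations whose product would overflow a 32-bit size before anything is allocated. The same guard in isolation, as a small self-contained C program (the function name is made up for illustration):
/* overflow_check_sketch.c: validate an allocation size built from untrusted dimensions. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
static void *alloc_bitmap(uint32_t width, uint32_t height, uint32_t bytes_per_pixel,
                          uint32_t *out_size)
{
    if (width == 0 || height == 0 || bytes_per_pixel == 0)
        return NULL;
    if (width > UINT32_MAX / height)            /* width * height would wrap */
        return NULL;
    uint32_t pixels = width * height;
    if (pixels > UINT32_MAX / bytes_per_pixel)  /* multiplying by bpp would wrap */
        return NULL;
    *out_size = pixels * bytes_per_pixel;
    return calloc(1, *out_size);
}
int main(void)
{
    uint32_t size = 0;
    /* 0x10000 * 0x10000 * 4 wraps a 32-bit size to 0 - must be rejected. */
    void *p = alloc_bitmap(0x10000u, 0x10000u, 4u, &size);
    printf("hostile dimensions: %s\n", p ? "accepted (BUG)" : "rejected");
    free(p);
    p = alloc_bitmap(640u, 480u, 4u, &size);
    printf("sane dimensions: %u bytes %s\n", (unsigned) size, p ? "allocated" : "failed");
    free(p);
    return 0;
}
The order of the checks matters: each multiplication is proven safe (by dividing the limit) before it is performed, so no intermediate value ever wraps.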
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: USHORT CNB::QueryL4HeaderOffset(PVOID PacketData, ULONG IpHeaderOffset) const
{
USHORT Res;
auto ppr = ParaNdis_ReviewIPPacket(RtlOffsetToPointer(PacketData, IpHeaderOffset),
GetDataLength(), __FUNCTION__);
if (ppr.ipStatus != ppresNotIP)
{
Res = static_cast<USHORT>(IpHeaderOffset + ppr.ipHeaderSize);
}
else
{
DPrintf(0, ("[%s] ERROR: NOT an IP packet - expected troubles!\n", __FUNCTION__));
Res = 0;
}
return Res;
}
Commit Message: NetKVM: BZ#1169718: Checking the length only on read
Signed-off-by: Joseph Hindin <[email protected]>
CWE ID: CWE-20 | USHORT CNB::QueryL4HeaderOffset(PVOID PacketData, ULONG IpHeaderOffset) const
{
USHORT Res;
auto ppr = ParaNdis_ReviewIPPacket(RtlOffsetToPointer(PacketData, IpHeaderOffset),
GetDataLength(), FALSE, __FUNCTION__);
if (ppr.ipStatus != ppresNotIP)
{
Res = static_cast<USHORT>(IpHeaderOffset + ppr.ipHeaderSize);
}
else
{
DPrintf(0, ("[%s] ERROR: NOT an IP packet - expected troubles!\n", __FUNCTION__));
Res = 0;
}
return Res;
}
| 11,354 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
const struct assoc_array_ops *ops,
const void *index_key,
struct assoc_array_walk_result *result)
{
struct assoc_array_shortcut *shortcut, *new_s0;
struct assoc_array_node *node, *new_n0, *new_n1, *side;
struct assoc_array_ptr *ptr;
unsigned long dissimilarity, base_seg, blank;
size_t keylen;
bool have_meta;
int level, diff;
int slot, next_slot, free_slot, i, j;
node = result->terminal_node.node;
level = result->terminal_node.level;
edit->segment_cache[ASSOC_ARRAY_FAN_OUT] = result->terminal_node.slot;
pr_devel("-->%s()\n", __func__);
/* We arrived at a node which doesn't have an onward node or shortcut
* pointer that we have to follow. This means that (a) the leaf we
* want must go here (either by insertion or replacement) or (b) we
* need to split this node and insert in one of the fragments.
*/
free_slot = -1;
/* Firstly, we have to check the leaves in this node to see if there's
* a matching one we should replace in place.
*/
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
ptr = node->slots[i];
if (!ptr) {
free_slot = i;
continue;
}
if (assoc_array_ptr_is_leaf(ptr) &&
ops->compare_object(assoc_array_ptr_to_leaf(ptr),
index_key)) {
pr_devel("replace in slot %d\n", i);
edit->leaf_p = &node->slots[i];
edit->dead_leaf = node->slots[i];
pr_devel("<--%s() = ok [replace]\n", __func__);
return true;
}
}
/* If there is a free slot in this node then we can just insert the
* leaf here.
*/
if (free_slot >= 0) {
pr_devel("insert in free slot %d\n", free_slot);
edit->leaf_p = &node->slots[free_slot];
edit->adjust_count_on = node;
pr_devel("<--%s() = ok [insert]\n", __func__);
return true;
}
/* The node has no spare slots - so we're either going to have to split
* it or insert another node before it.
*
* Whatever, we're going to need at least two new nodes - so allocate
* those now. We may also need a new shortcut, but we deal with that
* when we need it.
*/
new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
if (!new_n0)
return false;
edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
new_n1 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
if (!new_n1)
return false;
edit->new_meta[1] = assoc_array_node_to_ptr(new_n1);
/* We need to find out how similar the leaves are. */
pr_devel("no spare slots\n");
have_meta = false;
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
ptr = node->slots[i];
if (assoc_array_ptr_is_meta(ptr)) {
edit->segment_cache[i] = 0xff;
have_meta = true;
continue;
}
base_seg = ops->get_object_key_chunk(
assoc_array_ptr_to_leaf(ptr), level);
base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK;
edit->segment_cache[i] = base_seg & ASSOC_ARRAY_FAN_MASK;
}
if (have_meta) {
pr_devel("have meta\n");
goto split_node;
}
/* The node contains only leaves */
dissimilarity = 0;
base_seg = edit->segment_cache[0];
for (i = 1; i < ASSOC_ARRAY_FAN_OUT; i++)
dissimilarity |= edit->segment_cache[i] ^ base_seg;
pr_devel("only leaves; dissimilarity=%lx\n", dissimilarity);
if ((dissimilarity & ASSOC_ARRAY_FAN_MASK) == 0) {
/* The old leaves all cluster in the same slot. We will need
* to insert a shortcut if the new node wants to cluster with them.
*/
if ((edit->segment_cache[ASSOC_ARRAY_FAN_OUT] ^ base_seg) == 0)
goto all_leaves_cluster_together;
/* Otherwise we can just insert a new node ahead of the old
* one.
*/
goto present_leaves_cluster_but_not_new_leaf;
}
split_node:
pr_devel("split node\n");
/* We need to split the current node; we know that the node doesn't
* simply contain a full set of leaves that cluster together (it
* contains meta pointers and/or non-clustering leaves).
*
* We need to expel at least two leaves out of a set consisting of the
* leaves in the node and the new leaf.
*
* We need a new node (n0) to replace the current one and a new node to
* take the expelled nodes (n1).
*/
edit->set[0].to = assoc_array_node_to_ptr(new_n0);
new_n0->back_pointer = node->back_pointer;
new_n0->parent_slot = node->parent_slot;
new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
new_n1->parent_slot = -1; /* Need to calculate this */
do_split_node:
pr_devel("do_split_node\n");
new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
new_n1->nr_leaves_on_branch = 0;
/* Begin by finding two matching leaves. There have to be at least two
* that match - even if there are meta pointers - because any leaf that
* would match a slot with a meta pointer in it must be somewhere
* behind that meta pointer and cannot be here. Further, given N
* remaining leaf slots, we now have N+1 leaves to go in them.
*/
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
slot = edit->segment_cache[i];
if (slot != 0xff)
for (j = i + 1; j < ASSOC_ARRAY_FAN_OUT + 1; j++)
if (edit->segment_cache[j] == slot)
goto found_slot_for_multiple_occupancy;
}
found_slot_for_multiple_occupancy:
pr_devel("same slot: %x %x [%02x]\n", i, j, slot);
BUG_ON(i >= ASSOC_ARRAY_FAN_OUT);
BUG_ON(j >= ASSOC_ARRAY_FAN_OUT + 1);
BUG_ON(slot >= ASSOC_ARRAY_FAN_OUT);
new_n1->parent_slot = slot;
/* Metadata pointers cannot change slot */
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++)
if (assoc_array_ptr_is_meta(node->slots[i]))
new_n0->slots[i] = node->slots[i];
else
new_n0->slots[i] = NULL;
BUG_ON(new_n0->slots[slot] != NULL);
new_n0->slots[slot] = assoc_array_node_to_ptr(new_n1);
/* Filter the leaf pointers between the new nodes */
free_slot = -1;
next_slot = 0;
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
if (assoc_array_ptr_is_meta(node->slots[i]))
continue;
if (edit->segment_cache[i] == slot) {
new_n1->slots[next_slot++] = node->slots[i];
new_n1->nr_leaves_on_branch++;
} else {
do {
free_slot++;
} while (new_n0->slots[free_slot] != NULL);
new_n0->slots[free_slot] = node->slots[i];
}
}
pr_devel("filtered: f=%x n=%x\n", free_slot, next_slot);
if (edit->segment_cache[ASSOC_ARRAY_FAN_OUT] != slot) {
do {
free_slot++;
} while (new_n0->slots[free_slot] != NULL);
edit->leaf_p = &new_n0->slots[free_slot];
edit->adjust_count_on = new_n0;
} else {
edit->leaf_p = &new_n1->slots[next_slot++];
edit->adjust_count_on = new_n1;
}
BUG_ON(next_slot <= 1);
edit->set_backpointers_to = assoc_array_node_to_ptr(new_n0);
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
if (edit->segment_cache[i] == 0xff) {
ptr = node->slots[i];
BUG_ON(assoc_array_ptr_is_leaf(ptr));
if (assoc_array_ptr_is_node(ptr)) {
side = assoc_array_ptr_to_node(ptr);
edit->set_backpointers[i] = &side->back_pointer;
} else {
shortcut = assoc_array_ptr_to_shortcut(ptr);
edit->set_backpointers[i] = &shortcut->back_pointer;
}
}
}
ptr = node->back_pointer;
if (!ptr)
edit->set[0].ptr = &edit->array->root;
else if (assoc_array_ptr_is_node(ptr))
edit->set[0].ptr = &assoc_array_ptr_to_node(ptr)->slots[node->parent_slot];
else
edit->set[0].ptr = &assoc_array_ptr_to_shortcut(ptr)->next_node;
edit->excised_meta[0] = assoc_array_node_to_ptr(node);
pr_devel("<--%s() = ok [split node]\n", __func__);
return true;
present_leaves_cluster_but_not_new_leaf:
/* All the old leaves cluster in the same slot, but the new leaf wants
* to go into a different slot, so we create a new node to hold the new
* leaf and a pointer to a new node holding all the old leaves.
*/
pr_devel("present leaves cluster but not new leaf\n");
new_n0->back_pointer = node->back_pointer;
new_n0->parent_slot = node->parent_slot;
new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
new_n1->parent_slot = edit->segment_cache[0];
new_n1->nr_leaves_on_branch = node->nr_leaves_on_branch;
edit->adjust_count_on = new_n0;
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++)
new_n1->slots[i] = node->slots[i];
new_n0->slots[edit->segment_cache[0]] = assoc_array_node_to_ptr(new_n0);
edit->leaf_p = &new_n0->slots[edit->segment_cache[ASSOC_ARRAY_FAN_OUT]];
edit->set[0].ptr = &assoc_array_ptr_to_node(node->back_pointer)->slots[node->parent_slot];
edit->set[0].to = assoc_array_node_to_ptr(new_n0);
edit->excised_meta[0] = assoc_array_node_to_ptr(node);
pr_devel("<--%s() = ok [insert node before]\n", __func__);
return true;
all_leaves_cluster_together:
/* All the leaves, new and old, want to cluster together in this node
* in the same slot, so we have to replace this node with a shortcut to
* skip over the identical parts of the key and then place a pair of
* nodes, one inside the other, at the end of the shortcut and
* distribute the keys between them.
*
* Firstly we need to work out where the leaves start diverging as a
* bit position into their keys so that we know how big the shortcut
* needs to be.
*
* We only need to make a single pass of N of the N+1 leaves because if
* any keys differ between themselves at bit X then at least one of
* them must also differ with the base key at bit X or before.
*/
pr_devel("all leaves cluster together\n");
diff = INT_MAX;
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
int x = ops->diff_objects(assoc_array_ptr_to_leaf(node->slots[i]),
index_key);
if (x < diff) {
BUG_ON(x < 0);
diff = x;
}
}
BUG_ON(diff == INT_MAX);
BUG_ON(diff < level + ASSOC_ARRAY_LEVEL_STEP);
keylen = round_up(diff, ASSOC_ARRAY_KEY_CHUNK_SIZE);
keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
new_s0 = kzalloc(sizeof(struct assoc_array_shortcut) +
keylen * sizeof(unsigned long), GFP_KERNEL);
if (!new_s0)
return false;
edit->new_meta[2] = assoc_array_shortcut_to_ptr(new_s0);
edit->set[0].to = assoc_array_shortcut_to_ptr(new_s0);
new_s0->back_pointer = node->back_pointer;
new_s0->parent_slot = node->parent_slot;
new_s0->next_node = assoc_array_node_to_ptr(new_n0);
new_n0->back_pointer = assoc_array_shortcut_to_ptr(new_s0);
new_n0->parent_slot = 0;
new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
new_n1->parent_slot = -1; /* Need to calculate this */
new_s0->skip_to_level = level = diff & ~ASSOC_ARRAY_LEVEL_STEP_MASK;
pr_devel("skip_to_level = %d [diff %d]\n", level, diff);
BUG_ON(level <= 0);
for (i = 0; i < keylen; i++)
new_s0->index_key[i] =
ops->get_key_chunk(index_key, i * ASSOC_ARRAY_KEY_CHUNK_SIZE);
blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank);
new_s0->index_key[keylen - 1] &= ~blank;
/* This now reduces to a node splitting exercise for which we'll need
* to regenerate the disparity table.
*/
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
ptr = node->slots[i];
base_seg = ops->get_object_key_chunk(assoc_array_ptr_to_leaf(ptr),
level);
base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK;
edit->segment_cache[i] = base_seg & ASSOC_ARRAY_FAN_MASK;
}
base_seg = ops->get_key_chunk(index_key, level);
base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK;
edit->segment_cache[ASSOC_ARRAY_FAN_OUT] = base_seg & ASSOC_ARRAY_FAN_MASK;
goto do_split_node;
}
Commit Message: assoc_array: Fix a buggy node-splitting case
This fixes CVE-2017-12193.
Fix a case in the assoc_array implementation in which a new leaf is
added that needs to go into a node that happens to be full, where the
existing leaves in that node cluster together at that level to the
exclusion of new leaf.
What needs to happen is that the existing leaves get moved out to a new
node, N1, at level + 1 and the existing node needs replacing with one,
N0, that has pointers to the new leaf and to N1.
The code that tries to do this gets this wrong in two ways:
(1) The pointer that should've pointed from N0 to N1 is set to point
recursively to N0 instead.
(2) The backpointer from N0 needs to be set correctly in the case N0 is
either the root node or reached through a shortcut.
Fix this by removing this path and using the split_node path instead,
which achieves the same end, but in a more general way (thanks to Eric
Biggers for spotting the redundancy).
The problem manifests itself as:
BUG: unable to handle kernel NULL pointer dereference at 0000000000000010
IP: assoc_array_apply_edit+0x59/0xe5
Fixes: 3cb989501c26 ("Add a generic associative array implementation.")
Reported-and-tested-by: WU Fan <[email protected]>
Signed-off-by: David Howells <[email protected]>
Cc: [email protected] [v3.13-rc1+]
Signed-off-by: Linus Torvalds <[email protected]>
CWE ID: CWE-476 | static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
const struct assoc_array_ops *ops,
const void *index_key,
struct assoc_array_walk_result *result)
{
struct assoc_array_shortcut *shortcut, *new_s0;
struct assoc_array_node *node, *new_n0, *new_n1, *side;
struct assoc_array_ptr *ptr;
unsigned long dissimilarity, base_seg, blank;
size_t keylen;
bool have_meta;
int level, diff;
int slot, next_slot, free_slot, i, j;
node = result->terminal_node.node;
level = result->terminal_node.level;
edit->segment_cache[ASSOC_ARRAY_FAN_OUT] = result->terminal_node.slot;
pr_devel("-->%s()\n", __func__);
/* We arrived at a node which doesn't have an onward node or shortcut
* pointer that we have to follow. This means that (a) the leaf we
* want must go here (either by insertion or replacement) or (b) we
* need to split this node and insert in one of the fragments.
*/
free_slot = -1;
/* Firstly, we have to check the leaves in this node to see if there's
* a matching one we should replace in place.
*/
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
ptr = node->slots[i];
if (!ptr) {
free_slot = i;
continue;
}
if (assoc_array_ptr_is_leaf(ptr) &&
ops->compare_object(assoc_array_ptr_to_leaf(ptr),
index_key)) {
pr_devel("replace in slot %d\n", i);
edit->leaf_p = &node->slots[i];
edit->dead_leaf = node->slots[i];
pr_devel("<--%s() = ok [replace]\n", __func__);
return true;
}
}
/* If there is a free slot in this node then we can just insert the
* leaf here.
*/
if (free_slot >= 0) {
pr_devel("insert in free slot %d\n", free_slot);
edit->leaf_p = &node->slots[free_slot];
edit->adjust_count_on = node;
pr_devel("<--%s() = ok [insert]\n", __func__);
return true;
}
/* The node has no spare slots - so we're either going to have to split
* it or insert another node before it.
*
* Whatever, we're going to need at least two new nodes - so allocate
* those now. We may also need a new shortcut, but we deal with that
* when we need it.
*/
new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
if (!new_n0)
return false;
edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
new_n1 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
if (!new_n1)
return false;
edit->new_meta[1] = assoc_array_node_to_ptr(new_n1);
/* We need to find out how similar the leaves are. */
pr_devel("no spare slots\n");
have_meta = false;
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
ptr = node->slots[i];
if (assoc_array_ptr_is_meta(ptr)) {
edit->segment_cache[i] = 0xff;
have_meta = true;
continue;
}
base_seg = ops->get_object_key_chunk(
assoc_array_ptr_to_leaf(ptr), level);
base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK;
edit->segment_cache[i] = base_seg & ASSOC_ARRAY_FAN_MASK;
}
if (have_meta) {
pr_devel("have meta\n");
goto split_node;
}
/* The node contains only leaves */
dissimilarity = 0;
base_seg = edit->segment_cache[0];
for (i = 1; i < ASSOC_ARRAY_FAN_OUT; i++)
dissimilarity |= edit->segment_cache[i] ^ base_seg;
pr_devel("only leaves; dissimilarity=%lx\n", dissimilarity);
if ((dissimilarity & ASSOC_ARRAY_FAN_MASK) == 0) {
/* The old leaves all cluster in the same slot. We will need
* to insert a shortcut if the new node wants to cluster with them.
*/
if ((edit->segment_cache[ASSOC_ARRAY_FAN_OUT] ^ base_seg) == 0)
goto all_leaves_cluster_together;
/* Otherwise all the old leaves cluster in the same slot, but
* the new leaf wants to go into a different slot - so we
* create a new node (n0) to hold the new leaf and a pointer to
* a new node (n1) holding all the old leaves.
*
* This can be done by falling through to the node splitting
* path.
*/
pr_devel("present leaves cluster but not new leaf\n");
}
split_node:
pr_devel("split node\n");
/* We need to split the current node. The node must contain anything
* from a single leaf (in the one leaf case, this leaf will cluster
* with the new leaf) and the rest meta-pointers, to all leaves, some
* of which may cluster.
*
* It won't contain the case in which all the current leaves plus the
* new leaves want to cluster in the same slot.
*
* We need to expel at least two leaves out of a set consisting of the
* leaves in the node and the new leaf. The current meta pointers can
* just be copied as they shouldn't cluster with any of the leaves.
*
* We need a new node (n0) to replace the current one and a new node to
* take the expelled nodes (n1).
*/
edit->set[0].to = assoc_array_node_to_ptr(new_n0);
new_n0->back_pointer = node->back_pointer;
new_n0->parent_slot = node->parent_slot;
new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
new_n1->parent_slot = -1; /* Need to calculate this */
do_split_node:
pr_devel("do_split_node\n");
new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
new_n1->nr_leaves_on_branch = 0;
/* Begin by finding two matching leaves. There have to be at least two
* that match - even if there are meta pointers - because any leaf that
* would match a slot with a meta pointer in it must be somewhere
* behind that meta pointer and cannot be here. Further, given N
* remaining leaf slots, we now have N+1 leaves to go in them.
*/
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
slot = edit->segment_cache[i];
if (slot != 0xff)
for (j = i + 1; j < ASSOC_ARRAY_FAN_OUT + 1; j++)
if (edit->segment_cache[j] == slot)
goto found_slot_for_multiple_occupancy;
}
found_slot_for_multiple_occupancy:
pr_devel("same slot: %x %x [%02x]\n", i, j, slot);
BUG_ON(i >= ASSOC_ARRAY_FAN_OUT);
BUG_ON(j >= ASSOC_ARRAY_FAN_OUT + 1);
BUG_ON(slot >= ASSOC_ARRAY_FAN_OUT);
new_n1->parent_slot = slot;
/* Metadata pointers cannot change slot */
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++)
if (assoc_array_ptr_is_meta(node->slots[i]))
new_n0->slots[i] = node->slots[i];
else
new_n0->slots[i] = NULL;
BUG_ON(new_n0->slots[slot] != NULL);
new_n0->slots[slot] = assoc_array_node_to_ptr(new_n1);
/* Filter the leaf pointers between the new nodes */
free_slot = -1;
next_slot = 0;
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
if (assoc_array_ptr_is_meta(node->slots[i]))
continue;
if (edit->segment_cache[i] == slot) {
new_n1->slots[next_slot++] = node->slots[i];
new_n1->nr_leaves_on_branch++;
} else {
do {
free_slot++;
} while (new_n0->slots[free_slot] != NULL);
new_n0->slots[free_slot] = node->slots[i];
}
}
pr_devel("filtered: f=%x n=%x\n", free_slot, next_slot);
if (edit->segment_cache[ASSOC_ARRAY_FAN_OUT] != slot) {
do {
free_slot++;
} while (new_n0->slots[free_slot] != NULL);
edit->leaf_p = &new_n0->slots[free_slot];
edit->adjust_count_on = new_n0;
} else {
edit->leaf_p = &new_n1->slots[next_slot++];
edit->adjust_count_on = new_n1;
}
BUG_ON(next_slot <= 1);
edit->set_backpointers_to = assoc_array_node_to_ptr(new_n0);
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
if (edit->segment_cache[i] == 0xff) {
ptr = node->slots[i];
BUG_ON(assoc_array_ptr_is_leaf(ptr));
if (assoc_array_ptr_is_node(ptr)) {
side = assoc_array_ptr_to_node(ptr);
edit->set_backpointers[i] = &side->back_pointer;
} else {
shortcut = assoc_array_ptr_to_shortcut(ptr);
edit->set_backpointers[i] = &shortcut->back_pointer;
}
}
}
ptr = node->back_pointer;
if (!ptr)
edit->set[0].ptr = &edit->array->root;
else if (assoc_array_ptr_is_node(ptr))
edit->set[0].ptr = &assoc_array_ptr_to_node(ptr)->slots[node->parent_slot];
else
edit->set[0].ptr = &assoc_array_ptr_to_shortcut(ptr)->next_node;
edit->excised_meta[0] = assoc_array_node_to_ptr(node);
pr_devel("<--%s() = ok [split node]\n", __func__);
return true;
all_leaves_cluster_together:
/* All the leaves, new and old, want to cluster together in this node
* in the same slot, so we have to replace this node with a shortcut to
* skip over the identical parts of the key and then place a pair of
* nodes, one inside the other, at the end of the shortcut and
* distribute the keys between them.
*
* Firstly we need to work out where the leaves start diverging as a
* bit position into their keys so that we know how big the shortcut
* needs to be.
*
* We only need to make a single pass of N of the N+1 leaves because if
* any keys differ between themselves at bit X then at least one of
* them must also differ with the base key at bit X or before.
*/
pr_devel("all leaves cluster together\n");
diff = INT_MAX;
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
int x = ops->diff_objects(assoc_array_ptr_to_leaf(node->slots[i]),
index_key);
if (x < diff) {
BUG_ON(x < 0);
diff = x;
}
}
BUG_ON(diff == INT_MAX);
BUG_ON(diff < level + ASSOC_ARRAY_LEVEL_STEP);
keylen = round_up(diff, ASSOC_ARRAY_KEY_CHUNK_SIZE);
keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
new_s0 = kzalloc(sizeof(struct assoc_array_shortcut) +
keylen * sizeof(unsigned long), GFP_KERNEL);
if (!new_s0)
return false;
edit->new_meta[2] = assoc_array_shortcut_to_ptr(new_s0);
edit->set[0].to = assoc_array_shortcut_to_ptr(new_s0);
new_s0->back_pointer = node->back_pointer;
new_s0->parent_slot = node->parent_slot;
new_s0->next_node = assoc_array_node_to_ptr(new_n0);
new_n0->back_pointer = assoc_array_shortcut_to_ptr(new_s0);
new_n0->parent_slot = 0;
new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
new_n1->parent_slot = -1; /* Need to calculate this */
new_s0->skip_to_level = level = diff & ~ASSOC_ARRAY_LEVEL_STEP_MASK;
pr_devel("skip_to_level = %d [diff %d]\n", level, diff);
BUG_ON(level <= 0);
for (i = 0; i < keylen; i++)
new_s0->index_key[i] =
ops->get_key_chunk(index_key, i * ASSOC_ARRAY_KEY_CHUNK_SIZE);
blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank);
new_s0->index_key[keylen - 1] &= ~blank;
/* This now reduces to a node splitting exercise for which we'll need
* to regenerate the disparity table.
*/
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
ptr = node->slots[i];
base_seg = ops->get_object_key_chunk(assoc_array_ptr_to_leaf(ptr),
level);
base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK;
edit->segment_cache[i] = base_seg & ASSOC_ARRAY_FAN_MASK;
}
base_seg = ops->get_key_chunk(index_key, level);
base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK;
edit->segment_cache[ASSOC_ARRAY_FAN_OUT] = base_seg & ASSOC_ARRAY_FAN_MASK;
goto do_split_node;
}
| 20,134 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int build_ntlmssp_auth_blob(unsigned char **pbuffer,
u16 *buflen,
struct cifs_ses *ses,
const struct nls_table *nls_cp)
{
int rc;
AUTHENTICATE_MESSAGE *sec_blob;
__u32 flags;
unsigned char *tmp;
rc = setup_ntlmv2_rsp(ses, nls_cp);
if (rc) {
cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
*buflen = 0;
goto setup_ntlmv2_ret;
}
*pbuffer = kmalloc(size_of_ntlmssp_blob(ses), GFP_KERNEL);
sec_blob = (AUTHENTICATE_MESSAGE *)*pbuffer;
memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
sec_blob->MessageType = NtLmAuthenticate;
flags = NTLMSSP_NEGOTIATE_56 |
NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_TARGET_INFO |
NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC;
if (ses->server->sign) {
flags |= NTLMSSP_NEGOTIATE_SIGN;
if (!ses->server->session_estab ||
ses->ntlmssp->sesskey_per_smbsess)
flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
}
tmp = *pbuffer + sizeof(AUTHENTICATE_MESSAGE);
sec_blob->NegotiateFlags = cpu_to_le32(flags);
sec_blob->LmChallengeResponse.BufferOffset =
cpu_to_le32(sizeof(AUTHENTICATE_MESSAGE));
sec_blob->LmChallengeResponse.Length = 0;
sec_blob->LmChallengeResponse.MaximumLength = 0;
sec_blob->NtChallengeResponse.BufferOffset =
cpu_to_le32(tmp - *pbuffer);
if (ses->user_name != NULL) {
memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
ses->auth_key.len - CIFS_SESS_KEY_SIZE);
tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
sec_blob->NtChallengeResponse.Length =
cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
sec_blob->NtChallengeResponse.MaximumLength =
cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
} else {
/*
* don't send an NT Response for anonymous access
*/
sec_blob->NtChallengeResponse.Length = 0;
sec_blob->NtChallengeResponse.MaximumLength = 0;
}
if (ses->domainName == NULL) {
sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
sec_blob->DomainName.Length = 0;
sec_blob->DomainName.MaximumLength = 0;
tmp += 2;
} else {
int len;
len = cifs_strtoUTF16((__le16 *)tmp, ses->domainName,
CIFS_MAX_DOMAINNAME_LEN, nls_cp);
len *= 2; /* unicode is 2 bytes each */
sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
sec_blob->DomainName.Length = cpu_to_le16(len);
sec_blob->DomainName.MaximumLength = cpu_to_le16(len);
tmp += len;
}
if (ses->user_name == NULL) {
sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
sec_blob->UserName.Length = 0;
sec_blob->UserName.MaximumLength = 0;
tmp += 2;
} else {
int len;
len = cifs_strtoUTF16((__le16 *)tmp, ses->user_name,
CIFS_MAX_USERNAME_LEN, nls_cp);
len *= 2; /* unicode is 2 bytes each */
sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
sec_blob->UserName.Length = cpu_to_le16(len);
sec_blob->UserName.MaximumLength = cpu_to_le16(len);
tmp += len;
}
sec_blob->WorkstationName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
sec_blob->WorkstationName.Length = 0;
sec_blob->WorkstationName.MaximumLength = 0;
tmp += 2;
if (((ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_KEY_XCH) ||
(ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC))
&& !calc_seckey(ses)) {
memcpy(tmp, ses->ntlmssp->ciphertext, CIFS_CPHTXT_SIZE);
sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer);
sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE);
sec_blob->SessionKey.MaximumLength =
cpu_to_le16(CIFS_CPHTXT_SIZE);
tmp += CIFS_CPHTXT_SIZE;
} else {
sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer);
sec_blob->SessionKey.Length = 0;
sec_blob->SessionKey.MaximumLength = 0;
}
*buflen = tmp - *pbuffer;
setup_ntlmv2_ret:
return rc;
}
Commit Message: CIFS: Enable encryption during session setup phase
In order to allow encryption on an SMB connection we need to exchange
a session key and generate encryption and decryption keys.
Signed-off-by: Pavel Shilovsky <[email protected]>
CWE ID: CWE-476 | int build_ntlmssp_auth_blob(unsigned char **pbuffer,
u16 *buflen,
struct cifs_ses *ses,
const struct nls_table *nls_cp)
{
int rc;
AUTHENTICATE_MESSAGE *sec_blob;
__u32 flags;
unsigned char *tmp;
rc = setup_ntlmv2_rsp(ses, nls_cp);
if (rc) {
cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
*buflen = 0;
goto setup_ntlmv2_ret;
}
*pbuffer = kmalloc(size_of_ntlmssp_blob(ses), GFP_KERNEL);
sec_blob = (AUTHENTICATE_MESSAGE *)*pbuffer;
memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
sec_blob->MessageType = NtLmAuthenticate;
flags = NTLMSSP_NEGOTIATE_56 |
NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_TARGET_INFO |
NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC |
NTLMSSP_NEGOTIATE_SEAL;
if (ses->server->sign)
flags |= NTLMSSP_NEGOTIATE_SIGN;
if (!ses->server->session_estab || ses->ntlmssp->sesskey_per_smbsess)
flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
tmp = *pbuffer + sizeof(AUTHENTICATE_MESSAGE);
sec_blob->NegotiateFlags = cpu_to_le32(flags);
sec_blob->LmChallengeResponse.BufferOffset =
cpu_to_le32(sizeof(AUTHENTICATE_MESSAGE));
sec_blob->LmChallengeResponse.Length = 0;
sec_blob->LmChallengeResponse.MaximumLength = 0;
sec_blob->NtChallengeResponse.BufferOffset =
cpu_to_le32(tmp - *pbuffer);
if (ses->user_name != NULL) {
memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
ses->auth_key.len - CIFS_SESS_KEY_SIZE);
tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
sec_blob->NtChallengeResponse.Length =
cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
sec_blob->NtChallengeResponse.MaximumLength =
cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
} else {
/*
* don't send an NT Response for anonymous access
*/
sec_blob->NtChallengeResponse.Length = 0;
sec_blob->NtChallengeResponse.MaximumLength = 0;
}
if (ses->domainName == NULL) {
sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
sec_blob->DomainName.Length = 0;
sec_blob->DomainName.MaximumLength = 0;
tmp += 2;
} else {
int len;
len = cifs_strtoUTF16((__le16 *)tmp, ses->domainName,
CIFS_MAX_DOMAINNAME_LEN, nls_cp);
len *= 2; /* unicode is 2 bytes each */
sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
sec_blob->DomainName.Length = cpu_to_le16(len);
sec_blob->DomainName.MaximumLength = cpu_to_le16(len);
tmp += len;
}
if (ses->user_name == NULL) {
sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
sec_blob->UserName.Length = 0;
sec_blob->UserName.MaximumLength = 0;
tmp += 2;
} else {
int len;
len = cifs_strtoUTF16((__le16 *)tmp, ses->user_name,
CIFS_MAX_USERNAME_LEN, nls_cp);
len *= 2; /* unicode is 2 bytes each */
sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
sec_blob->UserName.Length = cpu_to_le16(len);
sec_blob->UserName.MaximumLength = cpu_to_le16(len);
tmp += len;
}
sec_blob->WorkstationName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
sec_blob->WorkstationName.Length = 0;
sec_blob->WorkstationName.MaximumLength = 0;
tmp += 2;
if (((ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_KEY_XCH) ||
(ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC))
&& !calc_seckey(ses)) {
memcpy(tmp, ses->ntlmssp->ciphertext, CIFS_CPHTXT_SIZE);
sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer);
sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE);
sec_blob->SessionKey.MaximumLength =
cpu_to_le16(CIFS_CPHTXT_SIZE);
tmp += CIFS_CPHTXT_SIZE;
} else {
sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer);
sec_blob->SessionKey.Length = 0;
sec_blob->SessionKey.MaximumLength = 0;
}
*buflen = tmp - *pbuffer;
setup_ntlmv2_ret:
return rc;
}
| 12,599 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: LayerTreeCoordinator::LayerTreeCoordinator(WebPage* webPage)
: LayerTreeHost(webPage)
, m_notifyAfterScheduledLayerFlush(false)
, m_isValid(true)
, m_waitingForUIProcess(true)
, m_isSuspended(false)
, m_contentsScale(1)
, m_shouldSendScrollPositionUpdate(true)
, m_shouldSyncFrame(false)
, m_shouldSyncRootLayer(true)
, m_layerFlushTimer(this, &LayerTreeCoordinator::layerFlushTimerFired)
, m_layerFlushSchedulingEnabled(true)
, m_forceRepaintAsyncCallbackID(0)
{
m_rootLayer = GraphicsLayer::create(this);
CoordinatedGraphicsLayer* webRootLayer = toCoordinatedGraphicsLayer(m_rootLayer.get());
webRootLayer->setRootLayer(true);
#ifndef NDEBUG
m_rootLayer->setName("LayerTreeCoordinator root layer");
#endif
m_rootLayer->setDrawsContent(false);
m_rootLayer->setSize(m_webPage->size());
m_layerTreeContext.webLayerID = toCoordinatedGraphicsLayer(webRootLayer)->id();
m_nonCompositedContentLayer = GraphicsLayer::create(this);
toCoordinatedGraphicsLayer(m_rootLayer.get())->setCoordinatedGraphicsLayerClient(this);
#ifndef NDEBUG
m_nonCompositedContentLayer->setName("LayerTreeCoordinator non-composited content");
#endif
m_nonCompositedContentLayer->setDrawsContent(true);
m_nonCompositedContentLayer->setSize(m_webPage->size());
m_rootLayer->addChild(m_nonCompositedContentLayer.get());
if (m_webPage->hasPageOverlay())
createPageOverlayLayer();
scheduleLayerFlush();
}
Commit Message: [WK2] LayerTreeCoordinator should release unused UpdatedAtlases
https://bugs.webkit.org/show_bug.cgi?id=95072
Reviewed by Jocelyn Turcotte.
Release graphic buffers that haven't been used for a while in order to save memory.
This way we can give back memory to the system when no user interaction happens
after a period of time, for example when we are in the background.
* Shared/ShareableBitmap.h:
* WebProcess/WebPage/CoordinatedGraphics/LayerTreeCoordinator.cpp:
(WebKit::LayerTreeCoordinator::LayerTreeCoordinator):
(WebKit::LayerTreeCoordinator::beginContentUpdate):
(WebKit):
(WebKit::LayerTreeCoordinator::scheduleReleaseInactiveAtlases):
(WebKit::LayerTreeCoordinator::releaseInactiveAtlasesTimerFired):
* WebProcess/WebPage/CoordinatedGraphics/LayerTreeCoordinator.h:
(LayerTreeCoordinator):
* WebProcess/WebPage/UpdateAtlas.cpp:
(WebKit::UpdateAtlas::UpdateAtlas):
(WebKit::UpdateAtlas::didSwapBuffers):
Don't call buildLayoutIfNeeded here. It's enough to call it in beginPaintingOnAvailableBuffer
and this way we can track whether this atlas is used with m_areaAllocator.
(WebKit::UpdateAtlas::beginPaintingOnAvailableBuffer):
* WebProcess/WebPage/UpdateAtlas.h:
(WebKit::UpdateAtlas::addTimeInactive):
(WebKit::UpdateAtlas::isInactive):
(WebKit::UpdateAtlas::isInUse):
(UpdateAtlas):
git-svn-id: svn://svn.chromium.org/blink/trunk@128473 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: CWE-20 | LayerTreeCoordinator::LayerTreeCoordinator(WebPage* webPage)
: LayerTreeHost(webPage)
, m_notifyAfterScheduledLayerFlush(false)
, m_isValid(true)
, m_waitingForUIProcess(true)
, m_isSuspended(false)
, m_contentsScale(1)
, m_shouldSendScrollPositionUpdate(true)
, m_shouldSyncFrame(false)
, m_shouldSyncRootLayer(true)
, m_layerFlushTimer(this, &LayerTreeCoordinator::layerFlushTimerFired)
, m_releaseInactiveAtlasesTimer(this, &LayerTreeCoordinator::releaseInactiveAtlasesTimerFired)
, m_layerFlushSchedulingEnabled(true)
, m_forceRepaintAsyncCallbackID(0)
{
m_rootLayer = GraphicsLayer::create(this);
CoordinatedGraphicsLayer* webRootLayer = toCoordinatedGraphicsLayer(m_rootLayer.get());
webRootLayer->setRootLayer(true);
#ifndef NDEBUG
m_rootLayer->setName("LayerTreeCoordinator root layer");
#endif
m_rootLayer->setDrawsContent(false);
m_rootLayer->setSize(m_webPage->size());
m_layerTreeContext.webLayerID = toCoordinatedGraphicsLayer(webRootLayer)->id();
m_nonCompositedContentLayer = GraphicsLayer::create(this);
toCoordinatedGraphicsLayer(m_rootLayer.get())->setCoordinatedGraphicsLayerClient(this);
#ifndef NDEBUG
m_nonCompositedContentLayer->setName("LayerTreeCoordinator non-composited content");
#endif
m_nonCompositedContentLayer->setDrawsContent(true);
m_nonCompositedContentLayer->setSize(m_webPage->size());
m_rootLayer->addChild(m_nonCompositedContentLayer.get());
if (m_webPage->hasPageOverlay())
createPageOverlayLayer();
scheduleLayerFlush();
}
| 23,350 |
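The change described above adds a timer that accumulates per-atlas inactive time and releases atlases that have gone unused for a while, returning memory to the system. A stripped-down sketch of that bookkeeping in plain C is below; the structure, constants and names are invented, and the real code drives this from a WebCore timer rather than a loop.
/* idle_cache_release_sketch.c: drop cached buffers that have sat idle too long. */
#include <stdio.h>
#include <stdlib.h>
#define MAX_ATLASES 8
#define IDLE_LIMIT  3.0          /* seconds of inactivity before release */
struct atlas {
    void  *pixels;               /* the expensive backing buffer */
    double idle_seconds;         /* accumulated time since last use */
    int    in_use;
};
static struct atlas atlases[MAX_ATLASES];
static void touch_atlas(struct atlas *a)      /* called when a buffer is reused */
{
    a->idle_seconds = 0.0;
    a->in_use = 1;
}
/* Called from a periodic timer with the elapsed interval. */
static void release_inactive_atlases(double elapsed)
{
    for (int i = 0; i < MAX_ATLASES; i++) {
        struct atlas *a = &atlases[i];
        if (a->pixels == NULL || a->in_use)
            continue;
        a->idle_seconds += elapsed;
        if (a->idle_seconds >= IDLE_LIMIT) {  /* unused for too long: free it */
            free(a->pixels);
            a->pixels = NULL;
            printf("released atlas %d\n", i);
        }
    }
}
int main(void)
{
    atlases[0].pixels = malloc(4096);         /* idle from the start */
    atlases[1].pixels = malloc(4096);
    if (atlases[0].pixels == NULL || atlases[1].pixels == NULL)
        return 1;
    touch_atlas(&atlases[1]);                 /* atlas 1 is actively in use */
    for (int tick = 0; tick < 4; tick++)
        release_inactive_atlases(1.0);        /* pretend 1-second timer ticks */
    free(atlases[1].pixels);                  /* still alive: freed on shutdown */
    return 0;
}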
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void CoordinatorImpl::FinalizeGlobalMemoryDumpIfAllManagersReplied() {
TRACE_EVENT0(base::trace_event::MemoryDumpManager::kTraceCategory,
"GlobalMemoryDump.Computation");
DCHECK(!queued_memory_dump_requests_.empty());
QueuedRequest* request = &queued_memory_dump_requests_.front();
if (!request->dump_in_progress || request->pending_responses.size() > 0 ||
request->heap_dump_in_progress) {
return;
}
QueuedRequestDispatcher::Finalize(request, tracing_observer_.get());
queued_memory_dump_requests_.pop_front();
request = nullptr;
if (!queued_memory_dump_requests_.empty()) {
base::SequencedTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
base::BindOnce(&CoordinatorImpl::PerformNextQueuedGlobalMemoryDump,
base::Unretained(this)));
}
}
Commit Message: Fix heap-use-after-free by using weak factory instead of Unretained
Bug: 856578
Change-Id: Ifb2a1b7e6c22e1af36e12eedba72427f51d925b9
Reviewed-on: https://chromium-review.googlesource.com/1114617
Reviewed-by: Hector Dearman <[email protected]>
Commit-Queue: Hector Dearman <[email protected]>
Cr-Commit-Position: refs/heads/master@{#571528}
CWE ID: CWE-416 | void CoordinatorImpl::FinalizeGlobalMemoryDumpIfAllManagersReplied() {
TRACE_EVENT0(base::trace_event::MemoryDumpManager::kTraceCategory,
"GlobalMemoryDump.Computation");
DCHECK(!queued_memory_dump_requests_.empty());
QueuedRequest* request = &queued_memory_dump_requests_.front();
if (!request->dump_in_progress || request->pending_responses.size() > 0 ||
request->heap_dump_in_progress) {
return;
}
QueuedRequestDispatcher::Finalize(request, tracing_observer_.get());
queued_memory_dump_requests_.pop_front();
request = nullptr;
if (!queued_memory_dump_requests_.empty()) {
base::SequencedTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
base::BindOnce(&CoordinatorImpl::PerformNextQueuedGlobalMemoryDump,
weak_ptr_factory_.GetWeakPtr()));
}
}
| 16,179 |
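The fix above swaps base::Unretained(this) for a pointer obtained from the coordinator's weak-pointer factory, so a queued dump task that fires after the coordinator is destroyed is dropped instead of touching freed memory. As a rough standalone analogue (plain C++17, not Chromium's base library; every name below is invented for illustration), std::weak_ptr gives the same guard for deferred callbacks:

#include <functional>
#include <iostream>
#include <memory>
#include <queue>

std::queue<std::function<void()>> task_queue;  // stand-in for a task runner

struct Coordinator : std::enable_shared_from_this<Coordinator> {
  void PostNextDump() {
    std::weak_ptr<Coordinator> weak = weak_from_this();  // no raw 'this' captured
    task_queue.push([weak] {
      if (auto self = weak.lock())        // owner still alive: safe to use
        self->PerformNextQueuedDump();
      // otherwise the task is dropped instead of touching freed memory
    });
  }
  void PerformNextQueuedDump() { std::cout << "dumping\n"; }
};

int main() {
  auto coordinator = std::make_shared<Coordinator>();
  coordinator->PostNextDump();
  coordinator.reset();                    // destroyed before the task runs
  while (!task_queue.empty()) {           // draining the queue is now harmless
    task_queue.front()();
    task_queue.pop();
  }
}

The key property is that the queued task holds no raw pointer to its owner; liveness is re-checked at run time, which is exactly what the weak-factory change buys the real code.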
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static void coroutine_fn v9fs_link(void *opaque)
{
V9fsPDU *pdu = opaque;
int32_t dfid, oldfid;
V9fsFidState *dfidp, *oldfidp;
V9fsString name;
size_t offset = 7;
int err = 0;
v9fs_string_init(&name);
err = pdu_unmarshal(pdu, offset, "dds", &dfid, &oldfid, &name);
if (err < 0) {
goto out_nofid;
}
trace_v9fs_link(pdu->tag, pdu->id, dfid, oldfid, name.data);
if (name_is_illegal(name.data)) {
err = -ENOENT;
goto out_nofid;
}
if (!strcmp(".", name.data) || !strcmp("..", name.data)) {
err = -EEXIST;
goto out_nofid;
}
dfidp = get_fid(pdu, dfid);
if (dfidp == NULL) {
err = -ENOENT;
goto out_nofid;
}
oldfidp = get_fid(pdu, oldfid);
if (oldfidp == NULL) {
err = -ENOENT;
goto out;
}
err = v9fs_co_link(pdu, oldfidp, dfidp, &name);
if (!err) {
err = offset;
}
out:
put_fid(pdu, dfidp);
out_nofid:
pdu_complete(pdu, err);
}
Commit Message:
CWE ID: CWE-399 | static void coroutine_fn v9fs_link(void *opaque)
{
V9fsPDU *pdu = opaque;
int32_t dfid, oldfid;
V9fsFidState *dfidp, *oldfidp;
V9fsString name;
size_t offset = 7;
int err = 0;
v9fs_string_init(&name);
err = pdu_unmarshal(pdu, offset, "dds", &dfid, &oldfid, &name);
if (err < 0) {
goto out_nofid;
}
trace_v9fs_link(pdu->tag, pdu->id, dfid, oldfid, name.data);
if (name_is_illegal(name.data)) {
err = -ENOENT;
goto out_nofid;
}
if (!strcmp(".", name.data) || !strcmp("..", name.data)) {
err = -EEXIST;
goto out_nofid;
}
dfidp = get_fid(pdu, dfid);
if (dfidp == NULL) {
err = -ENOENT;
goto out_nofid;
}
oldfidp = get_fid(pdu, oldfid);
if (oldfidp == NULL) {
err = -ENOENT;
goto out;
}
err = v9fs_co_link(pdu, oldfidp, dfidp, &name);
if (!err) {
err = offset;
}
put_fid(pdu, oldfidp);
out:
put_fid(pdu, dfidp);
out_nofid:
pdu_complete(pdu, err);
}
| 22,113 |
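The patched v9fs_link() adds the missing put_fid() for oldfidp, so every successful get_fid() is balanced by exactly one release before the PDU completes. A toy acquire/release sketch of that discipline (the Handle type and helpers are invented, not the 9p fid code):

#include <cassert>
#include <cstdio>

struct Handle { int refs = 0; };          // toy counted handle, not the 9p fid type

Handle *get_handle(Handle &h) { ++h.refs; return &h; }
void put_handle(Handle *h) { assert(h->refs > 0); --h->refs; }

int do_link(Handle &dir, Handle &old_file) {
  Handle *d = get_handle(dir);
  Handle *o = get_handle(old_file);
  int err = 0;                            // ...the real link work would go here...
  put_handle(o);                          // release both on every exit path
  put_handle(d);
  return err;
}

int main() {
  Handle dir, file;
  do_link(dir, file);
  std::printf("dir refs=%d, file refs=%d\n", dir.refs, file.refs);  // both 0 again
}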
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void RunCoeffCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 5000;
DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
DECLARE_ALIGNED_ARRAY(16, int16_t, output_ref_block, kNumCoeffs);
DECLARE_ALIGNED_ARRAY(16, int16_t, output_block, kNumCoeffs);
for (int i = 0; i < count_test_block; ++i) {
for (int j = 0; j < kNumCoeffs; ++j)
input_block[j] = rnd.Rand8() - rnd.Rand8();
fwd_txfm_ref(input_block, output_ref_block, pitch_, tx_type_);
REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_));
for (int j = 0; j < kNumCoeffs; ++j)
EXPECT_EQ(output_block[j], output_ref_block[j]);
}
}
Commit Message: Merge Conflict Fix CL to lmp-mr1-release for ag/849478
DO NOT MERGE - libvpx: Pull from upstream
Current HEAD: 7105df53d7dc13d5e575bc8df714ec8d1da36b06
BUG=23452792
Change-Id: Ic78176fc369e0bacc71d423e0e2e6075d004aaec
CWE ID: CWE-119 | void RunCoeffCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 5000;
DECLARE_ALIGNED(16, int16_t, input_block[kNumCoeffs]);
DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
for (int i = 0; i < count_test_block; ++i) {
// Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j)
input_block[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_);
fwd_txfm_ref(input_block, output_ref_block, pitch_, tx_type_);
ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_));
for (int j = 0; j < kNumCoeffs; ++j)
EXPECT_EQ(output_block[j], output_ref_block[j]);
}
}
| 17,757 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
size_t size, int msg_flags)
{
struct sock *sk = sock->sk;
struct rds_sock *rs = rds_sk_to_rs(sk);
long timeo;
int ret = 0, nonblock = msg_flags & MSG_DONTWAIT;
struct sockaddr_in *sin;
struct rds_incoming *inc = NULL;
/* udp_recvmsg()->sock_recvtimeo() gets away without locking too.. */
timeo = sock_rcvtimeo(sk, nonblock);
rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo);
if (msg_flags & MSG_OOB)
goto out;
while (1) {
/* If there are pending notifications, do those - and nothing else */
if (!list_empty(&rs->rs_notify_queue)) {
ret = rds_notify_queue_get(rs, msg);
break;
}
if (rs->rs_cong_notify) {
ret = rds_notify_cong(rs, msg);
break;
}
if (!rds_next_incoming(rs, &inc)) {
if (nonblock) {
ret = -EAGAIN;
break;
}
timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
(!list_empty(&rs->rs_notify_queue) ||
rs->rs_cong_notify ||
rds_next_incoming(rs, &inc)), timeo);
rdsdebug("recvmsg woke inc %p timeo %ld\n", inc,
timeo);
if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
continue;
ret = timeo;
if (ret == 0)
ret = -ETIMEDOUT;
break;
}
rdsdebug("copying inc %p from %pI4:%u to user\n", inc,
&inc->i_conn->c_faddr,
ntohs(inc->i_hdr.h_sport));
ret = inc->i_conn->c_trans->inc_copy_to_user(inc, msg->msg_iov,
size);
if (ret < 0)
break;
/*
* if the message we just copied isn't at the head of the
* recv queue then someone else raced us to return it, try
* to get the next message.
*/
if (!rds_still_queued(rs, inc, !(msg_flags & MSG_PEEK))) {
rds_inc_put(inc);
inc = NULL;
rds_stats_inc(s_recv_deliver_raced);
continue;
}
if (ret < be32_to_cpu(inc->i_hdr.h_len)) {
if (msg_flags & MSG_TRUNC)
ret = be32_to_cpu(inc->i_hdr.h_len);
msg->msg_flags |= MSG_TRUNC;
}
if (rds_cmsg_recv(inc, msg)) {
ret = -EFAULT;
goto out;
}
rds_stats_inc(s_recv_delivered);
sin = (struct sockaddr_in *)msg->msg_name;
if (sin) {
sin->sin_family = AF_INET;
sin->sin_port = inc->i_hdr.h_sport;
sin->sin_addr.s_addr = inc->i_saddr;
memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
}
break;
}
if (inc)
rds_inc_put(inc);
out:
return ret;
}
Commit Message: rds: set correct msg_namelen
Jay Fenlason ([email protected]) found a bug,
that recvfrom() on an RDS socket can return the contents of random kernel
memory to userspace if it was called with a address length larger than
sizeof(struct sockaddr_in).
rds_recvmsg() also fails to set the addr_len parameter properly before
returning, but that's just a bug.
There are also a number of cases where recvfrom() can return an entirely bogus
address. Anything in rds_recvmsg() that returns a non-negative value but does
not go through the "sin = (struct sockaddr_in *)msg->msg_name;" code path
at the end of the while(1) loop will return up to 128 bytes of kernel memory
to userspace.
And I write two test programs to reproduce this bug, you will see that in
rds_server, fromAddr will be overwritten and the following sock_fd will be
destroyed.
Yes, it is the programmer's fault to set msg_namelen incorrectly, but it is
better to make the kernel copy the real length of address to user space in
such case.
How to run the test programs ?
I test them on 32bit x86 system, 3.5.0-rc7.
1 compile
gcc -o rds_client rds_client.c
gcc -o rds_server rds_server.c
2 run ./rds_server on one console
3 run ./rds_client on another console
4 you will see something like:
server is waiting to receive data...
old socket fd=3
server received data from client:data from client
msg.msg_namelen=32
new socket fd=-1067277685
sendmsg()
: Bad file descriptor
/***************** rds_client.c ********************/
int main(void)
{
int sock_fd;
struct sockaddr_in serverAddr;
struct sockaddr_in toAddr;
char recvBuffer[128] = "data from client";
struct msghdr msg;
struct iovec iov;
sock_fd = socket(AF_RDS, SOCK_SEQPACKET, 0);
if (sock_fd < 0) {
perror("create socket error\n");
exit(1);
}
memset(&serverAddr, 0, sizeof(serverAddr));
serverAddr.sin_family = AF_INET;
serverAddr.sin_addr.s_addr = inet_addr("127.0.0.1");
serverAddr.sin_port = htons(4001);
if (bind(sock_fd, (struct sockaddr*)&serverAddr, sizeof(serverAddr)) < 0) {
perror("bind() error\n");
close(sock_fd);
exit(1);
}
memset(&toAddr, 0, sizeof(toAddr));
toAddr.sin_family = AF_INET;
toAddr.sin_addr.s_addr = inet_addr("127.0.0.1");
toAddr.sin_port = htons(4000);
msg.msg_name = &toAddr;
msg.msg_namelen = sizeof(toAddr);
msg.msg_iov = &iov;
msg.msg_iovlen = 1;
msg.msg_iov->iov_base = recvBuffer;
msg.msg_iov->iov_len = strlen(recvBuffer) + 1;
msg.msg_control = 0;
msg.msg_controllen = 0;
msg.msg_flags = 0;
if (sendmsg(sock_fd, &msg, 0) == -1) {
perror("sendto() error\n");
close(sock_fd);
exit(1);
}
printf("client send data:%s\n", recvBuffer);
memset(recvBuffer, '\0', 128);
msg.msg_name = &toAddr;
msg.msg_namelen = sizeof(toAddr);
msg.msg_iov = &iov;
msg.msg_iovlen = 1;
msg.msg_iov->iov_base = recvBuffer;
msg.msg_iov->iov_len = 128;
msg.msg_control = 0;
msg.msg_controllen = 0;
msg.msg_flags = 0;
if (recvmsg(sock_fd, &msg, 0) == -1) {
perror("recvmsg() error\n");
close(sock_fd);
exit(1);
}
printf("receive data from server:%s\n", recvBuffer);
close(sock_fd);
return 0;
}
/***************** rds_server.c ********************/
int main(void)
{
struct sockaddr_in fromAddr;
int sock_fd;
struct sockaddr_in serverAddr;
unsigned int addrLen;
char recvBuffer[128];
struct msghdr msg;
struct iovec iov;
sock_fd = socket(AF_RDS, SOCK_SEQPACKET, 0);
if(sock_fd < 0) {
perror("create socket error\n");
exit(0);
}
memset(&serverAddr, 0, sizeof(serverAddr));
serverAddr.sin_family = AF_INET;
serverAddr.sin_addr.s_addr = inet_addr("127.0.0.1");
serverAddr.sin_port = htons(4000);
if (bind(sock_fd, (struct sockaddr*)&serverAddr, sizeof(serverAddr)) < 0) {
perror("bind error\n");
close(sock_fd);
exit(1);
}
printf("server is waiting to receive data...\n");
msg.msg_name = &fromAddr;
/*
* I add 16 to sizeof(fromAddr), ie 32,
* and pay attention to the definition of fromAddr,
* recvmsg() will overwrite sock_fd,
* since kernel will copy 32 bytes to userspace.
*
* If you just use sizeof(fromAddr), it works fine.
* */
msg.msg_namelen = sizeof(fromAddr) + 16;
/* msg.msg_namelen = sizeof(fromAddr); */
msg.msg_iov = &iov;
msg.msg_iovlen = 1;
msg.msg_iov->iov_base = recvBuffer;
msg.msg_iov->iov_len = 128;
msg.msg_control = 0;
msg.msg_controllen = 0;
msg.msg_flags = 0;
while (1) {
printf("old socket fd=%d\n", sock_fd);
if (recvmsg(sock_fd, &msg, 0) == -1) {
perror("recvmsg() error\n");
close(sock_fd);
exit(1);
}
printf("server received data from client:%s\n", recvBuffer);
printf("msg.msg_namelen=%d\n", msg.msg_namelen);
printf("new socket fd=%d\n", sock_fd);
strcat(recvBuffer, "--data from server");
if (sendmsg(sock_fd, &msg, 0) == -1) {
perror("sendmsg()\n");
close(sock_fd);
exit(1);
}
}
close(sock_fd);
return 0;
}
Signed-off-by: Weiping Pan <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
CWE ID: CWE-200 | int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
size_t size, int msg_flags)
{
struct sock *sk = sock->sk;
struct rds_sock *rs = rds_sk_to_rs(sk);
long timeo;
int ret = 0, nonblock = msg_flags & MSG_DONTWAIT;
struct sockaddr_in *sin;
struct rds_incoming *inc = NULL;
/* udp_recvmsg()->sock_recvtimeo() gets away without locking too.. */
timeo = sock_rcvtimeo(sk, nonblock);
rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo);
msg->msg_namelen = 0;
if (msg_flags & MSG_OOB)
goto out;
while (1) {
/* If there are pending notifications, do those - and nothing else */
if (!list_empty(&rs->rs_notify_queue)) {
ret = rds_notify_queue_get(rs, msg);
break;
}
if (rs->rs_cong_notify) {
ret = rds_notify_cong(rs, msg);
break;
}
if (!rds_next_incoming(rs, &inc)) {
if (nonblock) {
ret = -EAGAIN;
break;
}
timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
(!list_empty(&rs->rs_notify_queue) ||
rs->rs_cong_notify ||
rds_next_incoming(rs, &inc)), timeo);
rdsdebug("recvmsg woke inc %p timeo %ld\n", inc,
timeo);
if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
continue;
ret = timeo;
if (ret == 0)
ret = -ETIMEDOUT;
break;
}
rdsdebug("copying inc %p from %pI4:%u to user\n", inc,
&inc->i_conn->c_faddr,
ntohs(inc->i_hdr.h_sport));
ret = inc->i_conn->c_trans->inc_copy_to_user(inc, msg->msg_iov,
size);
if (ret < 0)
break;
/*
* if the message we just copied isn't at the head of the
* recv queue then someone else raced us to return it, try
* to get the next message.
*/
if (!rds_still_queued(rs, inc, !(msg_flags & MSG_PEEK))) {
rds_inc_put(inc);
inc = NULL;
rds_stats_inc(s_recv_deliver_raced);
continue;
}
if (ret < be32_to_cpu(inc->i_hdr.h_len)) {
if (msg_flags & MSG_TRUNC)
ret = be32_to_cpu(inc->i_hdr.h_len);
msg->msg_flags |= MSG_TRUNC;
}
if (rds_cmsg_recv(inc, msg)) {
ret = -EFAULT;
goto out;
}
rds_stats_inc(s_recv_delivered);
sin = (struct sockaddr_in *)msg->msg_name;
if (sin) {
sin->sin_family = AF_INET;
sin->sin_port = inc->i_hdr.h_sport;
sin->sin_addr.s_addr = inc->i_saddr;
memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
msg->msg_namelen = sizeof(*sin);
}
break;
}
if (inc)
rds_inc_put(inc);
out:
return ret;
}
| 2,949 |
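The convention the commit message describes is that a recvmsg handler writes at most sizeof(struct sockaddr_storage) worth of address and reports the number of bytes it actually wrote in msg_namelen, ignoring whatever length the caller passed in. A simplified userspace-style sketch of that contract (msg_hdr and fake_recvmsg are invented stand-ins, not kernel APIs):

#include <arpa/inet.h>
#include <cstdint>
#include <cstring>
#include <netinet/in.h>

struct msg_hdr {          // simplified stand-in for the kernel's struct msghdr
  void *msg_name;         // caller-supplied address buffer, may be null
  int   msg_namelen;      // handler reports the bytes it actually wrote
};

static int fake_recvmsg(msg_hdr *msg, uint32_t peer_ip, uint16_t peer_port) {
  if (msg->msg_name) {
    sockaddr_in sin{};
    sin.sin_family = AF_INET;
    sin.sin_port = htons(peer_port);
    sin.sin_addr.s_addr = htonl(peer_ip);
    std::memcpy(msg->msg_name, &sin, sizeof(sin));
    msg->msg_namelen = sizeof(sin);   // never echo the caller's (possibly huge) value
  } else {
    msg->msg_namelen = 0;             // nothing written, nothing reported
  }
  return 0;
}

int main() {
  sockaddr_in out{};
  msg_hdr msg{&out, 128};             // caller claims 128 bytes of space
  fake_recvmsg(&msg, 0x7f000001, 4000);
  return msg.msg_namelen == (int)sizeof(sockaddr_in) ? 0 : 1;
}

Because the handler, not the caller, is the source of truth for msg_namelen, an oversized caller value can no longer cause extra kernel bytes to be copied back.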
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: R_API RConfigNode* r_config_set(RConfig *cfg, const char *name, const char *value) {
RConfigNode *node = NULL;
char *ov = NULL;
ut64 oi;
if (!cfg || STRNULL (name)) {
return NULL;
}
node = r_config_node_get (cfg, name);
if (node) {
if (node->flags & CN_RO) {
eprintf ("(error: '%s' config key is read only)\n", name);
return node;
}
oi = node->i_value;
if (node->value) {
ov = strdup (node->value);
if (!ov) {
goto beach;
}
} else {
free (node->value);
node->value = strdup ("");
}
if (node->flags & CN_BOOL) {
bool b = is_true (value);
node->i_value = (ut64) b? 1: 0;
char *value = strdup (r_str_bool (b));
if (value) {
free (node->value);
node->value = value;
}
} else {
if (!value) {
free (node->value);
node->value = strdup ("");
node->i_value = 0;
} else {
if (node->value == value) {
goto beach;
}
free (node->value);
node->value = strdup (value);
if (IS_DIGIT (*value)) {
if (strchr (value, '/')) {
node->i_value = r_num_get (cfg->num, value);
} else {
node->i_value = r_num_math (cfg->num, value);
}
} else {
node->i_value = 0;
}
node->flags |= CN_INT;
}
}
} else { // Create a new RConfigNode
oi = UT64_MAX;
if (!cfg->lock) {
node = r_config_node_new (name, value);
if (node) {
if (value && is_bool (value)) {
node->flags |= CN_BOOL;
node->i_value = is_true (value)? 1: 0;
}
if (cfg->ht) {
ht_insert (cfg->ht, node->name, node);
r_list_append (cfg->nodes, node);
cfg->n_nodes++;
}
} else {
eprintf ("r_config_set: unable to create a new RConfigNode\n");
}
} else {
eprintf ("r_config_set: variable '%s' not found\n", name);
}
}
if (node && node->setter) {
int ret = node->setter (cfg->user, node);
if (ret == false) {
if (oi != UT64_MAX) {
node->i_value = oi;
}
free (node->value);
node->value = strdup (ov? ov: "");
}
}
beach:
free (ov);
return node;
}
Commit Message: Fix #7698 - UAF in r_config_set when loading a dex
CWE ID: CWE-416 | R_API RConfigNode* r_config_set(RConfig *cfg, const char *name, const char *value) {
RConfigNode *node = NULL;
char *ov = NULL;
ut64 oi;
if (!cfg || STRNULL (name)) {
return NULL;
}
node = r_config_node_get (cfg, name);
if (node) {
if (node->flags & CN_RO) {
eprintf ("(error: '%s' config key is read only)\n", name);
return node;
}
oi = node->i_value;
if (node->value) {
ov = strdup (node->value);
if (!ov) {
goto beach;
}
} else {
free (node->value);
node->value = strdup ("");
}
if (node->flags & CN_BOOL) {
bool b = is_true (value);
node->i_value = (ut64) b? 1: 0;
char *value = strdup (r_str_bool (b));
if (value) {
free (node->value);
node->value = value;
}
} else {
if (!value) {
free (node->value);
node->value = strdup ("");
node->i_value = 0;
} else {
if (node->value == value) {
goto beach;
}
char *tmp = node->value;
node->value = strdup (value);
free (tmp);
if (IS_DIGIT (*value)) {
if (strchr (value, '/')) {
node->i_value = r_num_get (cfg->num, value);
} else {
node->i_value = r_num_math (cfg->num, value);
}
} else {
node->i_value = 0;
}
node->flags |= CN_INT;
}
}
} else { // Create a new RConfigNode
oi = UT64_MAX;
if (!cfg->lock) {
node = r_config_node_new (name, value);
if (node) {
if (value && is_bool (value)) {
node->flags |= CN_BOOL;
node->i_value = is_true (value)? 1: 0;
}
if (cfg->ht) {
ht_insert (cfg->ht, node->name, node);
r_list_append (cfg->nodes, node);
cfg->n_nodes++;
}
} else {
eprintf ("r_config_set: unable to create a new RConfigNode\n");
}
} else {
eprintf ("r_config_set: variable '%s' not found\n", name);
}
}
if (node && node->setter) {
int ret = node->setter (cfg->user, node);
if (ret == false) {
if (oi != UT64_MAX) {
node->i_value = oi;
}
free (node->value);
node->value = strdup (ov? ov: "");
}
}
beach:
free (ov);
return node;
}
| 26,883 |
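The radare2 fix reorders the operations because the incoming value pointer may alias node->value itself: the copy has to be taken while the old buffer is still valid, and only then may the old buffer be freed. A minimal sketch of that aliasing hazard (assumes a POSIX strdup):

#include <cstdio>
#include <cstdlib>
#include <cstring>

// Replace *slot with a copy of value, where value may alias *slot itself.
static void set_value(char **slot, const char *value) {
  char *copy = strdup(value);   // copy first, while the old buffer is still valid
  char *old = *slot;
  *slot = copy;
  free(old);                    // freeing before the strdup would read freed memory
}

int main() {
  char *v = strdup("initial");
  set_value(&v, v);             // self-assignment: the aliasing case the fix guards
  std::puts(v);
  free(v);
}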
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: ResourceRequestBlockedReason BaseFetchContext::CanRequest(
Resource::Type type,
const ResourceRequest& resource_request,
const KURL& url,
const ResourceLoaderOptions& options,
SecurityViolationReportingPolicy reporting_policy,
FetchParameters::OriginRestriction origin_restriction,
ResourceRequest::RedirectStatus redirect_status) const {
ResourceRequestBlockedReason blocked_reason =
CanRequestInternal(type, resource_request, url, options, reporting_policy,
origin_restriction, redirect_status);
if (blocked_reason != ResourceRequestBlockedReason::kNone &&
reporting_policy == SecurityViolationReportingPolicy::kReport) {
DispatchDidBlockRequest(resource_request, options.initiator_info,
blocked_reason);
}
return blocked_reason;
}
Commit Message: DevTools: send proper resource type in Network.RequestWillBeSent
This patch plumbs resource type into the DispatchWillSendRequest
instrumentation. This allows us to report accurate type in
Network.RequestWillBeSent event, instead of "Other", that we report
today.
BUG=765501
R=dgozman
Change-Id: I0134c08b841e8dd247fdc8ff208bfd51e462709c
Reviewed-on: https://chromium-review.googlesource.com/667504
Reviewed-by: Pavel Feldman <[email protected]>
Reviewed-by: Dmitry Gozman <[email protected]>
Commit-Queue: Andrey Lushnikov <[email protected]>
Cr-Commit-Position: refs/heads/master@{#507936}
CWE ID: CWE-119 | ResourceRequestBlockedReason BaseFetchContext::CanRequest(
Resource::Type type,
const ResourceRequest& resource_request,
const KURL& url,
const ResourceLoaderOptions& options,
SecurityViolationReportingPolicy reporting_policy,
FetchParameters::OriginRestriction origin_restriction,
ResourceRequest::RedirectStatus redirect_status) const {
ResourceRequestBlockedReason blocked_reason =
CanRequestInternal(type, resource_request, url, options, reporting_policy,
origin_restriction, redirect_status);
if (blocked_reason != ResourceRequestBlockedReason::kNone &&
reporting_policy == SecurityViolationReportingPolicy::kReport) {
DispatchDidBlockRequest(resource_request, options.initiator_info,
blocked_reason, type);
}
return blocked_reason;
}
| 27,870 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: M_fs_error_t M_fs_move(const char *path_old, const char *path_new, M_uint32 mode, M_fs_progress_cb_t cb, M_uint32 progress_flags)
{
char *norm_path_old;
char *norm_path_new;
char *resolve_path;
M_fs_info_t *info;
M_fs_progress_t *progress = NULL;
M_uint64 entry_size;
M_fs_error_t res;
if (path_old == NULL || *path_old == '\0' || path_new == NULL || *path_new == '\0') {
return M_FS_ERROR_INVALID;
}
/* It's okay if new path doesn't exist. */
res = M_fs_path_norm(&norm_path_new, path_new, M_FS_PATH_NORM_RESDIR, M_FS_SYSTEM_AUTO);
if (res != M_FS_ERROR_SUCCESS) {
M_free(norm_path_new);
return res;
}
/* If a path is a file and the destination is a directory the file should be moved
* into the directory. E.g. /file.txt -> /dir = /dir/file.txt */
if (M_fs_isfileintodir(path_old, path_new, &norm_path_old)) {
M_free(norm_path_new);
res = M_fs_move(path_old, norm_path_old, mode, cb, progress_flags);
M_free(norm_path_old);
return res;
}
/* Normalize the old path and do basic checks that it exists. We'll leave really checking that the old path
* existing to rename because any check we perform may not be true when rename is called. */
res = M_fs_path_norm(&norm_path_old, path_old, M_FS_PATH_NORM_RESALL, M_FS_SYSTEM_AUTO);
if (res != M_FS_ERROR_SUCCESS) {
M_free(norm_path_new);
M_free(norm_path_old);
return res;
}
progress = M_fs_progress_create();
res = M_fs_info(&info, path_old, (mode & M_FS_FILE_MODE_PRESERVE_PERMS)?M_FS_PATH_INFO_FLAGS_NONE:M_FS_PATH_INFO_FLAGS_BASIC);
if (res != M_FS_ERROR_SUCCESS) {
M_fs_progress_destroy(progress);
M_free(norm_path_new);
M_free(norm_path_old);
return res;
}
/* There is a race condition where the path could not exist but be created between the exists check and calling
* rename to move the file but there isn't much we can do in this case. copy will delete and the file so this
* situation won't cause an error. */
if (!M_fs_check_overwrite_allowed(norm_path_old, norm_path_new, mode)) {
M_fs_progress_destroy(progress);
M_free(norm_path_new);
M_free(norm_path_old);
return M_FS_ERROR_FILE_EXISTS;
}
if (cb) {
entry_size = M_fs_info_get_size(info);
M_fs_progress_set_path(progress, norm_path_new);
M_fs_progress_set_type(progress, M_fs_info_get_type(info));
if (progress_flags & M_FS_PROGRESS_SIZE_TOTAL) {
M_fs_progress_set_size_total(progress, entry_size);
M_fs_progress_set_size_total_progess(progress, entry_size);
}
if (progress_flags & M_FS_PROGRESS_SIZE_CUR) {
M_fs_progress_set_size_current(progress, entry_size);
M_fs_progress_set_size_current_progress(progress, entry_size);
}
/* Change the progress count to reflect the count. */
if (progress_flags & M_FS_PROGRESS_COUNT) {
M_fs_progress_set_count_total(progress, 1);
M_fs_progress_set_count(progress, 1);
}
}
/* Move the file. */
if (M_fs_info_get_type(info) == M_FS_TYPE_SYMLINK) {
res = M_fs_path_readlink(&resolve_path, norm_path_old);
if (res == M_FS_ERROR_SUCCESS) {
res = M_fs_symlink(norm_path_new, resolve_path);
}
M_free(resolve_path);
} else {
res = M_fs_move_file(norm_path_old, norm_path_new);
}
/* Failure was because we're crossing mount points. */
if (res == M_FS_ERROR_NOT_SAMEDEV) {
/* Can't rename so copy and delete. */
if (M_fs_copy(norm_path_old, norm_path_new, mode, cb, progress_flags) == M_FS_ERROR_SUCCESS) {
/* Success - Delete the original files since this is a move. */
res = M_fs_delete(norm_path_old, M_TRUE, NULL, M_FS_PROGRESS_NOEXTRA);
} else {
/* Failure - Delete the new files that were copied but only if we are not overwriting. We don't
* want to remove any existing files (especially if the dest is a dir). */
if (!(mode & M_FS_FILE_MODE_OVERWRITE)) {
M_fs_delete(norm_path_new, M_TRUE, NULL, M_FS_PROGRESS_NOEXTRA);
}
res = M_FS_ERROR_GENERIC;
}
} else {
/* Call the cb with the result of the move whether it was a success for fail. We call the cb only if the
* result of the move is not M_FS_ERROR_NOT_SAMEDEV because the copy operation will call the cb for us. */
if (cb) {
M_fs_progress_set_result(progress, res);
if (!cb(progress)) {
res = M_FS_ERROR_CANCELED;
}
}
}
M_fs_info_destroy(info);
M_fs_progress_destroy(progress);
M_free(norm_path_new);
M_free(norm_path_old);
return res;
}
Commit Message: fs: Don't try to delete the file when copying. It could cause a security issue if the file exists and doesn't allow other's to read/write. delete could allow someone to create the file and have access to the data.
CWE ID: CWE-732 | M_fs_error_t M_fs_move(const char *path_old, const char *path_new, M_uint32 mode, M_fs_progress_cb_t cb, M_uint32 progress_flags)
{
char *norm_path_old;
char *norm_path_new;
char *resolve_path;
M_fs_info_t *info;
M_fs_progress_t *progress = NULL;
M_uint64 entry_size;
M_fs_error_t res;
if (path_old == NULL || *path_old == '\0' || path_new == NULL || *path_new == '\0') {
return M_FS_ERROR_INVALID;
}
/* It's okay if new path doesn't exist. */
res = M_fs_path_norm(&norm_path_new, path_new, M_FS_PATH_NORM_RESDIR, M_FS_SYSTEM_AUTO);
if (res != M_FS_ERROR_SUCCESS) {
M_free(norm_path_new);
return res;
}
/* If a path is a file and the destination is a directory the file should be moved
* into the directory. E.g. /file.txt -> /dir = /dir/file.txt */
if (M_fs_isfileintodir(path_old, path_new, &norm_path_old)) {
M_free(norm_path_new);
res = M_fs_move(path_old, norm_path_old, mode, cb, progress_flags);
M_free(norm_path_old);
return res;
}
/* Normalize the old path and do basic checks that it exists. We'll leave really checking that the old path
* existing to rename because any check we perform may not be true when rename is called. */
res = M_fs_path_norm(&norm_path_old, path_old, M_FS_PATH_NORM_RESALL, M_FS_SYSTEM_AUTO);
if (res != M_FS_ERROR_SUCCESS) {
M_free(norm_path_new);
M_free(norm_path_old);
return res;
}
progress = M_fs_progress_create();
res = M_fs_info(&info, path_old, (mode & M_FS_FILE_MODE_PRESERVE_PERMS)?M_FS_PATH_INFO_FLAGS_NONE:M_FS_PATH_INFO_FLAGS_BASIC);
if (res != M_FS_ERROR_SUCCESS) {
M_fs_progress_destroy(progress);
M_free(norm_path_new);
M_free(norm_path_old);
return res;
}
/* There is a race condition where the path could not exist but be created between the exists check and calling
* rename to move the file but there isn't much we can do in this case. copy will delete and the file so this
* situation won't cause an error. */
if (!M_fs_check_overwrite_allowed(norm_path_old, norm_path_new, mode)) {
M_fs_progress_destroy(progress);
M_free(norm_path_new);
M_free(norm_path_old);
return M_FS_ERROR_FILE_EXISTS;
}
if (cb) {
entry_size = M_fs_info_get_size(info);
M_fs_progress_set_path(progress, norm_path_new);
M_fs_progress_set_type(progress, M_fs_info_get_type(info));
if (progress_flags & M_FS_PROGRESS_SIZE_TOTAL) {
M_fs_progress_set_size_total(progress, entry_size);
M_fs_progress_set_size_total_progess(progress, entry_size);
}
if (progress_flags & M_FS_PROGRESS_SIZE_CUR) {
M_fs_progress_set_size_current(progress, entry_size);
M_fs_progress_set_size_current_progress(progress, entry_size);
}
/* Change the progress count to reflect the count. */
if (progress_flags & M_FS_PROGRESS_COUNT) {
M_fs_progress_set_count_total(progress, 1);
M_fs_progress_set_count(progress, 1);
}
}
/* Move the file. */
if (M_fs_info_get_type(info) == M_FS_TYPE_SYMLINK) {
res = M_fs_path_readlink(&resolve_path, norm_path_old);
if (res == M_FS_ERROR_SUCCESS) {
res = M_fs_symlink(norm_path_new, resolve_path);
}
M_free(resolve_path);
} else {
res = M_fs_move_file(norm_path_old, norm_path_new);
}
/* Failure was because we're crossing mount points. */
if (res == M_FS_ERROR_NOT_SAMEDEV) {
/* Can't rename so copy and delete. */
if (M_fs_copy(norm_path_old, norm_path_new, mode, cb, progress_flags) == M_FS_ERROR_SUCCESS) {
/* Success - Delete the original files since this is a move. */
res = M_fs_delete(norm_path_old, M_TRUE, NULL, M_FS_PROGRESS_NOEXTRA);
} else {
/* Failure - Delete the new files that were copied but only if we are not overwriting. We don't
* want to remove any existing files (especially if the dest is a dir). */
if (!(mode & M_FS_FILE_MODE_OVERWRITE)) {
M_fs_delete(norm_path_new, M_TRUE, NULL, M_FS_PROGRESS_NOEXTRA);
}
res = M_FS_ERROR_GENERIC;
}
} else {
/* Call the cb with the result of the move whether it was a success for fail. We call the cb only if the
* result of the move is not M_FS_ERROR_NOT_SAMEDEV because the copy operation will call the cb for us. */
if (cb) {
M_fs_progress_set_result(progress, res);
if (!cb(progress)) {
res = M_FS_ERROR_CANCELED;
}
}
}
M_fs_info_destroy(info);
M_fs_progress_destroy(progress);
M_free(norm_path_new);
M_free(norm_path_old);
return res;
}
| 13,083 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
int noblock = flags & MSG_DONTWAIT;
struct sock *sk = sock->sk;
struct sk_buff *skb;
int copied, err;
BT_DBG("sock %p, sk %p", sock, sk);
if (flags & (MSG_OOB))
return -EOPNOTSUPP;
if (sk->sk_state == BT_CLOSED)
return 0;
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb)
return err;
msg->msg_namelen = 0;
copied = skb->len;
if (len < copied) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
skb_reset_transport_header(skb);
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
switch (hci_pi(sk)->channel) {
case HCI_CHANNEL_RAW:
hci_sock_cmsg(sk, msg, skb);
break;
case HCI_CHANNEL_USER:
case HCI_CHANNEL_CONTROL:
case HCI_CHANNEL_MONITOR:
sock_recv_timestamp(msg, sk, skb);
break;
}
skb_free_datagram(sk, skb);
return err ? : copied;
}
Commit Message: net: rework recvmsg handler msg_name and msg_namelen logic
This patch now always passes msg->msg_namelen as 0. recvmsg handlers must
set msg_namelen to the proper size <= sizeof(struct sockaddr_storage)
to return msg_name to the user.
This prevents numerous uninitialized memory leaks we had in the
recvmsg handlers and makes it harder for new code to accidentally leak
uninitialized memory.
Optimize for the case recvfrom is called with NULL as address. We don't
need to copy the address at all, so set it to NULL before invoking the
recvmsg handler. We can do so, because all the recvmsg handlers must
cope with the case a plain read() is called on them. read() also sets
msg_name to NULL.
Also document these changes in include/linux/net.h as suggested by David
Miller.
Changes since RFC:
Set msg->msg_name = NULL if user specified a NULL in msg_name but had a
non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't
affect sendto as it would bail out earlier while trying to copy-in the
address. It also more naturally reflects the logic by the callers of
verify_iovec.
With this change in place I could remove "
if (!uaddr || msg_sys->msg_namelen == 0)
msg->msg_name = NULL
".
This change does not alter the user visible error logic as we ignore
msg_namelen as long as msg_name is NULL.
Also remove two unnecessary curly brackets in ___sys_recvmsg and change
comments to netdev style.
Cc: David Miller <[email protected]>
Suggested-by: Eric Dumazet <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
CWE ID: CWE-20 | static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
int noblock = flags & MSG_DONTWAIT;
struct sock *sk = sock->sk;
struct sk_buff *skb;
int copied, err;
BT_DBG("sock %p, sk %p", sock, sk);
if (flags & (MSG_OOB))
return -EOPNOTSUPP;
if (sk->sk_state == BT_CLOSED)
return 0;
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb)
return err;
copied = skb->len;
if (len < copied) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
skb_reset_transport_header(skb);
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
switch (hci_pi(sk)->channel) {
case HCI_CHANNEL_RAW:
hci_sock_cmsg(sk, msg, skb);
break;
case HCI_CHANNEL_USER:
case HCI_CHANNEL_CONTROL:
case HCI_CHANNEL_MONITOR:
sock_recv_timestamp(msg, sk, skb);
break;
}
skb_free_datagram(sk, skb);
return err ? : copied;
}
| 28,342 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void SyncBackendHost::Initialize(
SyncFrontend* frontend,
const GURL& sync_service_url,
const syncable::ModelTypeSet& types,
net::URLRequestContextGetter* baseline_context_getter,
const SyncCredentials& credentials,
bool delete_sync_data_folder) {
if (!core_thread_.Start())
return;
frontend_ = frontend;
DCHECK(frontend);
registrar_.workers[GROUP_DB] = new DatabaseModelWorker();
registrar_.workers[GROUP_UI] = new UIModelWorker();
registrar_.workers[GROUP_PASSIVE] = new ModelSafeWorker();
if (CommandLine::ForCurrentProcess()->HasSwitch(
switches::kEnableSyncTypedUrls) || types.count(syncable::TYPED_URLS)) {
registrar_.workers[GROUP_HISTORY] =
new HistoryModelWorker(
profile_->GetHistoryService(Profile::IMPLICIT_ACCESS));
}
for (syncable::ModelTypeSet::const_iterator it = types.begin();
it != types.end(); ++it) {
registrar_.routing_info[(*it)] = GROUP_PASSIVE;
}
PasswordStore* password_store =
profile_->GetPasswordStore(Profile::IMPLICIT_ACCESS);
if (password_store) {
registrar_.workers[GROUP_PASSWORD] =
new PasswordModelWorker(password_store);
} else {
LOG_IF(WARNING, types.count(syncable::PASSWORDS) > 0) << "Password store "
<< "not initialized, cannot sync passwords";
registrar_.routing_info.erase(syncable::PASSWORDS);
}
registrar_.routing_info[syncable::NIGORI] = GROUP_PASSIVE;
core_->CreateSyncNotifier(baseline_context_getter);
InitCore(Core::DoInitializeOptions(
sync_service_url,
MakeHttpBridgeFactory(baseline_context_getter),
credentials,
delete_sync_data_folder,
RestoreEncryptionBootstrapToken(),
false));
}
Commit Message: Enable HistoryModelWorker by default, now that bug 69561 is fixed.
BUG=69561
TEST=Run sync manually and run integration tests, sync should not crash.
Review URL: http://codereview.chromium.org/7016007
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@85211 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-399 | void SyncBackendHost::Initialize(
SyncFrontend* frontend,
const GURL& sync_service_url,
const syncable::ModelTypeSet& types,
net::URLRequestContextGetter* baseline_context_getter,
const SyncCredentials& credentials,
bool delete_sync_data_folder) {
if (!core_thread_.Start())
return;
frontend_ = frontend;
DCHECK(frontend);
registrar_.workers[GROUP_DB] = new DatabaseModelWorker();
registrar_.workers[GROUP_UI] = new UIModelWorker();
registrar_.workers[GROUP_PASSIVE] = new ModelSafeWorker();
registrar_.workers[GROUP_HISTORY] = new HistoryModelWorker(
profile_->GetHistoryService(Profile::IMPLICIT_ACCESS));
for (syncable::ModelTypeSet::const_iterator it = types.begin();
it != types.end(); ++it) {
registrar_.routing_info[(*it)] = GROUP_PASSIVE;
}
PasswordStore* password_store =
profile_->GetPasswordStore(Profile::IMPLICIT_ACCESS);
if (password_store) {
registrar_.workers[GROUP_PASSWORD] =
new PasswordModelWorker(password_store);
} else {
LOG_IF(WARNING, types.count(syncable::PASSWORDS) > 0) << "Password store "
<< "not initialized, cannot sync passwords";
registrar_.routing_info.erase(syncable::PASSWORDS);
}
registrar_.routing_info[syncable::NIGORI] = GROUP_PASSIVE;
core_->CreateSyncNotifier(baseline_context_getter);
InitCore(Core::DoInitializeOptions(
sync_service_url,
MakeHttpBridgeFactory(baseline_context_getter),
credentials,
delete_sync_data_folder,
RestoreEncryptionBootstrapToken(),
false));
}
| 12,107 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: std::unique_ptr<WebContents> CreateWebContents() {
std::unique_ptr<WebContents> web_contents = CreateTestWebContents();
content::WebContentsTester::For(web_contents.get())
->NavigateAndCommit(GURL("https://www.example.com"));
return web_contents;
}
Commit Message: Connect the LocalDB to TabManager.
Bug: 773382
Change-Id: Iec8fe5226ee175105d51f300f30b4865478ac099
Reviewed-on: https://chromium-review.googlesource.com/1118611
Commit-Queue: Sébastien Marchand <[email protected]>
Reviewed-by: François Doray <[email protected]>
Cr-Commit-Position: refs/heads/master@{#572871}
CWE ID: | std::unique_ptr<WebContents> CreateWebContents() {
std::unique_ptr<WebContents> web_contents = CreateTestWebContents();
ResourceCoordinatorTabHelper::CreateForWebContents(web_contents.get());
content::WebContentsTester::For(web_contents.get())
->NavigateAndCommit(GURL("https://www.example.com"));
base::RepeatingClosure run_loop_cb = base::BindRepeating(
&base::TestMockTimeTaskRunner::RunUntilIdle, task_runner_);
testing::WaitForLocalDBEntryToBeInitialized(web_contents.get(),
run_loop_cb);
testing::ExpireLocalDBObservationWindows(web_contents.get());
return web_contents;
}
| 24,619 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int jas_matrix_cmp(jas_matrix_t *mat0, jas_matrix_t *mat1)
{
int i;
int j;
if (mat0->numrows_ != mat1->numrows_ || mat0->numcols_ !=
mat1->numcols_) {
return 1;
}
for (i = 0; i < mat0->numrows_; i++) {
for (j = 0; j < mat0->numcols_; j++) {
if (jas_matrix_get(mat0, i, j) != jas_matrix_get(mat1, i, j)) {
return 1;
}
}
}
return 0;
}
Commit Message: The generation of the configuration file jas_config.h has been completely
reworked in order to avoid pollution of the global namespace.
Some problematic types like uchar, ulong, and friends have been replaced
with names with a jas_ prefix.
An option max_samples has been added to the BMP and JPEG decoders to
restrict the maximum size of image that they can decode. This change
was made as a (possibly temporary) fix to address security concerns.
A max_samples command-line option has also been added to imginfo.
Whether an image component (for jas_image_t) is stored in memory or on
disk is now based on the component size (rather than the image size).
Some debug log message were added.
Some new integer overflow checks were added.
Some new safe integer add/multiply functions were added.
More pre-C99 cruft was removed. JasPer has numerous "hacks" to
handle pre-C99 compilers. JasPer now assumes C99 support. So, this
pre-C99 cruft is unnecessary and can be removed.
The regression jasper-doublefree-mem_close.jpg has been re-enabled.
Theoretically, it should work more predictably now.
CWE ID: CWE-190 | int jas_matrix_cmp(jas_matrix_t *mat0, jas_matrix_t *mat1)
{
jas_matind_t i;
jas_matind_t j;
if (mat0->numrows_ != mat1->numrows_ || mat0->numcols_ !=
mat1->numcols_) {
return 1;
}
for (i = 0; i < mat0->numrows_; i++) {
for (j = 0; j < mat0->numcols_; j++) {
if (jas_matrix_get(mat0, i, j) != jas_matrix_get(mat1, i, j)) {
return 1;
}
}
}
return 0;
}
| 29,494 |
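The JasPer notes mention wider index types and new safe integer add/multiply helpers; the library's actual helpers may differ, but an overflow-checked multiply generally looks like the following sketch (safe_mul is an invented name, not JasPer API):

#include <cstddef>
#include <cstdio>
#include <limits>

// Refuse to compute a * b when the product would wrap around size_t.
static bool safe_mul(size_t a, size_t b, size_t *out) {
  if (a != 0 && b > std::numeric_limits<size_t>::max() / a)
    return false;                       // report failure instead of silently wrapping
  *out = a * b;
  return true;
}

int main() {
  size_t n = 0;
  bool ok = safe_mul(1024, 768, &n);
  std::printf("ok=%d n=%zu\n", (int)ok, n);
  bool wrapped = !safe_mul(std::numeric_limits<size_t>::max(), 2, &n);
  std::printf("overflow caught=%d\n", (int)wrapped);
}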
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: Chapters::Edition::Edition()
{
}
Commit Message: libwebm: Pull from upstream
Rolling mkvparser from upstream. Primarily for fixing a bug on parsing
failures with certain Opus WebM files.
Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae
The diff is so huge because there were some style clean ups upstream.
But it was ensured that there were no breaking changes when the style
clean ups were done upstream.
Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039
CWE ID: CWE-119 | bool Chapters::Edition::ExpandAtomsArray() {
const int size = (m_atoms_size == 0) ? 1 : 2 * m_atoms_size;
Atom* const atoms = new (std::nothrow) Atom[size];
if (atoms == NULL)
return false;
for (int idx = 0; idx < m_atoms_count; ++idx) {
m_atoms[idx].ShallowCopy(atoms[idx]);
}
delete[] m_atoms;
m_atoms = atoms;
m_atoms_size = size;
return true;
}
| 10,432 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: RuntimeCustomBindings::RuntimeCustomBindings(ScriptContext* context)
: ObjectBackedNativeHandler(context) {
RouteFunction(
"GetManifest",
base::Bind(&RuntimeCustomBindings::GetManifest, base::Unretained(this)));
RouteFunction("OpenChannelToExtension",
base::Bind(&RuntimeCustomBindings::OpenChannelToExtension,
base::Unretained(this)));
RouteFunction("OpenChannelToNativeApp",
base::Bind(&RuntimeCustomBindings::OpenChannelToNativeApp,
base::Unretained(this)));
RouteFunction("GetExtensionViews",
base::Bind(&RuntimeCustomBindings::GetExtensionViews,
base::Unretained(this)));
}
Commit Message: [Extensions] Expand bindings access checks
BUG=601149
BUG=601073
Review URL: https://codereview.chromium.org/1866103002
Cr-Commit-Position: refs/heads/master@{#387710}
CWE ID: CWE-284 | RuntimeCustomBindings::RuntimeCustomBindings(ScriptContext* context)
: ObjectBackedNativeHandler(context) {
RouteFunction(
"GetManifest",
base::Bind(&RuntimeCustomBindings::GetManifest, base::Unretained(this)));
RouteFunction("OpenChannelToExtension", "runtime.connect",
base::Bind(&RuntimeCustomBindings::OpenChannelToExtension,
base::Unretained(this)));
RouteFunction("OpenChannelToNativeApp", "runtime.connectNative",
base::Bind(&RuntimeCustomBindings::OpenChannelToNativeApp,
base::Unretained(this)));
RouteFunction("GetExtensionViews",
base::Bind(&RuntimeCustomBindings::GetExtensionViews,
base::Unretained(this)));
}
| 5,208 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
{
struct scsi_device *SDev;
struct scsi_sense_hdr sshdr;
int result, err = 0, retries = 0;
SDev = cd->device;
retry:
if (!scsi_block_when_processing_errors(SDev)) {
err = -ENODEV;
goto out;
}
result = scsi_execute(SDev, cgc->cmd, cgc->data_direction,
cgc->buffer, cgc->buflen,
(unsigned char *)cgc->sense, &sshdr,
cgc->timeout, IOCTL_RETRIES, 0, 0, NULL);
/* Minimal error checking. Ignore cases we know about, and report the rest. */
if (driver_byte(result) != 0) {
switch (sshdr.sense_key) {
case UNIT_ATTENTION:
SDev->changed = 1;
if (!cgc->quiet)
sr_printk(KERN_INFO, cd,
"disc change detected.\n");
if (retries++ < 10)
goto retry;
err = -ENOMEDIUM;
break;
case NOT_READY: /* This happens if there is no disc in drive */
if (sshdr.asc == 0x04 &&
sshdr.ascq == 0x01) {
/* sense: Logical unit is in process of becoming ready */
if (!cgc->quiet)
sr_printk(KERN_INFO, cd,
"CDROM not ready yet.\n");
if (retries++ < 10) {
/* sleep 2 sec and try again */
ssleep(2);
goto retry;
} else {
/* 20 secs are enough? */
err = -ENOMEDIUM;
break;
}
}
if (!cgc->quiet)
sr_printk(KERN_INFO, cd,
"CDROM not ready. Make sure there "
"is a disc in the drive.\n");
err = -ENOMEDIUM;
break;
case ILLEGAL_REQUEST:
err = -EIO;
if (sshdr.asc == 0x20 &&
sshdr.ascq == 0x00)
/* sense: Invalid command operation code */
err = -EDRIVE_CANT_DO_THIS;
break;
default:
err = -EIO;
}
}
/* Wake up a process waiting for device */
out:
cgc->stat = err;
return err;
}
Commit Message: sr: pass down correctly sized SCSI sense buffer
We're casting the CDROM layer request_sense to the SCSI sense
buffer, but the former is 64 bytes and the latter is 96 bytes.
As we generally allocate these on the stack, we end up blowing
up the stack.
Fix this by wrapping the scsi_execute() call with a properly
sized sense buffer, and copying back the bits for the CDROM
layer.
Cc: [email protected]
Reported-by: Piotr Gabriel Kosinski <[email protected]>
Reported-by: Daniel Shapira <[email protected]>
Tested-by: Kees Cook <[email protected]>
Fixes: 82ed4db499b8 ("block: split scsi_request out of struct request")
Signed-off-by: Jens Axboe <[email protected]>
CWE ID: CWE-119 | int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
{
struct scsi_device *SDev;
struct scsi_sense_hdr sshdr;
int result, err = 0, retries = 0;
unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE], *senseptr = NULL;
SDev = cd->device;
if (cgc->sense)
senseptr = sense_buffer;
retry:
if (!scsi_block_when_processing_errors(SDev)) {
err = -ENODEV;
goto out;
}
result = scsi_execute(SDev, cgc->cmd, cgc->data_direction,
cgc->buffer, cgc->buflen, senseptr, &sshdr,
cgc->timeout, IOCTL_RETRIES, 0, 0, NULL);
if (cgc->sense)
memcpy(cgc->sense, sense_buffer, sizeof(*cgc->sense));
/* Minimal error checking. Ignore cases we know about, and report the rest. */
if (driver_byte(result) != 0) {
switch (sshdr.sense_key) {
case UNIT_ATTENTION:
SDev->changed = 1;
if (!cgc->quiet)
sr_printk(KERN_INFO, cd,
"disc change detected.\n");
if (retries++ < 10)
goto retry;
err = -ENOMEDIUM;
break;
case NOT_READY: /* This happens if there is no disc in drive */
if (sshdr.asc == 0x04 &&
sshdr.ascq == 0x01) {
/* sense: Logical unit is in process of becoming ready */
if (!cgc->quiet)
sr_printk(KERN_INFO, cd,
"CDROM not ready yet.\n");
if (retries++ < 10) {
/* sleep 2 sec and try again */
ssleep(2);
goto retry;
} else {
/* 20 secs are enough? */
err = -ENOMEDIUM;
break;
}
}
if (!cgc->quiet)
sr_printk(KERN_INFO, cd,
"CDROM not ready. Make sure there "
"is a disc in the drive.\n");
err = -ENOMEDIUM;
break;
case ILLEGAL_REQUEST:
err = -EIO;
if (sshdr.asc == 0x20 &&
sshdr.ascq == 0x00)
/* sense: Invalid command operation code */
err = -EDRIVE_CANT_DO_THIS;
break;
default:
err = -EIO;
}
}
/* Wake up a process waiting for device */
out:
cgc->stat = err;
return err;
}
| 21,539 |
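The sr fix hands scsi_execute() a full SCSI_SENSE_BUFFERSIZE scratch buffer and copies only sizeof(*cgc->sense) back to the caller, instead of letting the callee write 96 bytes into a 64-byte struct. A generic bounce-buffer sketch of that pattern (sizes and names here are illustrative, not the kernel's):

#include <cstddef>
#include <cstdio>
#include <cstring>

constexpr size_t kCalleeSenseSize = 96;            // what the lower layer may write
struct RequestSense { unsigned char data[64]; };   // what the caller provides

static void lower_layer(unsigned char *sense, size_t len) {
  std::memset(sense, 0x70, len);                   // pretends to fill all 96 bytes
}

static void do_request(RequestSense *caller_sense) {
  unsigned char bounce[kCalleeSenseSize];          // correctly sized scratch buffer
  lower_layer(bounce, sizeof(bounce));
  if (caller_sense)                                // copy back only what fits
    std::memcpy(caller_sense->data, bounce, sizeof(caller_sense->data));
}

int main() {
  RequestSense rs{};
  do_request(&rs);
  std::printf("first sense byte: 0x%02x\n", rs.data[0]);
}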
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: make_transform_images(png_store *ps)
{
png_byte colour_type = 0;
png_byte bit_depth = 0;
unsigned int palette_number = 0;
/* This is in case of errors. */
safecat(ps->test, sizeof ps->test, 0, "make standard images");
/* Use next_format to enumerate all the combinations we test, including
* generating multiple low bit depth palette images.
*/
while (next_format(&colour_type, &bit_depth, &palette_number, 0))
{
int interlace_type;
for (interlace_type = PNG_INTERLACE_NONE;
interlace_type < INTERLACE_LAST; ++interlace_type)
{
char name[FILE_NAME_SIZE];
standard_name(name, sizeof name, 0, colour_type, bit_depth,
palette_number, interlace_type, 0, 0, 0);
make_transform_image(ps, colour_type, bit_depth, palette_number,
interlace_type, name);
}
}
}
Commit Message: DO NOT MERGE Update libpng to 1.6.20
BUG:23265085
Change-Id: I85199805636d771f3597b691b63bc0bf46084833
(cherry picked from commit bbe98b40cda082024b669fa508931042eed18f82)
CWE ID: | make_transform_images(png_modifier *pm)
{
png_byte colour_type = 0;
png_byte bit_depth = 0;
unsigned int palette_number = 0;
/* This is in case of errors. */
safecat(pm->this.test, sizeof pm->this.test, 0, "make standard images");
/* Use next_format to enumerate all the combinations we test, including
* generating multiple low bit depth palette images. Non-A images (palette
* and direct) are created with and without tRNS chunks.
*/
while (next_format(&colour_type, &bit_depth, &palette_number, 1, 1))
{
int interlace_type;
for (interlace_type = PNG_INTERLACE_NONE;
interlace_type < INTERLACE_LAST; ++interlace_type)
{
char name[FILE_NAME_SIZE];
standard_name(name, sizeof name, 0, colour_type, bit_depth,
palette_number, interlace_type, 0, 0, do_own_interlace);
make_transform_image(&pm->this, colour_type, bit_depth, palette_number,
interlace_type, name);
}
}
}
| 19,849 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static ssize_t read_mem(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
phys_addr_t p = *ppos;
ssize_t read, sz;
void *ptr;
if (p != *ppos)
return 0;
if (!valid_phys_addr_range(p, count))
return -EFAULT;
read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
/* we don't have page 0 mapped on sparc and m68k.. */
if (p < PAGE_SIZE) {
sz = size_inside_page(p, count);
if (sz > 0) {
if (clear_user(buf, sz))
return -EFAULT;
buf += sz;
p += sz;
count -= sz;
read += sz;
}
}
#endif
while (count > 0) {
unsigned long remaining;
sz = size_inside_page(p, count);
if (!range_is_allowed(p >> PAGE_SHIFT, count))
return -EPERM;
/*
* On ia64 if a page has been mapped somewhere as uncached, then
* it must also be accessed uncached by the kernel or data
* corruption may occur.
*/
ptr = xlate_dev_mem_ptr(p);
if (!ptr)
return -EFAULT;
remaining = copy_to_user(buf, ptr, sz);
unxlate_dev_mem_ptr(p, ptr);
if (remaining)
return -EFAULT;
buf += sz;
p += sz;
count -= sz;
read += sz;
}
*ppos += read;
return read;
}
Commit Message: mm: Tighten x86 /dev/mem with zeroing reads
Under CONFIG_STRICT_DEVMEM, reading System RAM through /dev/mem is
disallowed. However, on x86, the first 1MB was always allowed for BIOS
and similar things, regardless of it actually being System RAM. It was
possible for heap to end up getting allocated in low 1MB RAM, and then
read by things like x86info or dd, which would trip hardened usercopy:
usercopy: kernel memory exposure attempt detected from ffff880000090000 (dma-kmalloc-256) (4096 bytes)
This changes the x86 exception for the low 1MB by reading back zeros for
System RAM areas instead of blindly allowing them. More work is needed to
extend this to mmap, but currently mmap doesn't go through usercopy, so
hardened usercopy won't Oops the kernel.
Reported-by: Tommi Rantala <[email protected]>
Tested-by: Tommi Rantala <[email protected]>
Signed-off-by: Kees Cook <[email protected]>
CWE ID: CWE-732 | static ssize_t read_mem(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
phys_addr_t p = *ppos;
ssize_t read, sz;
void *ptr;
if (p != *ppos)
return 0;
if (!valid_phys_addr_range(p, count))
return -EFAULT;
read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
/* we don't have page 0 mapped on sparc and m68k.. */
if (p < PAGE_SIZE) {
sz = size_inside_page(p, count);
if (sz > 0) {
if (clear_user(buf, sz))
return -EFAULT;
buf += sz;
p += sz;
count -= sz;
read += sz;
}
}
#endif
while (count > 0) {
unsigned long remaining;
int allowed;
sz = size_inside_page(p, count);
allowed = page_is_allowed(p >> PAGE_SHIFT);
if (!allowed)
return -EPERM;
if (allowed == 2) {
/* Show zeros for restricted memory. */
remaining = clear_user(buf, sz);
} else {
/*
* On ia64 if a page has been mapped somewhere as
* uncached, then it must also be accessed uncached
* by the kernel or data corruption may occur.
*/
ptr = xlate_dev_mem_ptr(p);
if (!ptr)
return -EFAULT;
remaining = copy_to_user(buf, ptr, sz);
unxlate_dev_mem_ptr(p, ptr);
}
if (remaining)
return -EFAULT;
buf += sz;
p += sz;
count -= sz;
read += sz;
}
*ppos += read;
return read;
}
| 1,885 |
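In the patched read_mem(), page_is_allowed() effectively returns deny, allow, or allow-but-zero, and the restricted case clears the user buffer rather than exposing RAM contents. A toy model of that tri-state decision (classify() and its ranges are made up; the real policy is STRICT_DEVMEM's):

#include <cstddef>
#include <cstdio>
#include <cstring>

enum class Access { Deny, Allow, ZeroFill };       // mirrors the 0 / 1 / 2 return

static Access classify(unsigned long pfn) {        // made-up policy ranges
  if (pfn < 0x100) return Access::ZeroFill;        // "looks like RAM": show zeros
  if (pfn < 0x200) return Access::Allow;
  return Access::Deny;
}

static int read_page(unsigned long pfn, const char *phys, char *out, size_t n) {
  switch (classify(pfn)) {
    case Access::Deny:     return -1;              // -EPERM in the real code
    case Access::ZeroFill: std::memset(out, 0, n); return 0;   // leak nothing
    case Access::Allow:    std::memcpy(out, phys, n); return 0;
  }
  return -1;
}

int main() {
  char phys[8] = "secret";
  char out[8] = {0};
  std::printf("rc=%d out=%s\n", read_page(0x10, phys, out, sizeof(out)), out);
}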
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
struct kvm_vcpu *vcpu = filp->private_data;
void __user *argp = (void __user *)arg;
switch (ioctl) {
case KVM_ARM_VCPU_INIT: {
struct kvm_vcpu_init init;
if (copy_from_user(&init, argp, sizeof(init)))
return -EFAULT;
return kvm_vcpu_set_target(vcpu, &init);
}
case KVM_SET_ONE_REG:
case KVM_GET_ONE_REG: {
struct kvm_one_reg reg;
if (copy_from_user(®, argp, sizeof(reg)))
return -EFAULT;
if (ioctl == KVM_SET_ONE_REG)
return kvm_arm_set_reg(vcpu, ®);
else
return kvm_arm_get_reg(vcpu, ®);
}
case KVM_GET_REG_LIST: {
struct kvm_reg_list __user *user_list = argp;
struct kvm_reg_list reg_list;
unsigned n;
if (copy_from_user(®_list, user_list, sizeof(reg_list)))
return -EFAULT;
n = reg_list.n;
reg_list.n = kvm_arm_num_regs(vcpu);
if (copy_to_user(user_list, ®_list, sizeof(reg_list)))
return -EFAULT;
if (n < reg_list.n)
return -E2BIG;
return kvm_arm_copy_reg_indices(vcpu, user_list->reg);
}
default:
return -EINVAL;
}
}
Commit Message: ARM: KVM: prevent NULL pointer dereferences with KVM VCPU ioctl
Some ARM KVM VCPU ioctls require the vCPU to be properly initialized
with the KVM_ARM_VCPU_INIT ioctl before being used with further
requests. KVM_RUN checks whether this initialization has been
done, but other ioctls do not.
Namely KVM_GET_REG_LIST will dereference an array with index -1
without initialization and thus leads to a kernel oops.
Fix this by adding checks before executing the ioctl handlers.
[ Removed superflous comment from static function - Christoffer ]
Changes from v1:
* moved check into a static function with a meaningful name
Signed-off-by: Andre Przywara <[email protected]>
Signed-off-by: Christoffer Dall <[email protected]>
CWE ID: CWE-399 | long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
struct kvm_vcpu *vcpu = filp->private_data;
void __user *argp = (void __user *)arg;
switch (ioctl) {
case KVM_ARM_VCPU_INIT: {
struct kvm_vcpu_init init;
if (copy_from_user(&init, argp, sizeof(init)))
return -EFAULT;
return kvm_vcpu_set_target(vcpu, &init);
}
case KVM_SET_ONE_REG:
case KVM_GET_ONE_REG: {
struct kvm_one_reg reg;
if (unlikely(!kvm_vcpu_initialized(vcpu)))
return -ENOEXEC;
if (copy_from_user(®, argp, sizeof(reg)))
return -EFAULT;
if (ioctl == KVM_SET_ONE_REG)
return kvm_arm_set_reg(vcpu, ®);
else
return kvm_arm_get_reg(vcpu, ®);
}
case KVM_GET_REG_LIST: {
struct kvm_reg_list __user *user_list = argp;
struct kvm_reg_list reg_list;
unsigned n;
if (unlikely(!kvm_vcpu_initialized(vcpu)))
return -ENOEXEC;
if (copy_from_user(®_list, user_list, sizeof(reg_list)))
return -EFAULT;
n = reg_list.n;
reg_list.n = kvm_arm_num_regs(vcpu);
if (copy_to_user(user_list, ®_list, sizeof(reg_list)))
return -EFAULT;
if (n < reg_list.n)
return -E2BIG;
return kvm_arm_copy_reg_indices(vcpu, user_list->reg);
}
default:
return -EINVAL;
}
}
| 15,500 |
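The KVM patch rejects the register ioctls with -ENOEXEC until KVM_ARM_VCPU_INIT has run, instead of dereferencing uninitialized per-vCPU state. A generic guard-before-dispatch sketch (the error codes are numeric stand-ins, not the kernel's constants):

#include <cstdio>

struct Vcpu { bool initialized = false; int regs[16] = {}; };

static int vcpu_init(Vcpu &v) { v.initialized = true; return 0; }

static int vcpu_get_reg(const Vcpu &v, int idx, int *out) {
  if (!v.initialized) return -8;        // stand-in for -ENOEXEC: reject early
  if (idx < 0 || idx >= 16) return -22; // stand-in for -EINVAL
  *out = v.regs[idx];
  return 0;
}

int main() {
  Vcpu v;
  int r = 0;
  std::printf("before init: %d\n", vcpu_get_reg(v, 0, &r));  // rejected
  vcpu_init(v);
  std::printf("after init:  %d\n", vcpu_get_reg(v, 0, &r));  // 0 on success
}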
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static void serial_update_parameters(SerialState *s)
{
int speed, parity, data_bits, stop_bits, frame_size;
QEMUSerialSetParams ssp;
if (s->divider == 0)
return;
/* Start bit. */
frame_size = 1;
if (s->lcr & 0x08) {
/* Parity bit. */
frame_size++;
if (s->lcr & 0x10)
parity = 'E';
else
parity = 'O';
} else {
parity = 'N';
}
Commit Message:
CWE ID: CWE-369 | static void serial_update_parameters(SerialState *s)
{
int speed, parity, data_bits, stop_bits, frame_size;
QEMUSerialSetParams ssp;
if (s->divider == 0 || s->divider > s->baudbase) {
return;
}
/* Start bit. */
frame_size = 1;
if (s->lcr & 0x08) {
/* Parity bit. */
frame_size++;
if (s->lcr & 0x10)
parity = 'E';
else
parity = 'O';
} else {
parity = 'N';
}
| 24,970 |
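The QEMU fix simply refuses divider values that would divide by zero or exceed the baud base before computing the speed. A tiny sketch of that precondition check (compute_speed is an invented helper, not QEMU code):

#include <cstdio>

// Reject divisors that would divide by zero or give a nonsensical baud rate.
static int compute_speed(int baudbase, int divider, int *speed_out) {
  if (divider <= 0 || divider > baudbase)
    return -1;                          // leave the previous settings untouched
  *speed_out = baudbase / divider;
  return 0;
}

int main() {
  int speed = 9600;
  int rc = compute_speed(115200, 0, &speed);   // rejected, speed unchanged
  std::printf("guarded: rc=%d speed=%d\n", rc, speed);
  rc = compute_speed(115200, 12, &speed);      // 115200 / 12 = 9600
  std::printf("ok:      rc=%d speed=%d\n", rc, speed);
}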
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: status_t SampleIterator::seekTo(uint32_t sampleIndex) {
ALOGV("seekTo(%d)", sampleIndex);
if (sampleIndex >= mTable->mNumSampleSizes) {
return ERROR_END_OF_STREAM;
}
if (mTable->mSampleToChunkOffset < 0
|| mTable->mChunkOffsetOffset < 0
|| mTable->mSampleSizeOffset < 0
|| mTable->mTimeToSampleCount == 0) {
return ERROR_MALFORMED;
}
if (mInitialized && mCurrentSampleIndex == sampleIndex) {
return OK;
}
if (!mInitialized || sampleIndex < mFirstChunkSampleIndex) {
reset();
}
if (sampleIndex >= mStopChunkSampleIndex) {
status_t err;
if ((err = findChunkRange(sampleIndex)) != OK) {
ALOGE("findChunkRange failed");
return err;
}
}
CHECK(sampleIndex < mStopChunkSampleIndex);
if (mSamplesPerChunk == 0) {
ALOGE("b/22802344");
return ERROR_MALFORMED;
}
uint32_t chunk =
(sampleIndex - mFirstChunkSampleIndex) / mSamplesPerChunk
+ mFirstChunk;
if (!mInitialized || chunk != mCurrentChunkIndex) {
mCurrentChunkIndex = chunk;
status_t err;
if ((err = getChunkOffset(chunk, &mCurrentChunkOffset)) != OK) {
ALOGE("getChunkOffset return error");
return err;
}
mCurrentChunkSampleSizes.clear();
uint32_t firstChunkSampleIndex =
mFirstChunkSampleIndex
+ mSamplesPerChunk * (mCurrentChunkIndex - mFirstChunk);
for (uint32_t i = 0; i < mSamplesPerChunk; ++i) {
size_t sampleSize;
if ((err = getSampleSizeDirect(
firstChunkSampleIndex + i, &sampleSize)) != OK) {
ALOGE("getSampleSizeDirect return error");
return err;
}
mCurrentChunkSampleSizes.push(sampleSize);
}
}
uint32_t chunkRelativeSampleIndex =
(sampleIndex - mFirstChunkSampleIndex) % mSamplesPerChunk;
mCurrentSampleOffset = mCurrentChunkOffset;
for (uint32_t i = 0; i < chunkRelativeSampleIndex; ++i) {
mCurrentSampleOffset += mCurrentChunkSampleSizes[i];
}
mCurrentSampleSize = mCurrentChunkSampleSizes[chunkRelativeSampleIndex];
if (sampleIndex < mTTSSampleIndex) {
mTimeToSampleIndex = 0;
mTTSSampleIndex = 0;
mTTSSampleTime = 0;
mTTSCount = 0;
mTTSDuration = 0;
}
status_t err;
if ((err = findSampleTimeAndDuration(
sampleIndex, &mCurrentSampleTime, &mCurrentSampleDuration)) != OK) {
ALOGE("findSampleTime return error");
return err;
}
mCurrentSampleIndex = sampleIndex;
mInitialized = true;
return OK;
}
Commit Message: SampleIterator: clear members on seekTo error
Bug: 31091777
Change-Id: Iddf99d0011961d0fd3d755e57db4365b6a6a1193
(cherry picked from commit 03237ce0f9584c98ccda76c2474a4ae84c763f5b)
CWE ID: CWE-200 | status_t SampleIterator::seekTo(uint32_t sampleIndex) {
ALOGV("seekTo(%d)", sampleIndex);
if (sampleIndex >= mTable->mNumSampleSizes) {
return ERROR_END_OF_STREAM;
}
if (mTable->mSampleToChunkOffset < 0
|| mTable->mChunkOffsetOffset < 0
|| mTable->mSampleSizeOffset < 0
|| mTable->mTimeToSampleCount == 0) {
return ERROR_MALFORMED;
}
if (mInitialized && mCurrentSampleIndex == sampleIndex) {
return OK;
}
if (!mInitialized || sampleIndex < mFirstChunkSampleIndex) {
reset();
}
if (sampleIndex >= mStopChunkSampleIndex) {
status_t err;
if ((err = findChunkRange(sampleIndex)) != OK) {
ALOGE("findChunkRange failed");
return err;
}
}
CHECK(sampleIndex < mStopChunkSampleIndex);
if (mSamplesPerChunk == 0) {
ALOGE("b/22802344");
return ERROR_MALFORMED;
}
uint32_t chunk =
(sampleIndex - mFirstChunkSampleIndex) / mSamplesPerChunk
+ mFirstChunk;
if (!mInitialized || chunk != mCurrentChunkIndex) {
status_t err;
if ((err = getChunkOffset(chunk, &mCurrentChunkOffset)) != OK) {
ALOGE("getChunkOffset return error");
return err;
}
mCurrentChunkSampleSizes.clear();
uint32_t firstChunkSampleIndex =
mFirstChunkSampleIndex
+ mSamplesPerChunk * (chunk - mFirstChunk);
for (uint32_t i = 0; i < mSamplesPerChunk; ++i) {
size_t sampleSize;
if ((err = getSampleSizeDirect(
firstChunkSampleIndex + i, &sampleSize)) != OK) {
ALOGE("getSampleSizeDirect return error");
mCurrentChunkSampleSizes.clear();
return err;
}
mCurrentChunkSampleSizes.push(sampleSize);
}
mCurrentChunkIndex = chunk;
}
uint32_t chunkRelativeSampleIndex =
(sampleIndex - mFirstChunkSampleIndex) % mSamplesPerChunk;
mCurrentSampleOffset = mCurrentChunkOffset;
for (uint32_t i = 0; i < chunkRelativeSampleIndex; ++i) {
mCurrentSampleOffset += mCurrentChunkSampleSizes[i];
}
mCurrentSampleSize = mCurrentChunkSampleSizes[chunkRelativeSampleIndex];
if (sampleIndex < mTTSSampleIndex) {
mTimeToSampleIndex = 0;
mTTSSampleIndex = 0;
mTTSSampleTime = 0;
mTTSCount = 0;
mTTSDuration = 0;
}
status_t err;
if ((err = findSampleTimeAndDuration(
sampleIndex, &mCurrentSampleTime, &mCurrentSampleDuration)) != OK) {
ALOGE("findSampleTime return error");
return err;
}
mCurrentSampleIndex = sampleIndex;
mInitialized = true;
return OK;
}
| 95 |
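
The fix above is an instance of a general rule: build into locals, bail out on any error without touching cached members, and commit only after every step succeeds, so a failed seek cannot leave the iterator half-updated. A small sketch of that commit-after-success shape, with made-up names (LoadSizeFn stands in for any fallible per-sample lookup):

    #include <cstddef>
    #include <vector>

    struct ChunkCacheSketch {
        int currentChunk = -1;
        std::vector<size_t> sampleSizes;

        template <typename LoadSizeFn>
        bool switchToChunk(int chunk, size_t samplesPerChunk, LoadSizeFn loadSize) {
            std::vector<size_t> sizes;
            sizes.reserve(samplesPerChunk);
            for (size_t i = 0; i < samplesPerChunk; ++i) {
                size_t sz = 0;
                if (!loadSize(chunk, i, &sz))
                    return false;          // members still describe the old chunk
                sizes.push_back(sz);
            }
            sampleSizes.swap(sizes);       // publish only after full success
            currentChunk = chunk;
            return true;
        }
    };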
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: OMX_ERRORTYPE SoftOpus::internalGetParameter(
OMX_INDEXTYPE index, OMX_PTR params) {
switch ((int)index) {
case OMX_IndexParamAudioAndroidOpus:
{
OMX_AUDIO_PARAM_ANDROID_OPUSTYPE *opusParams =
(OMX_AUDIO_PARAM_ANDROID_OPUSTYPE *)params;
if (opusParams->nPortIndex != 0) {
return OMX_ErrorUndefined;
}
opusParams->nAudioBandWidth = 0;
opusParams->nSampleRate = kRate;
opusParams->nBitRate = 0;
if (!isConfigured()) {
opusParams->nChannels = 1;
} else {
opusParams->nChannels = mHeader->channels;
}
return OMX_ErrorNone;
}
case OMX_IndexParamAudioPcm:
{
OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
(OMX_AUDIO_PARAM_PCMMODETYPE *)params;
if (pcmParams->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
pcmParams->eNumData = OMX_NumericalDataSigned;
pcmParams->eEndian = OMX_EndianBig;
pcmParams->bInterleaved = OMX_TRUE;
pcmParams->nBitPerSample = 16;
pcmParams->ePCMMode = OMX_AUDIO_PCMModeLinear;
pcmParams->eChannelMapping[0] = OMX_AUDIO_ChannelLF;
pcmParams->eChannelMapping[1] = OMX_AUDIO_ChannelRF;
pcmParams->nSamplingRate = kRate;
if (!isConfigured()) {
pcmParams->nChannels = 1;
} else {
pcmParams->nChannels = mHeader->channels;
}
return OMX_ErrorNone;
}
default:
return SimpleSoftOMXComponent::internalGetParameter(index, params);
}
}
Commit Message: DO NOT MERGE Verify OMX buffer sizes prior to access
Bug: 27207275
Change-Id: I4412825d1ee233d993af0a67708bea54304ff62d
CWE ID: CWE-119 | OMX_ERRORTYPE SoftOpus::internalGetParameter(
OMX_INDEXTYPE index, OMX_PTR params) {
switch ((int)index) {
case OMX_IndexParamAudioAndroidOpus:
{
OMX_AUDIO_PARAM_ANDROID_OPUSTYPE *opusParams =
(OMX_AUDIO_PARAM_ANDROID_OPUSTYPE *)params;
if (!isValidOMXParam(opusParams)) {
return OMX_ErrorBadParameter;
}
if (opusParams->nPortIndex != 0) {
return OMX_ErrorUndefined;
}
opusParams->nAudioBandWidth = 0;
opusParams->nSampleRate = kRate;
opusParams->nBitRate = 0;
if (!isConfigured()) {
opusParams->nChannels = 1;
} else {
opusParams->nChannels = mHeader->channels;
}
return OMX_ErrorNone;
}
case OMX_IndexParamAudioPcm:
{
OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
(OMX_AUDIO_PARAM_PCMMODETYPE *)params;
if (!isValidOMXParam(pcmParams)) {
return OMX_ErrorBadParameter;
}
if (pcmParams->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
pcmParams->eNumData = OMX_NumericalDataSigned;
pcmParams->eEndian = OMX_EndianBig;
pcmParams->bInterleaved = OMX_TRUE;
pcmParams->nBitPerSample = 16;
pcmParams->ePCMMode = OMX_AUDIO_PCMModeLinear;
pcmParams->eChannelMapping[0] = OMX_AUDIO_ChannelLF;
pcmParams->eChannelMapping[1] = OMX_AUDIO_ChannelRF;
pcmParams->nSamplingRate = kRate;
if (!isConfigured()) {
pcmParams->nChannels = 1;
} else {
pcmParams->nChannels = mHeader->channels;
}
return OMX_ErrorNone;
}
default:
return SimpleSoftOMXComponent::internalGetParameter(index, params);
}
}
| 14,872 |
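
The fixed version above differs from the vulnerable one only by the isValidOMXParam() calls. The idea being checked: OMX parameter structs conventionally begin with an nSize field describing the caller's allocation, so touching sizeof(T) bytes is only safe when nSize is at least that large. A sketch of such a check; the struct and names below are illustrative, not real OMX definitions:

    #include <cstdint>

    struct OmxParamSketch {
        uint32_t nSize;        // size of the caller's allocation, by convention
        uint32_t nVersion;
        uint32_t nPortIndex;
        // codec-specific fields would follow
    };

    template <typename T>
    bool isValidOmxParamSketch(const T* params) {
        return params != nullptr && params->nSize >= sizeof(T);
    }

    // usage: if (!isValidOmxParamSketch(static_cast<OmxParamSketch*>(ptr))) reject the call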
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: ResourceDispatcherHostImpl::ResourceDispatcherHostImpl()
: download_file_manager_(new DownloadFileManager(NULL)),
save_file_manager_(new SaveFileManager()),
request_id_(-1),
ALLOW_THIS_IN_INITIALIZER_LIST(weak_factory_(this)),
ALLOW_THIS_IN_INITIALIZER_LIST(ssl_delegate_weak_factory_(this)),
is_shutdown_(false),
max_outstanding_requests_cost_per_process_(
kMaxOutstandingRequestsCostPerProcess),
filter_(NULL),
delegate_(NULL),
allow_cross_origin_auth_prompt_(false) {
DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI));
DCHECK(!g_resource_dispatcher_host);
g_resource_dispatcher_host = this;
GetContentClient()->browser()->ResourceDispatcherHostCreated();
ANNOTATE_BENIGN_RACE(
&last_user_gesture_time_,
"We don't care about the precise value, see http://crbug.com/92889");
BrowserThread::PostTask(
BrowserThread::IO, FROM_HERE,
base::Bind(&appcache::AppCacheInterceptor::EnsureRegistered));
update_load_states_timer_.reset(
new base::RepeatingTimer<ResourceDispatcherHostImpl>());
}
Commit Message: Inherits SupportsWeakPtr<T> instead of having WeakPtrFactory<T>
This change refines r137676.
BUG=122654
TEST=browser_test
Review URL: https://chromiumcodereview.appspot.com/10332233
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@139771 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-119 | ResourceDispatcherHostImpl::ResourceDispatcherHostImpl()
: download_file_manager_(new DownloadFileManager(NULL)),
save_file_manager_(new SaveFileManager()),
request_id_(-1),
is_shutdown_(false),
max_outstanding_requests_cost_per_process_(
kMaxOutstandingRequestsCostPerProcess),
filter_(NULL),
delegate_(NULL),
allow_cross_origin_auth_prompt_(false) {
DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI));
DCHECK(!g_resource_dispatcher_host);
g_resource_dispatcher_host = this;
GetContentClient()->browser()->ResourceDispatcherHostCreated();
ANNOTATE_BENIGN_RACE(
&last_user_gesture_time_,
"We don't care about the precise value, see http://crbug.com/92889");
BrowserThread::PostTask(
BrowserThread::IO, FROM_HERE,
base::Bind(&appcache::AppCacheInterceptor::EnsureRegistered));
update_load_states_timer_.reset(
new base::RepeatingTimer<ResourceDispatcherHostImpl>());
}
| 96 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static int snd_timer_user_tselect(struct file *file,
struct snd_timer_select __user *_tselect)
{
struct snd_timer_user *tu;
struct snd_timer_select tselect;
char str[32];
int err = 0;
tu = file->private_data;
mutex_lock(&tu->tread_sem);
if (tu->timeri) {
snd_timer_close(tu->timeri);
tu->timeri = NULL;
}
if (copy_from_user(&tselect, _tselect, sizeof(tselect))) {
err = -EFAULT;
goto __err;
}
sprintf(str, "application %i", current->pid);
if (tselect.id.dev_class != SNDRV_TIMER_CLASS_SLAVE)
tselect.id.dev_sclass = SNDRV_TIMER_SCLASS_APPLICATION;
err = snd_timer_open(&tu->timeri, str, &tselect.id, current->pid);
if (err < 0)
goto __err;
kfree(tu->queue);
tu->queue = NULL;
kfree(tu->tqueue);
tu->tqueue = NULL;
if (tu->tread) {
tu->tqueue = kmalloc(tu->queue_size * sizeof(struct snd_timer_tread),
GFP_KERNEL);
if (tu->tqueue == NULL)
err = -ENOMEM;
} else {
tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read),
GFP_KERNEL);
if (tu->queue == NULL)
err = -ENOMEM;
}
if (err < 0) {
snd_timer_close(tu->timeri);
tu->timeri = NULL;
} else {
tu->timeri->flags |= SNDRV_TIMER_IFLG_FAST;
tu->timeri->callback = tu->tread
? snd_timer_user_tinterrupt : snd_timer_user_interrupt;
tu->timeri->ccallback = snd_timer_user_ccallback;
tu->timeri->callback_data = (void *)tu;
}
__err:
mutex_unlock(&tu->tread_sem);
return err;
}
Commit Message: ALSA: timer: Fix race among timer ioctls
ALSA timer ioctls have an open race and this may lead to a
use-after-free of timer instance object. A simplistic fix is to make
each ioctl exclusive. We have already tread_sem for controlling the
tread, and extend this as a global mutex to be applied to each ioctl.
The downside is, of course, the worse concurrency. But these ioctls
aren't to be parallel accessible, in anyway, so it should be fine to
serialize there.
Reported-by: Dmitry Vyukov <[email protected]>
Tested-by: Dmitry Vyukov <[email protected]>
Cc: <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]>
CWE ID: CWE-362 | static int snd_timer_user_tselect(struct file *file,
struct snd_timer_select __user *_tselect)
{
struct snd_timer_user *tu;
struct snd_timer_select tselect;
char str[32];
int err = 0;
tu = file->private_data;
if (tu->timeri) {
snd_timer_close(tu->timeri);
tu->timeri = NULL;
}
if (copy_from_user(&tselect, _tselect, sizeof(tselect))) {
err = -EFAULT;
goto __err;
}
sprintf(str, "application %i", current->pid);
if (tselect.id.dev_class != SNDRV_TIMER_CLASS_SLAVE)
tselect.id.dev_sclass = SNDRV_TIMER_SCLASS_APPLICATION;
err = snd_timer_open(&tu->timeri, str, &tselect.id, current->pid);
if (err < 0)
goto __err;
kfree(tu->queue);
tu->queue = NULL;
kfree(tu->tqueue);
tu->tqueue = NULL;
if (tu->tread) {
tu->tqueue = kmalloc(tu->queue_size * sizeof(struct snd_timer_tread),
GFP_KERNEL);
if (tu->tqueue == NULL)
err = -ENOMEM;
} else {
tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read),
GFP_KERNEL);
if (tu->queue == NULL)
err = -ENOMEM;
}
if (err < 0) {
snd_timer_close(tu->timeri);
tu->timeri = NULL;
} else {
tu->timeri->flags |= SNDRV_TIMER_IFLG_FAST;
tu->timeri->callback = tu->tread
? snd_timer_user_tinterrupt : snd_timer_user_interrupt;
tu->timeri->ccallback = snd_timer_user_ccallback;
tu->timeri->callback_data = (void *)tu;
}
__err:
return err;
}
| 19,585 |
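
The commit message above describes the fix as making each ioctl exclusive under one shared mutex. A compact illustration of that serialization, with illustrative types (not the ALSA API): the entry point that frees and replaces the instance and the entry point that reads it take the same lock, so the use-after-free window disappears.

    #include <memory>
    #include <mutex>

    struct TimerUserSketch {
        std::mutex ioctl_mutex;            // one lock shared by all entry points
        std::unique_ptr<int> instance;     // stands in for the timer instance

        void select_instance(std::unique_ptr<int> replacement) {
            std::lock_guard<std::mutex> lock(ioctl_mutex);
            instance = std::move(replacement);   // old object freed under the lock
        }

        bool has_instance() {
            std::lock_guard<std::mutex> lock(ioctl_mutex);
            return instance != nullptr;          // reads are serialized as well
        }
    };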
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: long long AudioTrack::GetChannels() const
{
return m_channels;
}
Commit Message: libwebm: Pull from upstream
Rolling mkvparser from upstream. Primarily for fixing a bug on parsing
failures with certain Opus WebM files.
Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae
The diff is so huge because there were some style clean ups upstream.
But it was ensured that there were no breaking changes when the style
clean ups was done upstream.
Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039
CWE ID: CWE-119 | long long AudioTrack::GetChannels() const
{
return m_channels;
}
| 9,740 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: PHP_FUNCTION(locale_get_display_name)
{
get_icu_disp_value_src_php( DISP_NAME , INTERNAL_FUNCTION_PARAM_PASSTHRU );
}
Commit Message: Fix bug #72241: get_icu_value_internal out-of-bounds read
CWE ID: CWE-125 | PHP_FUNCTION(locale_get_display_name)
{
get_icu_disp_value_src_php( DISP_NAME , INTERNAL_FUNCTION_PARAM_PASSTHRU );
}
| 7,725 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: gamma_info_imp(gamma_display *dp, png_structp pp, png_infop pi)
{
/* Reuse the standard stuff as appropriate. */
standard_info_part1(&dp->this, pp, pi);
/* If requested strip 16 to 8 bits - this is handled automagically below
* because the output bit depth is read from the library. Note that there
* are interactions with sBIT but, internally, libpng makes sbit at most
* PNG_MAX_GAMMA_8 when doing the following.
*/
if (dp->scale16)
# ifdef PNG_READ_SCALE_16_TO_8_SUPPORTED
png_set_scale_16(pp);
# else
/* The following works both in 1.5.4 and earlier versions: */
# ifdef PNG_READ_16_TO_8_SUPPORTED
png_set_strip_16(pp);
# else
png_error(pp, "scale16 (16 to 8 bit conversion) not supported");
# endif
# endif
if (dp->expand16)
# ifdef PNG_READ_EXPAND_16_SUPPORTED
png_set_expand_16(pp);
# else
png_error(pp, "expand16 (8 to 16 bit conversion) not supported");
# endif
if (dp->do_background >= ALPHA_MODE_OFFSET)
{
# ifdef PNG_READ_ALPHA_MODE_SUPPORTED
{
/* This tests the alpha mode handling, if supported. */
int mode = dp->do_background - ALPHA_MODE_OFFSET;
/* The gamma value is the output gamma, and is in the standard,
* non-inverted, represenation. It provides a default for the PNG file
* gamma, but since the file has a gAMA chunk this does not matter.
*/
PNG_CONST double sg = dp->screen_gamma;
# ifndef PNG_FLOATING_POINT_SUPPORTED
PNG_CONST png_fixed_point g = fix(sg);
# endif
# ifdef PNG_FLOATING_POINT_SUPPORTED
png_set_alpha_mode(pp, mode, sg);
# else
png_set_alpha_mode_fixed(pp, mode, g);
# endif
/* However, for the standard Porter-Duff algorithm the output defaults
* to be linear, so if the test requires non-linear output it must be
* corrected here.
*/
if (mode == PNG_ALPHA_STANDARD && sg != 1)
{
# ifdef PNG_FLOATING_POINT_SUPPORTED
png_set_gamma(pp, sg, dp->file_gamma);
# else
png_fixed_point f = fix(dp->file_gamma);
png_set_gamma_fixed(pp, g, f);
# endif
}
}
# else
png_error(pp, "alpha mode handling not supported");
# endif
}
else
{
/* Set up gamma processing. */
# ifdef PNG_FLOATING_POINT_SUPPORTED
png_set_gamma(pp, dp->screen_gamma, dp->file_gamma);
# else
{
png_fixed_point s = fix(dp->screen_gamma);
png_fixed_point f = fix(dp->file_gamma);
png_set_gamma_fixed(pp, s, f);
}
# endif
if (dp->do_background)
{
# ifdef PNG_READ_BACKGROUND_SUPPORTED
/* NOTE: this assumes the caller provided the correct background gamma!
*/
PNG_CONST double bg = dp->background_gamma;
# ifndef PNG_FLOATING_POINT_SUPPORTED
PNG_CONST png_fixed_point g = fix(bg);
# endif
# ifdef PNG_FLOATING_POINT_SUPPORTED
png_set_background(pp, &dp->background_color, dp->do_background,
0/*need_expand*/, bg);
# else
png_set_background_fixed(pp, &dp->background_color,
dp->do_background, 0/*need_expand*/, g);
# endif
# else
png_error(pp, "png_set_background not supported");
# endif
}
}
{
int i = dp->this.use_update_info;
/* Always do one call, even if use_update_info is 0. */
do
png_read_update_info(pp, pi);
while (--i > 0);
}
/* Now we may get a different cbRow: */
standard_info_part2(&dp->this, pp, pi, 1 /*images*/);
}
Commit Message: DO NOT MERGE Update libpng to 1.6.20
BUG:23265085
Change-Id: I85199805636d771f3597b691b63bc0bf46084833
(cherry picked from commit bbe98b40cda082024b669fa508931042eed18f82)
CWE ID: | gamma_info_imp(gamma_display *dp, png_structp pp, png_infop pi)
{
/* Reuse the standard stuff as appropriate. */
standard_info_part1(&dp->this, pp, pi);
/* If requested strip 16 to 8 bits - this is handled automagically below
* because the output bit depth is read from the library. Note that there
* are interactions with sBIT but, internally, libpng makes sbit at most
* PNG_MAX_GAMMA_8 prior to 1.7 when doing the following.
*/
if (dp->scale16)
# ifdef PNG_READ_SCALE_16_TO_8_SUPPORTED
png_set_scale_16(pp);
# else
/* The following works both in 1.5.4 and earlier versions: */
# ifdef PNG_READ_16_TO_8_SUPPORTED
png_set_strip_16(pp);
# else
png_error(pp, "scale16 (16 to 8 bit conversion) not supported");
# endif
# endif
if (dp->expand16)
# ifdef PNG_READ_EXPAND_16_SUPPORTED
png_set_expand_16(pp);
# else
png_error(pp, "expand16 (8 to 16 bit conversion) not supported");
# endif
if (dp->do_background >= ALPHA_MODE_OFFSET)
{
# ifdef PNG_READ_ALPHA_MODE_SUPPORTED
{
/* This tests the alpha mode handling, if supported. */
int mode = dp->do_background - ALPHA_MODE_OFFSET;
/* The gamma value is the output gamma, and is in the standard,
* non-inverted, represenation. It provides a default for the PNG file
* gamma, but since the file has a gAMA chunk this does not matter.
*/
const double sg = dp->screen_gamma;
# ifndef PNG_FLOATING_POINT_SUPPORTED
const png_fixed_point g = fix(sg);
# endif
# ifdef PNG_FLOATING_POINT_SUPPORTED
png_set_alpha_mode(pp, mode, sg);
# else
png_set_alpha_mode_fixed(pp, mode, g);
# endif
/* However, for the standard Porter-Duff algorithm the output defaults
* to be linear, so if the test requires non-linear output it must be
* corrected here.
*/
if (mode == PNG_ALPHA_STANDARD && sg != 1)
{
# ifdef PNG_FLOATING_POINT_SUPPORTED
png_set_gamma(pp, sg, dp->file_gamma);
# else
png_fixed_point f = fix(dp->file_gamma);
png_set_gamma_fixed(pp, g, f);
# endif
}
}
# else
png_error(pp, "alpha mode handling not supported");
# endif
}
else
{
/* Set up gamma processing. */
# ifdef PNG_FLOATING_POINT_SUPPORTED
png_set_gamma(pp, dp->screen_gamma, dp->file_gamma);
# else
{
png_fixed_point s = fix(dp->screen_gamma);
png_fixed_point f = fix(dp->file_gamma);
png_set_gamma_fixed(pp, s, f);
}
# endif
if (dp->do_background)
{
# ifdef PNG_READ_BACKGROUND_SUPPORTED
/* NOTE: this assumes the caller provided the correct background gamma!
*/
const double bg = dp->background_gamma;
# ifndef PNG_FLOATING_POINT_SUPPORTED
const png_fixed_point g = fix(bg);
# endif
# ifdef PNG_FLOATING_POINT_SUPPORTED
png_set_background(pp, &dp->background_color, dp->do_background,
0/*need_expand*/, bg);
# else
png_set_background_fixed(pp, &dp->background_color,
dp->do_background, 0/*need_expand*/, g);
# endif
# else
png_error(pp, "png_set_background not supported");
# endif
}
}
{
int i = dp->this.use_update_info;
/* Always do one call, even if use_update_info is 0. */
do
png_read_update_info(pp, pi);
while (--i > 0);
}
/* Now we may get a different cbRow: */
standard_info_part2(&dp->this, pp, pi, 1 /*images*/);
}
| 15,153 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: DevToolsUI::DevToolsUI(content::WebUI* web_ui)
: WebUIController(web_ui),
bindings_(web_ui->GetWebContents()) {
web_ui->SetBindings(0);
Profile* profile = Profile::FromWebUI(web_ui);
content::URLDataSource::Add(
profile,
new DevToolsDataSource(profile->GetRequestContext()));
}
Commit Message: [DevTools] Move sanitize url to devtools_ui.cc.
Compatibility script is not reliable enough.
BUG=653134
Review-Url: https://codereview.chromium.org/2403633002
Cr-Commit-Position: refs/heads/master@{#425814}
CWE ID: CWE-200 | DevToolsUI::DevToolsUI(content::WebUI* web_ui)
: WebUIController(web_ui) {
web_ui->SetBindings(0);
Profile* profile = Profile::FromWebUI(web_ui);
content::URLDataSource::Add(
profile,
new DevToolsDataSource(profile->GetRequestContext()));
GURL url = web_ui->GetWebContents()->GetVisibleURL();
if (url.spec() == SanitizeFrontendURL(url).spec())
bindings_.reset(new DevToolsUIBindings(web_ui->GetWebContents()));
}
| 9,915 |
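
The fix above gates the privileged bindings on the visible URL surviving sanitization unchanged. The real SanitizeFrontendURL policy is not shown in this record, so the toy sanitizer below (which simply drops any query string) is only meant to illustrate the round-trip comparison:

    #include <string>

    std::string sanitize_frontend_url_sketch(const std::string& url) {
        const std::string::size_type q = url.find('?');
        return q == std::string::npos ? url : url.substr(0, q);
    }

    bool may_enable_bindings_sketch(const std::string& visible_url) {
        // Only grant bindings when sanitizing changes nothing.
        return sanitize_frontend_url_sketch(visible_url) == visible_url;
    }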
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: OMX_ERRORTYPE SoftAACEncoder::internalGetParameter(
OMX_INDEXTYPE index, OMX_PTR params) {
switch (index) {
case OMX_IndexParamAudioPortFormat:
{
OMX_AUDIO_PARAM_PORTFORMATTYPE *formatParams =
(OMX_AUDIO_PARAM_PORTFORMATTYPE *)params;
if (formatParams->nPortIndex > 1) {
return OMX_ErrorUndefined;
}
if (formatParams->nIndex > 0) {
return OMX_ErrorNoMore;
}
formatParams->eEncoding =
(formatParams->nPortIndex == 0)
? OMX_AUDIO_CodingPCM : OMX_AUDIO_CodingAAC;
return OMX_ErrorNone;
}
case OMX_IndexParamAudioAac:
{
OMX_AUDIO_PARAM_AACPROFILETYPE *aacParams =
(OMX_AUDIO_PARAM_AACPROFILETYPE *)params;
if (aacParams->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
aacParams->nBitRate = mBitRate;
aacParams->nAudioBandWidth = 0;
aacParams->nAACtools = 0;
aacParams->nAACERtools = 0;
aacParams->eAACProfile = OMX_AUDIO_AACObjectMain;
aacParams->eAACStreamFormat = OMX_AUDIO_AACStreamFormatMP4FF;
aacParams->eChannelMode = OMX_AUDIO_ChannelModeStereo;
aacParams->nChannels = mNumChannels;
aacParams->nSampleRate = mSampleRate;
aacParams->nFrameLength = 0;
return OMX_ErrorNone;
}
case OMX_IndexParamAudioPcm:
{
OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
(OMX_AUDIO_PARAM_PCMMODETYPE *)params;
if (pcmParams->nPortIndex != 0) {
return OMX_ErrorUndefined;
}
pcmParams->eNumData = OMX_NumericalDataSigned;
pcmParams->eEndian = OMX_EndianBig;
pcmParams->bInterleaved = OMX_TRUE;
pcmParams->nBitPerSample = 16;
pcmParams->ePCMMode = OMX_AUDIO_PCMModeLinear;
pcmParams->eChannelMapping[0] = OMX_AUDIO_ChannelLF;
pcmParams->eChannelMapping[1] = OMX_AUDIO_ChannelRF;
pcmParams->nChannels = mNumChannels;
pcmParams->nSamplingRate = mSampleRate;
return OMX_ErrorNone;
}
default:
return SimpleSoftOMXComponent::internalGetParameter(index, params);
}
}
Commit Message: DO NOT MERGE Verify OMX buffer sizes prior to access
Bug: 27207275
Change-Id: I4412825d1ee233d993af0a67708bea54304ff62d
CWE ID: CWE-119 | OMX_ERRORTYPE SoftAACEncoder::internalGetParameter(
OMX_INDEXTYPE index, OMX_PTR params) {
switch (index) {
case OMX_IndexParamAudioPortFormat:
{
OMX_AUDIO_PARAM_PORTFORMATTYPE *formatParams =
(OMX_AUDIO_PARAM_PORTFORMATTYPE *)params;
if (!isValidOMXParam(formatParams)) {
return OMX_ErrorBadParameter;
}
if (formatParams->nPortIndex > 1) {
return OMX_ErrorUndefined;
}
if (formatParams->nIndex > 0) {
return OMX_ErrorNoMore;
}
formatParams->eEncoding =
(formatParams->nPortIndex == 0)
? OMX_AUDIO_CodingPCM : OMX_AUDIO_CodingAAC;
return OMX_ErrorNone;
}
case OMX_IndexParamAudioAac:
{
OMX_AUDIO_PARAM_AACPROFILETYPE *aacParams =
(OMX_AUDIO_PARAM_AACPROFILETYPE *)params;
if (!isValidOMXParam(aacParams)) {
return OMX_ErrorBadParameter;
}
if (aacParams->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
aacParams->nBitRate = mBitRate;
aacParams->nAudioBandWidth = 0;
aacParams->nAACtools = 0;
aacParams->nAACERtools = 0;
aacParams->eAACProfile = OMX_AUDIO_AACObjectMain;
aacParams->eAACStreamFormat = OMX_AUDIO_AACStreamFormatMP4FF;
aacParams->eChannelMode = OMX_AUDIO_ChannelModeStereo;
aacParams->nChannels = mNumChannels;
aacParams->nSampleRate = mSampleRate;
aacParams->nFrameLength = 0;
return OMX_ErrorNone;
}
case OMX_IndexParamAudioPcm:
{
OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
(OMX_AUDIO_PARAM_PCMMODETYPE *)params;
if (!isValidOMXParam(pcmParams)) {
return OMX_ErrorBadParameter;
}
if (pcmParams->nPortIndex != 0) {
return OMX_ErrorUndefined;
}
pcmParams->eNumData = OMX_NumericalDataSigned;
pcmParams->eEndian = OMX_EndianBig;
pcmParams->bInterleaved = OMX_TRUE;
pcmParams->nBitPerSample = 16;
pcmParams->ePCMMode = OMX_AUDIO_PCMModeLinear;
pcmParams->eChannelMapping[0] = OMX_AUDIO_ChannelLF;
pcmParams->eChannelMapping[1] = OMX_AUDIO_ChannelRF;
pcmParams->nChannels = mNumChannels;
pcmParams->nSamplingRate = mSampleRate;
return OMX_ErrorNone;
}
default:
return SimpleSoftOMXComponent::internalGetParameter(index, params);
}
}
| 12,946 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: spnego_gss_wrap_iov_length(OM_uint32 *minor_status,
gss_ctx_id_t context_handle,
int conf_req_flag,
gss_qop_t qop_req,
int *conf_state,
gss_iov_buffer_desc *iov,
int iov_count)
{
OM_uint32 ret;
ret = gss_wrap_iov_length(minor_status,
context_handle,
conf_req_flag,
qop_req,
conf_state,
iov,
iov_count);
return (ret);
}
Commit Message: Fix SPNEGO context aliasing bugs [CVE-2015-2695]
The SPNEGO mechanism currently replaces its context handle with the
mechanism context handle upon establishment, under the assumption that
most GSS functions are only called after context establishment. This
assumption is incorrect, and can lead to aliasing violations for some
programs. Maintain the SPNEGO context structure after context
establishment and refer to it in all GSS methods. Add initiate and
opened flags to the SPNEGO context structure for use in
gss_inquire_context() prior to context establishment.
CVE-2015-2695:
In MIT krb5 1.5 and later, applications which call
gss_inquire_context() on a partially-established SPNEGO context can
cause the GSS-API library to read from a pointer using the wrong type,
generally causing a process crash. This bug may go unnoticed, because
the most common SPNEGO authentication scenario establishes the context
after just one call to gss_accept_sec_context(). Java server
applications using the native JGSS provider are vulnerable to this
bug. A carefully crafted SPNEGO packet might allow the
gss_inquire_context() call to succeed with attacker-determined
results, but applications should not make access control decisions
based on gss_inquire_context() results prior to context establishment.
CVSSv2 Vector: AV:N/AC:M/Au:N/C:N/I:N/A:C/E:POC/RL:OF/RC:C
[[email protected]: several bugfixes, style changes, and edge-case
behavior changes; commit message and CVE description]
ticket: 8244
target_version: 1.14
tags: pullup
CWE ID: CWE-18 | spnego_gss_wrap_iov_length(OM_uint32 *minor_status,
gss_ctx_id_t context_handle,
int conf_req_flag,
gss_qop_t qop_req,
int *conf_state,
gss_iov_buffer_desc *iov,
int iov_count)
{
OM_uint32 ret;
spnego_gss_ctx_id_t sc = (spnego_gss_ctx_id_t)context_handle;
if (sc->ctx_handle == GSS_C_NO_CONTEXT)
return (GSS_S_NO_CONTEXT);
ret = gss_wrap_iov_length(minor_status,
sc->ctx_handle,
conf_req_flag,
qop_req,
conf_state,
iov,
iov_count);
return (ret);
}
| 6,393 |
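
Per the commit message above, the fix keeps the mechanism's own wrapper context alive for the whole lifetime of the handle and routes every operation through it, checking whether the inner mechanism context exists yet, instead of swapping the caller's handle to alias the inner object. A sketch of that shape with illustrative types (not the GSS-API):

    struct InnerCtxSketch { /* mechanism-specific state */ };

    struct NegoCtxSketch {
        bool opened = false;               // fully established?
        InnerCtxSketch* inner = nullptr;   // may still be null mid-negotiation
    };

    int wrap_sketch(NegoCtxSketch* ctx) {
        if (ctx == nullptr || ctx->inner == nullptr)
            return -1;                     // analogue of GSS_S_NO_CONTEXT
        // ... delegate the real work to ctx->inner ...
        return 0;
    }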
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void ApplyBlockElementCommand::formatSelection(const VisiblePosition& startOfSelection, const VisiblePosition& endOfSelection)
{
Position start = startOfSelection.deepEquivalent().downstream();
if (isAtUnsplittableElement(start)) {
RefPtr<Element> blockquote = createBlockElement();
insertNodeAt(blockquote, start);
RefPtr<Element> placeholder = createBreakElement(document());
appendNode(placeholder, blockquote);
setEndingSelection(VisibleSelection(positionBeforeNode(placeholder.get()), DOWNSTREAM, endingSelection().isDirectional()));
return;
}
RefPtr<Element> blockquoteForNextIndent;
VisiblePosition endOfCurrentParagraph = endOfParagraph(startOfSelection);
VisiblePosition endAfterSelection = endOfParagraph(endOfParagraph(endOfSelection).next());
m_endOfLastParagraph = endOfParagraph(endOfSelection).deepEquivalent();
bool atEnd = false;
Position end;
while (endOfCurrentParagraph != endAfterSelection && !atEnd) {
if (endOfCurrentParagraph.deepEquivalent() == m_endOfLastParagraph)
atEnd = true;
rangeForParagraphSplittingTextNodesIfNeeded(endOfCurrentParagraph, start, end);
endOfCurrentParagraph = end;
Position afterEnd = end.next();
Node* enclosingCell = enclosingNodeOfType(start, &isTableCell);
VisiblePosition endOfNextParagraph = endOfNextParagrahSplittingTextNodesIfNeeded(endOfCurrentParagraph, start, end);
formatRange(start, end, m_endOfLastParagraph, blockquoteForNextIndent);
if (enclosingCell && enclosingCell != enclosingNodeOfType(endOfNextParagraph.deepEquivalent(), &isTableCell))
blockquoteForNextIndent = 0;
if (endAfterSelection.isNotNull() && !endAfterSelection.deepEquivalent().inDocument())
break;
if (endOfNextParagraph.isNotNull() && !endOfNextParagraph.deepEquivalent().inDocument()) {
ASSERT_NOT_REACHED();
return;
}
endOfCurrentParagraph = endOfNextParagraph;
}
}
Commit Message: Remove false assertion in ApplyBlockElementCommand::formatSelection()
Note: This patch is preparation of fixing issue 294456.
This patch removes false assertion in ApplyBlockElementCommand::formatSelection(), when contents of being indent is modified, e.g. mutation event, |endOfNextParagraph| can hold removed contents.
BUG=294456
TEST=n/a
[email protected]
Review URL: https://codereview.chromium.org/25657004
git-svn-id: svn://svn.chromium.org/blink/trunk@158701 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: CWE-399 | void ApplyBlockElementCommand::formatSelection(const VisiblePosition& startOfSelection, const VisiblePosition& endOfSelection)
{
Position start = startOfSelection.deepEquivalent().downstream();
if (isAtUnsplittableElement(start)) {
RefPtr<Element> blockquote = createBlockElement();
insertNodeAt(blockquote, start);
RefPtr<Element> placeholder = createBreakElement(document());
appendNode(placeholder, blockquote);
setEndingSelection(VisibleSelection(positionBeforeNode(placeholder.get()), DOWNSTREAM, endingSelection().isDirectional()));
return;
}
RefPtr<Element> blockquoteForNextIndent;
VisiblePosition endOfCurrentParagraph = endOfParagraph(startOfSelection);
VisiblePosition endAfterSelection = endOfParagraph(endOfParagraph(endOfSelection).next());
m_endOfLastParagraph = endOfParagraph(endOfSelection).deepEquivalent();
bool atEnd = false;
Position end;
while (endOfCurrentParagraph != endAfterSelection && !atEnd) {
if (endOfCurrentParagraph.deepEquivalent() == m_endOfLastParagraph)
atEnd = true;
rangeForParagraphSplittingTextNodesIfNeeded(endOfCurrentParagraph, start, end);
endOfCurrentParagraph = end;
Position afterEnd = end.next();
Node* enclosingCell = enclosingNodeOfType(start, &isTableCell);
VisiblePosition endOfNextParagraph = endOfNextParagrahSplittingTextNodesIfNeeded(endOfCurrentParagraph, start, end);
formatRange(start, end, m_endOfLastParagraph, blockquoteForNextIndent);
if (enclosingCell && enclosingCell != enclosingNodeOfType(endOfNextParagraph.deepEquivalent(), &isTableCell))
blockquoteForNextIndent = 0;
if (endAfterSelection.isNotNull() && !endAfterSelection.deepEquivalent().inDocument())
break;
// If somehow, e.g. mutation event handler, we did, return to prevent crashes.
if (endOfNextParagraph.isNotNull() && !endOfNextParagraph.deepEquivalent().inDocument())
return;
endOfCurrentParagraph = endOfNextParagraph;
}
}
| 3,783 |
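
The change above replaces an assertion with an early return because mutation event handlers can detach the very nodes the command is iterating over. The defensive shape, with a stand-in Node type rather than the WebKit class:

    #include <memory>

    struct NodeSketch {
        bool inDocument = true;
    };

    bool format_next_paragraph_sketch(const std::shared_ptr<NodeSketch>& endOfNext) {
        // ... formatting work that may fire mutation handlers and detach nodes ...
        if (endOfNext && !endOfNext->inDocument)
            return false;    // content was removed under us; stop without crashing
        // ... safe to keep using endOfNext from here ...
        return true;
    }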
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
const struct in6_addr *force_saddr)
{
struct net *net = dev_net(skb->dev);
struct inet6_dev *idev = NULL;
struct ipv6hdr *hdr = ipv6_hdr(skb);
struct sock *sk;
struct ipv6_pinfo *np;
const struct in6_addr *saddr = NULL;
struct dst_entry *dst;
struct icmp6hdr tmp_hdr;
struct flowi6 fl6;
struct icmpv6_msg msg;
struct sockcm_cookie sockc_unused = {0};
struct ipcm6_cookie ipc6;
int iif = 0;
int addr_type = 0;
int len;
int err = 0;
u32 mark = IP6_REPLY_MARK(net, skb->mark);
if ((u8 *)hdr < skb->head ||
(skb_network_header(skb) + sizeof(*hdr)) > skb_tail_pointer(skb))
return;
/*
* Make sure we respect the rules
* i.e. RFC 1885 2.4(e)
* Rule (e.1) is enforced by not using icmp6_send
* in any code that processes icmp errors.
*/
addr_type = ipv6_addr_type(&hdr->daddr);
if (ipv6_chk_addr(net, &hdr->daddr, skb->dev, 0) ||
ipv6_chk_acast_addr_src(net, skb->dev, &hdr->daddr))
saddr = &hdr->daddr;
/*
* Dest addr check
*/
if (addr_type & IPV6_ADDR_MULTICAST || skb->pkt_type != PACKET_HOST) {
if (type != ICMPV6_PKT_TOOBIG &&
!(type == ICMPV6_PARAMPROB &&
code == ICMPV6_UNK_OPTION &&
(opt_unrec(skb, info))))
return;
saddr = NULL;
}
addr_type = ipv6_addr_type(&hdr->saddr);
/*
* Source addr check
*/
if (__ipv6_addr_needs_scope_id(addr_type))
iif = skb->dev->ifindex;
else
iif = l3mdev_master_ifindex(skb_dst(skb)->dev);
/*
* Must not send error if the source does not uniquely
* identify a single node (RFC2463 Section 2.4).
* We check unspecified / multicast addresses here,
* and anycast addresses will be checked later.
*/
if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) {
net_dbg_ratelimited("icmp6_send: addr_any/mcast source [%pI6c > %pI6c]\n",
&hdr->saddr, &hdr->daddr);
return;
}
/*
* Never answer to a ICMP packet.
*/
if (is_ineligible(skb)) {
net_dbg_ratelimited("icmp6_send: no reply to icmp error [%pI6c > %pI6c]\n",
&hdr->saddr, &hdr->daddr);
return;
}
mip6_addr_swap(skb);
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_ICMPV6;
fl6.daddr = hdr->saddr;
if (force_saddr)
saddr = force_saddr;
if (saddr)
fl6.saddr = *saddr;
fl6.flowi6_mark = mark;
fl6.flowi6_oif = iif;
fl6.fl6_icmp_type = type;
fl6.fl6_icmp_code = code;
security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
sk = icmpv6_xmit_lock(net);
if (!sk)
return;
sk->sk_mark = mark;
np = inet6_sk(sk);
if (!icmpv6_xrlim_allow(sk, type, &fl6))
goto out;
tmp_hdr.icmp6_type = type;
tmp_hdr.icmp6_code = code;
tmp_hdr.icmp6_cksum = 0;
tmp_hdr.icmp6_pointer = htonl(info);
if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
fl6.flowi6_oif = np->mcast_oif;
else if (!fl6.flowi6_oif)
fl6.flowi6_oif = np->ucast_oif;
ipc6.tclass = np->tclass;
fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
dst = icmpv6_route_lookup(net, skb, sk, &fl6);
if (IS_ERR(dst))
goto out;
ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
ipc6.dontfrag = np->dontfrag;
ipc6.opt = NULL;
msg.skb = skb;
msg.offset = skb_network_offset(skb);
msg.type = type;
len = skb->len - msg.offset;
len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) - sizeof(struct icmp6hdr));
if (len < 0) {
net_dbg_ratelimited("icmp: len problem [%pI6c > %pI6c]\n",
&hdr->saddr, &hdr->daddr);
goto out_dst_release;
}
rcu_read_lock();
idev = __in6_dev_get(skb->dev);
err = ip6_append_data(sk, icmpv6_getfrag, &msg,
len + sizeof(struct icmp6hdr),
sizeof(struct icmp6hdr),
&ipc6, &fl6, (struct rt6_info *)dst,
MSG_DONTWAIT, &sockc_unused);
if (err) {
ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS);
ip6_flush_pending_frames(sk);
} else {
err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
len + sizeof(struct icmp6hdr));
}
rcu_read_unlock();
out_dst_release:
dst_release(dst);
out:
icmpv6_xmit_unlock(sk);
}
Commit Message: net: handle no dst on skb in icmp6_send
Andrey reported the following while fuzzing the kernel with syzkaller:
kasan: CONFIG_KASAN_INLINE enabled
kasan: GPF could be caused by NULL-ptr deref or user memory access
general protection fault: 0000 [#1] SMP KASAN
Modules linked in:
CPU: 0 PID: 3859 Comm: a.out Not tainted 4.9.0-rc6+ #429
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
task: ffff8800666d4200 task.stack: ffff880067348000
RIP: 0010:[<ffffffff833617ec>] [<ffffffff833617ec>]
icmp6_send+0x5fc/0x1e30 net/ipv6/icmp.c:451
RSP: 0018:ffff88006734f2c0 EFLAGS: 00010206
RAX: ffff8800666d4200 RBX: 0000000000000000 RCX: 0000000000000000
RDX: 0000000000000000 RSI: dffffc0000000000 RDI: 0000000000000018
RBP: ffff88006734f630 R08: ffff880064138418 R09: 0000000000000003
R10: dffffc0000000000 R11: 0000000000000005 R12: 0000000000000000
R13: ffffffff84e7e200 R14: ffff880064138484 R15: ffff8800641383c0
FS: 00007fb3887a07c0(0000) GS:ffff88006cc00000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 0000000020000000 CR3: 000000006b040000 CR4: 00000000000006f0
Stack:
ffff8800666d4200 ffff8800666d49f8 ffff8800666d4200 ffffffff84c02460
ffff8800666d4a1a 1ffff1000ccdaa2f ffff88006734f498 0000000000000046
ffff88006734f440 ffffffff832f4269 ffff880064ba7456 0000000000000000
Call Trace:
[<ffffffff83364ddc>] icmpv6_param_prob+0x2c/0x40 net/ipv6/icmp.c:557
[< inline >] ip6_tlvopt_unknown net/ipv6/exthdrs.c:88
[<ffffffff83394405>] ip6_parse_tlv+0x555/0x670 net/ipv6/exthdrs.c:157
[<ffffffff8339a759>] ipv6_parse_hopopts+0x199/0x460 net/ipv6/exthdrs.c:663
[<ffffffff832ee773>] ipv6_rcv+0xfa3/0x1dc0 net/ipv6/ip6_input.c:191
...
icmp6_send / icmpv6_send is invoked for both rx and tx paths. In both
cases the dst->dev should be preferred for determining the L3 domain
if the dst has been set on the skb. Fallback to the skb->dev if it has
not. This covers the case reported here where icmp6_send is invoked on
Rx before the route lookup.
Fixes: 5d41ce29e ("net: icmp6_send should use dst dev to determine L3 domain")
Reported-by: Andrey Konovalov <[email protected]>
Signed-off-by: David Ahern <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
CWE ID: CWE-20 | static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
const struct in6_addr *force_saddr)
{
struct net *net = dev_net(skb->dev);
struct inet6_dev *idev = NULL;
struct ipv6hdr *hdr = ipv6_hdr(skb);
struct sock *sk;
struct ipv6_pinfo *np;
const struct in6_addr *saddr = NULL;
struct dst_entry *dst;
struct icmp6hdr tmp_hdr;
struct flowi6 fl6;
struct icmpv6_msg msg;
struct sockcm_cookie sockc_unused = {0};
struct ipcm6_cookie ipc6;
int iif = 0;
int addr_type = 0;
int len;
int err = 0;
u32 mark = IP6_REPLY_MARK(net, skb->mark);
if ((u8 *)hdr < skb->head ||
(skb_network_header(skb) + sizeof(*hdr)) > skb_tail_pointer(skb))
return;
/*
* Make sure we respect the rules
* i.e. RFC 1885 2.4(e)
* Rule (e.1) is enforced by not using icmp6_send
* in any code that processes icmp errors.
*/
addr_type = ipv6_addr_type(&hdr->daddr);
if (ipv6_chk_addr(net, &hdr->daddr, skb->dev, 0) ||
ipv6_chk_acast_addr_src(net, skb->dev, &hdr->daddr))
saddr = &hdr->daddr;
/*
* Dest addr check
*/
if (addr_type & IPV6_ADDR_MULTICAST || skb->pkt_type != PACKET_HOST) {
if (type != ICMPV6_PKT_TOOBIG &&
!(type == ICMPV6_PARAMPROB &&
code == ICMPV6_UNK_OPTION &&
(opt_unrec(skb, info))))
return;
saddr = NULL;
}
addr_type = ipv6_addr_type(&hdr->saddr);
/*
* Source addr check
*/
if (__ipv6_addr_needs_scope_id(addr_type))
iif = skb->dev->ifindex;
else {
dst = skb_dst(skb);
iif = l3mdev_master_ifindex(dst ? dst->dev : skb->dev);
}
/*
* Must not send error if the source does not uniquely
* identify a single node (RFC2463 Section 2.4).
* We check unspecified / multicast addresses here,
* and anycast addresses will be checked later.
*/
if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) {
net_dbg_ratelimited("icmp6_send: addr_any/mcast source [%pI6c > %pI6c]\n",
&hdr->saddr, &hdr->daddr);
return;
}
/*
* Never answer to a ICMP packet.
*/
if (is_ineligible(skb)) {
net_dbg_ratelimited("icmp6_send: no reply to icmp error [%pI6c > %pI6c]\n",
&hdr->saddr, &hdr->daddr);
return;
}
mip6_addr_swap(skb);
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_ICMPV6;
fl6.daddr = hdr->saddr;
if (force_saddr)
saddr = force_saddr;
if (saddr)
fl6.saddr = *saddr;
fl6.flowi6_mark = mark;
fl6.flowi6_oif = iif;
fl6.fl6_icmp_type = type;
fl6.fl6_icmp_code = code;
security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
sk = icmpv6_xmit_lock(net);
if (!sk)
return;
sk->sk_mark = mark;
np = inet6_sk(sk);
if (!icmpv6_xrlim_allow(sk, type, &fl6))
goto out;
tmp_hdr.icmp6_type = type;
tmp_hdr.icmp6_code = code;
tmp_hdr.icmp6_cksum = 0;
tmp_hdr.icmp6_pointer = htonl(info);
if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
fl6.flowi6_oif = np->mcast_oif;
else if (!fl6.flowi6_oif)
fl6.flowi6_oif = np->ucast_oif;
ipc6.tclass = np->tclass;
fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
dst = icmpv6_route_lookup(net, skb, sk, &fl6);
if (IS_ERR(dst))
goto out;
ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
ipc6.dontfrag = np->dontfrag;
ipc6.opt = NULL;
msg.skb = skb;
msg.offset = skb_network_offset(skb);
msg.type = type;
len = skb->len - msg.offset;
len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) - sizeof(struct icmp6hdr));
if (len < 0) {
net_dbg_ratelimited("icmp: len problem [%pI6c > %pI6c]\n",
&hdr->saddr, &hdr->daddr);
goto out_dst_release;
}
rcu_read_lock();
idev = __in6_dev_get(skb->dev);
err = ip6_append_data(sk, icmpv6_getfrag, &msg,
len + sizeof(struct icmp6hdr),
sizeof(struct icmp6hdr),
&ipc6, &fl6, (struct rt6_info *)dst,
MSG_DONTWAIT, &sockc_unused);
if (err) {
ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS);
ip6_flush_pending_frames(sk);
} else {
err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
len + sizeof(struct icmp6hdr));
}
rcu_read_unlock();
out_dst_release:
dst_release(dst);
out:
icmpv6_xmit_unlock(sk);
}
| 28,624 |
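
The one functional change above is how iif is chosen: prefer the device attached to the cached route (set on the transmit path) and fall back to the device the packet arrived on when no dst has been attached yet, which is the receive-path case that crashed. A sketch with toy stand-ins for the kernel structures:

    struct NetDeviceSketch { int ifindex; };
    struct DstEntrySketch  { NetDeviceSketch* dev; };
    struct SkBuffSketch    { NetDeviceSketch* dev; DstEntrySketch* dst; };

    int l3_domain_ifindex_sketch(const SkBuffSketch& skb) {
        const NetDeviceSketch* dev =
            (skb.dst != nullptr && skb.dst->dev != nullptr) ? skb.dst->dev : skb.dev;
        return dev != nullptr ? dev->ifindex : 0;
    }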
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
struct sctp_chunk *asconf)
{
sctp_addiphdr_t *hdr;
union sctp_addr_param *addr_param;
sctp_addip_param_t *asconf_param;
struct sctp_chunk *asconf_ack;
__be16 err_code;
int length = 0;
int chunk_len;
__u32 serial;
int all_param_pass = 1;
chunk_len = ntohs(asconf->chunk_hdr->length) - sizeof(sctp_chunkhdr_t);
hdr = (sctp_addiphdr_t *)asconf->skb->data;
serial = ntohl(hdr->serial);
/* Skip the addiphdr and store a pointer to address parameter. */
length = sizeof(sctp_addiphdr_t);
addr_param = (union sctp_addr_param *)(asconf->skb->data + length);
chunk_len -= length;
/* Skip the address parameter and store a pointer to the first
* asconf parameter.
*/
length = ntohs(addr_param->p.length);
asconf_param = (void *)addr_param + length;
chunk_len -= length;
/* create an ASCONF_ACK chunk.
* Based on the definitions of parameters, we know that the size of
* ASCONF_ACK parameters are less than or equal to the fourfold of ASCONF
* parameters.
*/
asconf_ack = sctp_make_asconf_ack(asoc, serial, chunk_len * 4);
if (!asconf_ack)
goto done;
/* Process the TLVs contained within the ASCONF chunk. */
while (chunk_len > 0) {
err_code = sctp_process_asconf_param(asoc, asconf,
asconf_param);
/* ADDIP 4.1 A7)
* If an error response is received for a TLV parameter,
* all TLVs with no response before the failed TLV are
* considered successful if not reported. All TLVs after
* the failed response are considered unsuccessful unless
* a specific success indication is present for the parameter.
*/
if (SCTP_ERROR_NO_ERROR != err_code)
all_param_pass = 0;
if (!all_param_pass)
sctp_add_asconf_response(asconf_ack,
asconf_param->crr_id, err_code,
asconf_param);
/* ADDIP 4.3 D11) When an endpoint receiving an ASCONF to add
* an IP address sends an 'Out of Resource' in its response, it
* MUST also fail any subsequent add or delete requests bundled
* in the ASCONF.
*/
if (SCTP_ERROR_RSRC_LOW == err_code)
goto done;
/* Move to the next ASCONF param. */
length = ntohs(asconf_param->param_hdr.length);
asconf_param = (void *)asconf_param + length;
chunk_len -= length;
}
done:
asoc->peer.addip_serial++;
/* If we are sending a new ASCONF_ACK hold a reference to it in assoc
* after freeing the reference to old asconf ack if any.
*/
if (asconf_ack) {
sctp_chunk_hold(asconf_ack);
list_add_tail(&asconf_ack->transmitted_list,
&asoc->asconf_ack_list);
}
return asconf_ack;
}
Commit Message: net: sctp: fix skb_over_panic when receiving malformed ASCONF chunks
Commit 6f4c618ddb0 ("SCTP : Add paramters validity check for
ASCONF chunk") added basic verification of ASCONF chunks, however,
it is still possible to remotely crash a server by sending a
special crafted ASCONF chunk, even up to pre 2.6.12 kernels:
skb_over_panic: text:ffffffffa01ea1c3 len:31056 put:30768
head:ffff88011bd81800 data:ffff88011bd81800 tail:0x7950
end:0x440 dev:<NULL>
------------[ cut here ]------------
kernel BUG at net/core/skbuff.c:129!
[...]
Call Trace:
<IRQ>
[<ffffffff8144fb1c>] skb_put+0x5c/0x70
[<ffffffffa01ea1c3>] sctp_addto_chunk+0x63/0xd0 [sctp]
[<ffffffffa01eadaf>] sctp_process_asconf+0x1af/0x540 [sctp]
[<ffffffff8152d025>] ? _read_unlock_bh+0x15/0x20
[<ffffffffa01e0038>] sctp_sf_do_asconf+0x168/0x240 [sctp]
[<ffffffffa01e3751>] sctp_do_sm+0x71/0x1210 [sctp]
[<ffffffff8147645d>] ? fib_rules_lookup+0xad/0xf0
[<ffffffffa01e6b22>] ? sctp_cmp_addr_exact+0x32/0x40 [sctp]
[<ffffffffa01e8393>] sctp_assoc_bh_rcv+0xd3/0x180 [sctp]
[<ffffffffa01ee986>] sctp_inq_push+0x56/0x80 [sctp]
[<ffffffffa01fcc42>] sctp_rcv+0x982/0xa10 [sctp]
[<ffffffffa01d5123>] ? ipt_local_in_hook+0x23/0x28 [iptable_filter]
[<ffffffff8148bdc9>] ? nf_iterate+0x69/0xb0
[<ffffffff81496d10>] ? ip_local_deliver_finish+0x0/0x2d0
[<ffffffff8148bf86>] ? nf_hook_slow+0x76/0x120
[<ffffffff81496d10>] ? ip_local_deliver_finish+0x0/0x2d0
[<ffffffff81496ded>] ip_local_deliver_finish+0xdd/0x2d0
[<ffffffff81497078>] ip_local_deliver+0x98/0xa0
[<ffffffff8149653d>] ip_rcv_finish+0x12d/0x440
[<ffffffff81496ac5>] ip_rcv+0x275/0x350
[<ffffffff8145c88b>] __netif_receive_skb+0x4ab/0x750
[<ffffffff81460588>] netif_receive_skb+0x58/0x60
This can be triggered e.g., through a simple scripted nmap
connection scan injecting the chunk after the handshake, for
example, ...
-------------- INIT[ASCONF; ASCONF_ACK] ------------->
<----------- INIT-ACK[ASCONF; ASCONF_ACK] ------------
-------------------- COOKIE-ECHO -------------------->
<-------------------- COOKIE-ACK ---------------------
------------------ ASCONF; UNKNOWN ------------------>
... where ASCONF chunk of length 280 contains 2 parameters ...
1) Add IP address parameter (param length: 16)
2) Add/del IP address parameter (param length: 255)
... followed by an UNKNOWN chunk of e.g. 4 bytes. Here, the
Address Parameter in the ASCONF chunk is even missing, too.
This is just an example and similarly-crafted ASCONF chunks
could be used just as well.
The ASCONF chunk passes through sctp_verify_asconf() as all
parameters passed sanity checks, and after walking, we ended
up successfully at the chunk end boundary, and thus may invoke
sctp_process_asconf(). Parameter walking is done with
WORD_ROUND() to take padding into account.
In sctp_process_asconf()'s TLV processing, we may fail in
sctp_process_asconf_param() e.g., due to removal of the IP
address that is also the source address of the packet containing
the ASCONF chunk, and thus we need to add all TLVs after the
failure to our ASCONF response to remote via helper function
sctp_add_asconf_response(), which basically invokes a
sctp_addto_chunk() adding the error parameters to the given
skb.
When walking to the next parameter this time, we proceed
with ...
length = ntohs(asconf_param->param_hdr.length);
asconf_param = (void *)asconf_param + length;
... instead of the WORD_ROUND()'ed length, thus resulting here
in an off-by-one that leads to reading the follow-up garbage
parameter length of 12336, and thus throwing an skb_over_panic
for the reply when trying to sctp_addto_chunk() next time,
which implicitly calls the skb_put() with that length.
Fix it by using sctp_walk_params() [ which is also used in
INIT parameter processing ] macro in the verification *and*
in ASCONF processing: it will make sure we don't spill over,
that we walk parameters WORD_ROUND()'ed. Moreover, we're being
more defensive and guard against unknown parameter types and
missized addresses.
Joint work with Vlad Yasevich.
Fixes: b896b82be4ae ("[SCTP] ADDIP: Support for processing incoming ASCONF_ACK chunks.")
Signed-off-by: Daniel Borkmann <[email protected]>
Signed-off-by: Vlad Yasevich <[email protected]>
Acked-by: Neil Horman <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
CWE ID: CWE-399 | struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
struct sctp_chunk *asconf)
{
sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) asconf->chunk_hdr;
bool all_param_pass = true;
union sctp_params param;
sctp_addiphdr_t *hdr;
union sctp_addr_param *addr_param;
sctp_addip_param_t *asconf_param;
struct sctp_chunk *asconf_ack;
__be16 err_code;
int length = 0;
int chunk_len;
__u32 serial;
chunk_len = ntohs(asconf->chunk_hdr->length) - sizeof(sctp_chunkhdr_t);
hdr = (sctp_addiphdr_t *)asconf->skb->data;
serial = ntohl(hdr->serial);
/* Skip the addiphdr and store a pointer to address parameter. */
length = sizeof(sctp_addiphdr_t);
addr_param = (union sctp_addr_param *)(asconf->skb->data + length);
chunk_len -= length;
/* Skip the address parameter and store a pointer to the first
* asconf parameter.
*/
length = ntohs(addr_param->p.length);
asconf_param = (void *)addr_param + length;
chunk_len -= length;
/* create an ASCONF_ACK chunk.
* Based on the definitions of parameters, we know that the size of
* ASCONF_ACK parameters are less than or equal to the fourfold of ASCONF
* parameters.
*/
asconf_ack = sctp_make_asconf_ack(asoc, serial, chunk_len * 4);
if (!asconf_ack)
goto done;
/* Process the TLVs contained within the ASCONF chunk. */
sctp_walk_params(param, addip, addip_hdr.params) {
/* Skip preceeding address parameters. */
if (param.p->type == SCTP_PARAM_IPV4_ADDRESS ||
param.p->type == SCTP_PARAM_IPV6_ADDRESS)
continue;
err_code = sctp_process_asconf_param(asoc, asconf,
param.addip);
/* ADDIP 4.1 A7)
* If an error response is received for a TLV parameter,
* all TLVs with no response before the failed TLV are
* considered successful if not reported. All TLVs after
* the failed response are considered unsuccessful unless
* a specific success indication is present for the parameter.
*/
if (err_code != SCTP_ERROR_NO_ERROR)
all_param_pass = false;
if (!all_param_pass)
sctp_add_asconf_response(asconf_ack, param.addip->crr_id,
err_code, param.addip);
/* ADDIP 4.3 D11) When an endpoint receiving an ASCONF to add
* an IP address sends an 'Out of Resource' in its response, it
* MUST also fail any subsequent add or delete requests bundled
* in the ASCONF.
*/
if (err_code == SCTP_ERROR_RSRC_LOW)
goto done;
}
done:
asoc->peer.addip_serial++;
/* If we are sending a new ASCONF_ACK hold a reference to it in assoc
* after freeing the reference to old asconf ack if any.
*/
if (asconf_ack) {
sctp_chunk_hold(asconf_ack);
list_add_tail(&asconf_ack->transmitted_list,
&asoc->asconf_ack_list);
}
return asconf_ack;
}
| 7,493 |
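
The commit message above pins the off-by-one on advancing by the raw declared length instead of the WORD_ROUND()'ed one. The traversal rule the fix standardizes on: verify a full header fits before reading it, reject lengths shorter than the header, and step by the length rounded up to a 4-byte boundary so padding can never desynchronize the walk or push it past the chunk. A self-contained sketch (real SCTP lengths are big-endian; that detail is skipped here):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    struct TlvHeaderSketch {
        uint16_t type;
        uint16_t length;   // includes this 4-byte header
    };

    bool walk_tlvs_sketch(const uint8_t* data, size_t len) {
        size_t off = 0;
        while (off + sizeof(TlvHeaderSketch) <= len) {
            TlvHeaderSketch hdr;
            std::memcpy(&hdr, data + off, sizeof(hdr));
            if (hdr.length < sizeof(TlvHeaderSketch))
                return false;                         // shorter than its own header
            const size_t step =
                (static_cast<size_t>(hdr.length) + 3u) & ~static_cast<size_t>(3);
            if (step > len - off)
                return false;                         // would run past the chunk
            // ... dispatch on hdr.type over data + off, hdr.length bytes ...
            off += step;
        }
        return off == len;                            // no trailing garbage
    }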
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static void webkit_web_view_update_settings(WebKitWebView* webView)
{
WebKitWebViewPrivate* priv = webView->priv;
WebKitWebSettings* webSettings = priv->webSettings.get();
Settings* settings = core(webView)->settings();
gchar* defaultEncoding, *cursiveFontFamily, *defaultFontFamily, *fantasyFontFamily, *monospaceFontFamily, *sansSerifFontFamily, *serifFontFamily, *userStylesheetUri, *defaultSpellCheckingLanguages;
gboolean autoLoadImages, autoShrinkImages, printBackgrounds,
enableScripts, enablePlugins, enableDeveloperExtras, resizableTextAreas,
enablePrivateBrowsing, enableCaretBrowsing, enableHTML5Database, enableHTML5LocalStorage,
enableXSSAuditor, enableSpatialNavigation, enableFrameFlattening, javascriptCanOpenWindows,
javaScriptCanAccessClipboard, enableOfflineWebAppCache,
enableUniversalAccessFromFileURI, enableFileAccessFromFileURI,
enableDOMPaste, tabKeyCyclesThroughElements, enableWebGL,
enableSiteSpecificQuirks, usePageCache, enableJavaApplet,
enableHyperlinkAuditing, enableFullscreen, enableDNSPrefetching;
WebKitEditingBehavior editingBehavior;
g_object_get(webSettings,
"default-encoding", &defaultEncoding,
"cursive-font-family", &cursiveFontFamily,
"default-font-family", &defaultFontFamily,
"fantasy-font-family", &fantasyFontFamily,
"monospace-font-family", &monospaceFontFamily,
"sans-serif-font-family", &sansSerifFontFamily,
"serif-font-family", &serifFontFamily,
"auto-load-images", &autoLoadImages,
"auto-shrink-images", &autoShrinkImages,
"print-backgrounds", &printBackgrounds,
"enable-scripts", &enableScripts,
"enable-plugins", &enablePlugins,
"resizable-text-areas", &resizableTextAreas,
"user-stylesheet-uri", &userStylesheetUri,
"enable-developer-extras", &enableDeveloperExtras,
"enable-private-browsing", &enablePrivateBrowsing,
"enable-caret-browsing", &enableCaretBrowsing,
"enable-html5-database", &enableHTML5Database,
"enable-html5-local-storage", &enableHTML5LocalStorage,
"enable-xss-auditor", &enableXSSAuditor,
"enable-spatial-navigation", &enableSpatialNavigation,
"enable-frame-flattening", &enableFrameFlattening,
"javascript-can-open-windows-automatically", &javascriptCanOpenWindows,
"javascript-can-access-clipboard", &javaScriptCanAccessClipboard,
"enable-offline-web-application-cache", &enableOfflineWebAppCache,
"editing-behavior", &editingBehavior,
"enable-universal-access-from-file-uris", &enableUniversalAccessFromFileURI,
"enable-file-access-from-file-uris", &enableFileAccessFromFileURI,
"enable-dom-paste", &enableDOMPaste,
"tab-key-cycles-through-elements", &tabKeyCyclesThroughElements,
"enable-site-specific-quirks", &enableSiteSpecificQuirks,
"enable-page-cache", &usePageCache,
"enable-java-applet", &enableJavaApplet,
"enable-hyperlink-auditing", &enableHyperlinkAuditing,
"spell-checking-languages", &defaultSpellCheckingLanguages,
"enable-fullscreen", &enableFullscreen,
"enable-dns-prefetching", &enableDNSPrefetching,
"enable-webgl", &enableWebGL,
NULL);
settings->setDefaultTextEncodingName(defaultEncoding);
settings->setCursiveFontFamily(cursiveFontFamily);
settings->setStandardFontFamily(defaultFontFamily);
settings->setFantasyFontFamily(fantasyFontFamily);
settings->setFixedFontFamily(monospaceFontFamily);
settings->setSansSerifFontFamily(sansSerifFontFamily);
settings->setSerifFontFamily(serifFontFamily);
settings->setLoadsImagesAutomatically(autoLoadImages);
settings->setShrinksStandaloneImagesToFit(autoShrinkImages);
settings->setShouldPrintBackgrounds(printBackgrounds);
settings->setJavaScriptEnabled(enableScripts);
settings->setPluginsEnabled(enablePlugins);
settings->setTextAreasAreResizable(resizableTextAreas);
settings->setUserStyleSheetLocation(KURL(KURL(), userStylesheetUri));
settings->setDeveloperExtrasEnabled(enableDeveloperExtras);
settings->setPrivateBrowsingEnabled(enablePrivateBrowsing);
settings->setCaretBrowsingEnabled(enableCaretBrowsing);
#if ENABLE(DATABASE)
AbstractDatabase::setIsAvailable(enableHTML5Database);
#endif
settings->setLocalStorageEnabled(enableHTML5LocalStorage);
settings->setXSSAuditorEnabled(enableXSSAuditor);
settings->setSpatialNavigationEnabled(enableSpatialNavigation);
settings->setFrameFlatteningEnabled(enableFrameFlattening);
settings->setJavaScriptCanOpenWindowsAutomatically(javascriptCanOpenWindows);
settings->setJavaScriptCanAccessClipboard(javaScriptCanAccessClipboard);
settings->setOfflineWebApplicationCacheEnabled(enableOfflineWebAppCache);
settings->setEditingBehaviorType(static_cast<WebCore::EditingBehaviorType>(editingBehavior));
settings->setAllowUniversalAccessFromFileURLs(enableUniversalAccessFromFileURI);
settings->setAllowFileAccessFromFileURLs(enableFileAccessFromFileURI);
settings->setDOMPasteAllowed(enableDOMPaste);
settings->setNeedsSiteSpecificQuirks(enableSiteSpecificQuirks);
settings->setUsesPageCache(usePageCache);
settings->setJavaEnabled(enableJavaApplet);
settings->setHyperlinkAuditingEnabled(enableHyperlinkAuditing);
settings->setDNSPrefetchingEnabled(enableDNSPrefetching);
#if ENABLE(FULLSCREEN_API)
settings->setFullScreenEnabled(enableFullscreen);
#endif
#if ENABLE(SPELLCHECK)
WebKit::EditorClient* client = static_cast<WebKit::EditorClient*>(core(webView)->editorClient());
static_cast<WebKit::TextCheckerClientEnchant*>(client->textChecker())->updateSpellCheckingLanguage(defaultSpellCheckingLanguages);
#endif
#if ENABLE(WEBGL)
settings->setWebGLEnabled(enableWebGL);
#endif
Page* page = core(webView);
if (page)
page->setTabKeyCyclesThroughElements(tabKeyCyclesThroughElements);
g_free(defaultEncoding);
g_free(cursiveFontFamily);
g_free(defaultFontFamily);
g_free(fantasyFontFamily);
g_free(monospaceFontFamily);
g_free(sansSerifFontFamily);
g_free(serifFontFamily);
g_free(userStylesheetUri);
webkit_web_view_screen_changed(GTK_WIDGET(webView), NULL);
}
Commit Message: 2011-06-02 Joone Hur <[email protected]>
Reviewed by Martin Robinson.
[GTK] Only load dictionaries if spell check is enabled
https://bugs.webkit.org/show_bug.cgi?id=32879
We don't need to call enchant if enable-spell-checking is false.
* webkit/webkitwebview.cpp:
(webkit_web_view_update_settings): Skip loading dictionaries when enable-spell-checking is false.
(webkit_web_view_settings_notify): Ditto.
git-svn-id: svn://svn.chromium.org/blink/trunk@87925 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: CWE-399 | static void webkit_web_view_update_settings(WebKitWebView* webView)
{
WebKitWebViewPrivate* priv = webView->priv;
WebKitWebSettings* webSettings = priv->webSettings.get();
Settings* settings = core(webView)->settings();
gchar* defaultEncoding, *cursiveFontFamily, *defaultFontFamily, *fantasyFontFamily, *monospaceFontFamily, *sansSerifFontFamily, *serifFontFamily, *userStylesheetUri, *defaultSpellCheckingLanguages;
gboolean autoLoadImages, autoShrinkImages, printBackgrounds,
enableScripts, enablePlugins, enableDeveloperExtras, resizableTextAreas,
enablePrivateBrowsing, enableCaretBrowsing, enableHTML5Database, enableHTML5LocalStorage,
enableXSSAuditor, enableSpatialNavigation, enableFrameFlattening, javascriptCanOpenWindows,
javaScriptCanAccessClipboard, enableOfflineWebAppCache,
enableUniversalAccessFromFileURI, enableFileAccessFromFileURI,
enableDOMPaste, tabKeyCyclesThroughElements, enableWebGL,
enableSiteSpecificQuirks, usePageCache, enableJavaApplet,
enableHyperlinkAuditing, enableFullscreen, enableDNSPrefetching,
enableSpellChecking;
WebKitEditingBehavior editingBehavior;
g_object_get(webSettings,
"default-encoding", &defaultEncoding,
"cursive-font-family", &cursiveFontFamily,
"default-font-family", &defaultFontFamily,
"fantasy-font-family", &fantasyFontFamily,
"monospace-font-family", &monospaceFontFamily,
"sans-serif-font-family", &sansSerifFontFamily,
"serif-font-family", &serifFontFamily,
"auto-load-images", &autoLoadImages,
"auto-shrink-images", &autoShrinkImages,
"print-backgrounds", &printBackgrounds,
"enable-scripts", &enableScripts,
"enable-plugins", &enablePlugins,
"resizable-text-areas", &resizableTextAreas,
"user-stylesheet-uri", &userStylesheetUri,
"enable-developer-extras", &enableDeveloperExtras,
"enable-private-browsing", &enablePrivateBrowsing,
"enable-caret-browsing", &enableCaretBrowsing,
"enable-html5-database", &enableHTML5Database,
"enable-html5-local-storage", &enableHTML5LocalStorage,
"enable-xss-auditor", &enableXSSAuditor,
"enable-spatial-navigation", &enableSpatialNavigation,
"enable-frame-flattening", &enableFrameFlattening,
"javascript-can-open-windows-automatically", &javascriptCanOpenWindows,
"javascript-can-access-clipboard", &javaScriptCanAccessClipboard,
"enable-offline-web-application-cache", &enableOfflineWebAppCache,
"editing-behavior", &editingBehavior,
"enable-universal-access-from-file-uris", &enableUniversalAccessFromFileURI,
"enable-file-access-from-file-uris", &enableFileAccessFromFileURI,
"enable-dom-paste", &enableDOMPaste,
"tab-key-cycles-through-elements", &tabKeyCyclesThroughElements,
"enable-site-specific-quirks", &enableSiteSpecificQuirks,
"enable-page-cache", &usePageCache,
"enable-java-applet", &enableJavaApplet,
"enable-hyperlink-auditing", &enableHyperlinkAuditing,
"enable-spell-checking", &enableSpellChecking,
"spell-checking-languages", &defaultSpellCheckingLanguages,
"enable-fullscreen", &enableFullscreen,
"enable-dns-prefetching", &enableDNSPrefetching,
"enable-webgl", &enableWebGL,
NULL);
settings->setDefaultTextEncodingName(defaultEncoding);
settings->setCursiveFontFamily(cursiveFontFamily);
settings->setStandardFontFamily(defaultFontFamily);
settings->setFantasyFontFamily(fantasyFontFamily);
settings->setFixedFontFamily(monospaceFontFamily);
settings->setSansSerifFontFamily(sansSerifFontFamily);
settings->setSerifFontFamily(serifFontFamily);
settings->setLoadsImagesAutomatically(autoLoadImages);
settings->setShrinksStandaloneImagesToFit(autoShrinkImages);
settings->setShouldPrintBackgrounds(printBackgrounds);
settings->setJavaScriptEnabled(enableScripts);
settings->setPluginsEnabled(enablePlugins);
settings->setTextAreasAreResizable(resizableTextAreas);
settings->setUserStyleSheetLocation(KURL(KURL(), userStylesheetUri));
settings->setDeveloperExtrasEnabled(enableDeveloperExtras);
settings->setPrivateBrowsingEnabled(enablePrivateBrowsing);
settings->setCaretBrowsingEnabled(enableCaretBrowsing);
#if ENABLE(DATABASE)
AbstractDatabase::setIsAvailable(enableHTML5Database);
#endif
settings->setLocalStorageEnabled(enableHTML5LocalStorage);
settings->setXSSAuditorEnabled(enableXSSAuditor);
settings->setSpatialNavigationEnabled(enableSpatialNavigation);
settings->setFrameFlatteningEnabled(enableFrameFlattening);
settings->setJavaScriptCanOpenWindowsAutomatically(javascriptCanOpenWindows);
settings->setJavaScriptCanAccessClipboard(javaScriptCanAccessClipboard);
settings->setOfflineWebApplicationCacheEnabled(enableOfflineWebAppCache);
settings->setEditingBehaviorType(static_cast<WebCore::EditingBehaviorType>(editingBehavior));
settings->setAllowUniversalAccessFromFileURLs(enableUniversalAccessFromFileURI);
settings->setAllowFileAccessFromFileURLs(enableFileAccessFromFileURI);
settings->setDOMPasteAllowed(enableDOMPaste);
settings->setNeedsSiteSpecificQuirks(enableSiteSpecificQuirks);
settings->setUsesPageCache(usePageCache);
settings->setJavaEnabled(enableJavaApplet);
settings->setHyperlinkAuditingEnabled(enableHyperlinkAuditing);
settings->setDNSPrefetchingEnabled(enableDNSPrefetching);
#if ENABLE(FULLSCREEN_API)
settings->setFullScreenEnabled(enableFullscreen);
#endif
#if ENABLE(SPELLCHECK)
if (enableSpellChecking) {
WebKit::EditorClient* client = static_cast<WebKit::EditorClient*>(core(webView)->editorClient());
static_cast<WebKit::TextCheckerClientEnchant*>(client->textChecker())->updateSpellCheckingLanguage(defaultSpellCheckingLanguages);
}
#endif
#if ENABLE(WEBGL)
settings->setWebGLEnabled(enableWebGL);
#endif
Page* page = core(webView);
if (page)
page->setTabKeyCyclesThroughElements(tabKeyCyclesThroughElements);
g_free(defaultEncoding);
g_free(cursiveFontFamily);
g_free(defaultFontFamily);
g_free(fantasyFontFamily);
g_free(monospaceFontFamily);
g_free(sansSerifFontFamily);
g_free(serifFontFamily);
g_free(userStylesheetUri);
webkit_web_view_screen_changed(GTK_WIDGET(webView), NULL);
}
| 19,753 |
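Illustrative aside, not part of the dataset row above: a minimal C sketch of the pattern this commit applies, where an expensive resource load (the enchant dictionaries) is skipped entirely unless the corresponding setting is enabled. The function and setting names below are invented for the sketch and are not the WebKit API.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the expensive dictionary-loading call (enchant in WebKitGTK). */
static void load_dictionaries(const char *languages)
{
    printf("loading dictionaries for: %s\n",
           languages ? languages : "(default)");
}

static void update_spell_checking(bool enabled, const char *languages)
{
    if (!enabled)
        return;                     /* setting is off: do no work at all */
    load_dictionaries(languages);
}

int main(void)
{
    update_spell_checking(false, "en_US");  /* skipped */
    update_spell_checking(true, "en_US");   /* dictionaries loaded */
    return 0;
}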
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
struct usb_interface *intf,
u8 *buffer,
int buflen)
{
/* duplicates are ignored */
struct usb_cdc_union_desc *union_header = NULL;
/* duplicates are not tolerated */
struct usb_cdc_header_desc *header = NULL;
struct usb_cdc_ether_desc *ether = NULL;
struct usb_cdc_mdlm_detail_desc *detail = NULL;
struct usb_cdc_mdlm_desc *desc = NULL;
unsigned int elength;
int cnt = 0;
memset(hdr, 0x00, sizeof(struct usb_cdc_parsed_header));
hdr->phonet_magic_present = false;
while (buflen > 0) {
elength = buffer[0];
if (!elength) {
dev_err(&intf->dev, "skipping garbage byte\n");
elength = 1;
goto next_desc;
}
if (buffer[1] != USB_DT_CS_INTERFACE) {
dev_err(&intf->dev, "skipping garbage\n");
goto next_desc;
}
switch (buffer[2]) {
case USB_CDC_UNION_TYPE: /* we've found it */
if (elength < sizeof(struct usb_cdc_union_desc))
goto next_desc;
if (union_header) {
dev_err(&intf->dev, "More than one union descriptor, skipping ...\n");
goto next_desc;
}
union_header = (struct usb_cdc_union_desc *)buffer;
break;
case USB_CDC_COUNTRY_TYPE:
if (elength < sizeof(struct usb_cdc_country_functional_desc))
goto next_desc;
hdr->usb_cdc_country_functional_desc =
(struct usb_cdc_country_functional_desc *)buffer;
break;
case USB_CDC_HEADER_TYPE:
if (elength != sizeof(struct usb_cdc_header_desc))
goto next_desc;
if (header)
return -EINVAL;
header = (struct usb_cdc_header_desc *)buffer;
break;
case USB_CDC_ACM_TYPE:
if (elength < sizeof(struct usb_cdc_acm_descriptor))
goto next_desc;
hdr->usb_cdc_acm_descriptor =
(struct usb_cdc_acm_descriptor *)buffer;
break;
case USB_CDC_ETHERNET_TYPE:
if (elength != sizeof(struct usb_cdc_ether_desc))
goto next_desc;
if (ether)
return -EINVAL;
ether = (struct usb_cdc_ether_desc *)buffer;
break;
case USB_CDC_CALL_MANAGEMENT_TYPE:
if (elength < sizeof(struct usb_cdc_call_mgmt_descriptor))
goto next_desc;
hdr->usb_cdc_call_mgmt_descriptor =
(struct usb_cdc_call_mgmt_descriptor *)buffer;
break;
case USB_CDC_DMM_TYPE:
if (elength < sizeof(struct usb_cdc_dmm_desc))
goto next_desc;
hdr->usb_cdc_dmm_desc =
(struct usb_cdc_dmm_desc *)buffer;
break;
case USB_CDC_MDLM_TYPE:
if (elength < sizeof(struct usb_cdc_mdlm_desc *))
goto next_desc;
if (desc)
return -EINVAL;
desc = (struct usb_cdc_mdlm_desc *)buffer;
break;
case USB_CDC_MDLM_DETAIL_TYPE:
if (elength < sizeof(struct usb_cdc_mdlm_detail_desc *))
goto next_desc;
if (detail)
return -EINVAL;
detail = (struct usb_cdc_mdlm_detail_desc *)buffer;
break;
case USB_CDC_NCM_TYPE:
if (elength < sizeof(struct usb_cdc_ncm_desc))
goto next_desc;
hdr->usb_cdc_ncm_desc = (struct usb_cdc_ncm_desc *)buffer;
break;
case USB_CDC_MBIM_TYPE:
if (elength < sizeof(struct usb_cdc_mbim_desc))
goto next_desc;
hdr->usb_cdc_mbim_desc = (struct usb_cdc_mbim_desc *)buffer;
break;
case USB_CDC_MBIM_EXTENDED_TYPE:
if (elength < sizeof(struct usb_cdc_mbim_extended_desc))
break;
hdr->usb_cdc_mbim_extended_desc =
(struct usb_cdc_mbim_extended_desc *)buffer;
break;
case CDC_PHONET_MAGIC_NUMBER:
hdr->phonet_magic_present = true;
break;
default:
/*
* there are LOTS more CDC descriptors that
* could legitimately be found here.
*/
dev_dbg(&intf->dev, "Ignoring descriptor: type %02x, length %ud\n",
buffer[2], elength);
goto next_desc;
}
cnt++;
next_desc:
buflen -= elength;
buffer += elength;
}
hdr->usb_cdc_union_desc = union_header;
hdr->usb_cdc_header_desc = header;
hdr->usb_cdc_mdlm_detail_desc = detail;
hdr->usb_cdc_mdlm_desc = desc;
hdr->usb_cdc_ether_desc = ether;
return cnt;
}
Commit Message: USB: core: harden cdc_parse_cdc_header
Andrey Konovalov reported a possible out-of-bounds problem for the
cdc_parse_cdc_header function. He writes:
It looks like cdc_parse_cdc_header() doesn't validate buflen
before accessing buffer[1], buffer[2] and so on. The only check
present is while (buflen > 0).
So fix this issue up by properly validating the buffer length matches
what the descriptor says it is.
Reported-by: Andrey Konovalov <[email protected]>
Tested-by: Andrey Konovalov <[email protected]>
Cc: stable <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
CWE ID: CWE-119 | int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
struct usb_interface *intf,
u8 *buffer,
int buflen)
{
/* duplicates are ignored */
struct usb_cdc_union_desc *union_header = NULL;
/* duplicates are not tolerated */
struct usb_cdc_header_desc *header = NULL;
struct usb_cdc_ether_desc *ether = NULL;
struct usb_cdc_mdlm_detail_desc *detail = NULL;
struct usb_cdc_mdlm_desc *desc = NULL;
unsigned int elength;
int cnt = 0;
memset(hdr, 0x00, sizeof(struct usb_cdc_parsed_header));
hdr->phonet_magic_present = false;
while (buflen > 0) {
elength = buffer[0];
if (!elength) {
dev_err(&intf->dev, "skipping garbage byte\n");
elength = 1;
goto next_desc;
}
if ((buflen < elength) || (elength < 3)) {
dev_err(&intf->dev, "invalid descriptor buffer length\n");
break;
}
if (buffer[1] != USB_DT_CS_INTERFACE) {
dev_err(&intf->dev, "skipping garbage\n");
goto next_desc;
}
switch (buffer[2]) {
case USB_CDC_UNION_TYPE: /* we've found it */
if (elength < sizeof(struct usb_cdc_union_desc))
goto next_desc;
if (union_header) {
dev_err(&intf->dev, "More than one union descriptor, skipping ...\n");
goto next_desc;
}
union_header = (struct usb_cdc_union_desc *)buffer;
break;
case USB_CDC_COUNTRY_TYPE:
if (elength < sizeof(struct usb_cdc_country_functional_desc))
goto next_desc;
hdr->usb_cdc_country_functional_desc =
(struct usb_cdc_country_functional_desc *)buffer;
break;
case USB_CDC_HEADER_TYPE:
if (elength != sizeof(struct usb_cdc_header_desc))
goto next_desc;
if (header)
return -EINVAL;
header = (struct usb_cdc_header_desc *)buffer;
break;
case USB_CDC_ACM_TYPE:
if (elength < sizeof(struct usb_cdc_acm_descriptor))
goto next_desc;
hdr->usb_cdc_acm_descriptor =
(struct usb_cdc_acm_descriptor *)buffer;
break;
case USB_CDC_ETHERNET_TYPE:
if (elength != sizeof(struct usb_cdc_ether_desc))
goto next_desc;
if (ether)
return -EINVAL;
ether = (struct usb_cdc_ether_desc *)buffer;
break;
case USB_CDC_CALL_MANAGEMENT_TYPE:
if (elength < sizeof(struct usb_cdc_call_mgmt_descriptor))
goto next_desc;
hdr->usb_cdc_call_mgmt_descriptor =
(struct usb_cdc_call_mgmt_descriptor *)buffer;
break;
case USB_CDC_DMM_TYPE:
if (elength < sizeof(struct usb_cdc_dmm_desc))
goto next_desc;
hdr->usb_cdc_dmm_desc =
(struct usb_cdc_dmm_desc *)buffer;
break;
case USB_CDC_MDLM_TYPE:
if (elength < sizeof(struct usb_cdc_mdlm_desc *))
goto next_desc;
if (desc)
return -EINVAL;
desc = (struct usb_cdc_mdlm_desc *)buffer;
break;
case USB_CDC_MDLM_DETAIL_TYPE:
if (elength < sizeof(struct usb_cdc_mdlm_detail_desc *))
goto next_desc;
if (detail)
return -EINVAL;
detail = (struct usb_cdc_mdlm_detail_desc *)buffer;
break;
case USB_CDC_NCM_TYPE:
if (elength < sizeof(struct usb_cdc_ncm_desc))
goto next_desc;
hdr->usb_cdc_ncm_desc = (struct usb_cdc_ncm_desc *)buffer;
break;
case USB_CDC_MBIM_TYPE:
if (elength < sizeof(struct usb_cdc_mbim_desc))
goto next_desc;
hdr->usb_cdc_mbim_desc = (struct usb_cdc_mbim_desc *)buffer;
break;
case USB_CDC_MBIM_EXTENDED_TYPE:
if (elength < sizeof(struct usb_cdc_mbim_extended_desc))
break;
hdr->usb_cdc_mbim_extended_desc =
(struct usb_cdc_mbim_extended_desc *)buffer;
break;
case CDC_PHONET_MAGIC_NUMBER:
hdr->phonet_magic_present = true;
break;
default:
/*
* there are LOTS more CDC descriptors that
* could legitimately be found here.
*/
dev_dbg(&intf->dev, "Ignoring descriptor: type %02x, length %ud\n",
buffer[2], elength);
goto next_desc;
}
cnt++;
next_desc:
buflen -= elength;
buffer += elength;
}
hdr->usb_cdc_union_desc = union_header;
hdr->usb_cdc_header_desc = header;
hdr->usb_cdc_mdlm_detail_desc = detail;
hdr->usb_cdc_mdlm_desc = desc;
hdr->usb_cdc_ether_desc = ether;
return cnt;
}
| 19,615 |
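Illustrative aside, not part of the dataset row above: a self-contained C sketch of the hardening this commit adds. Each descriptor's claimed element length is validated against the bytes actually remaining (and against a minimum of 3) before any field past the length byte is read. The names walk_descriptors and crafted, and the byte values, are invented for the sketch; this is not kernel code.

#include <stdio.h>
#include <stddef.h>

static int walk_descriptors(const unsigned char *buf, size_t buflen)
{
    size_t pos = 0;

    while (pos < buflen) {
        size_t elength = buf[pos];

        if (elength == 0) {          /* skip a garbage byte */
            pos++;
            continue;
        }
        /* Reject elements that claim more bytes than remain, or that are
         * too short to even carry the type/subtype fields read below. */
        if (elength > buflen - pos || elength < 3)
            return -1;

        printf("descriptor subtype 0x%02x, length %zu\n",
               buf[pos + 2], elength);
        pos += elength;
    }
    return 0;
}

int main(void)
{
    /* The last element claims 0x20 bytes but only 3 remain in the buffer. */
    const unsigned char crafted[] = { 0x05, 0x24, 0x00, 0x10, 0x01,
                                      0x20, 0x24, 0x06 };
    return walk_descriptors(crafted, sizeof(crafted)) == -1 ? 0 : 1;
}

Without the remaining-length check, the subtype read and the final pos advance could both run past the end of the buffer, which is the kind of out-of-bounds access the commit message describes for cdc_parse_cdc_header.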
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: bool GDataRootDirectory::ParseFromString(const std::string& serialized_proto) {
scoped_ptr<GDataRootDirectoryProto> proto(
new GDataRootDirectoryProto());
bool ok = proto->ParseFromString(serialized_proto);
if (ok) {
const std::string& title = proto->gdata_directory().gdata_entry().title();
if (title != "drive") {
LOG(ERROR) << "Incompatible proto detected: " << title;
return false;
}
FromProto(*proto.get());
set_origin(FROM_CACHE);
set_refresh_time(base::Time::Now());
}
return ok;
}
Commit Message: gdata: Define the resource ID for the root directory
Per the spec, the resource ID for the root directory is defined
as "folder:root". Add the resource ID to the root directory in our
file system representation so we can look up the root directory by
the resource ID.
BUG=127697
TEST=add unit tests
Review URL: https://chromiumcodereview.appspot.com/10332253
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@137928 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: | bool GDataRootDirectory::ParseFromString(const std::string& serialized_proto) {
scoped_ptr<GDataRootDirectoryProto> proto(
new GDataRootDirectoryProto());
bool ok = proto->ParseFromString(serialized_proto);
if (ok) {
const GDataEntryProto& entry_proto =
proto->gdata_directory().gdata_entry();
if (entry_proto.title() != "drive") {
LOG(ERROR) << "Incompatible proto detected (bad title): "
<< entry_proto.title();
return false;
}
// The title field for the root directory was originally empty. Discard
// the proto data if the older format is detected.
if (entry_proto.resource_id() != kGDataRootDirectoryResourceId) {
LOG(ERROR) << "Incompatible proto detected (bad resource ID): "
<< entry_proto.resource_id();
return false;
}
FromProto(*proto.get());
set_origin(FROM_CACHE);
set_refresh_time(base::Time::Now());
}
return ok;
}
| 28,492 |
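Illustrative aside, not part of the dataset row above: a small C sketch of the stricter cache validation the fix introduces, where every identifying field of the deserialized root entry (title and resource ID) is checked and the whole blob is discarded if any field disagrees. The struct and function names are invented; only the literal values "drive" and "folder:root" come from the row above.

#include <string.h>
#include <stdbool.h>
#include <stdio.h>

struct root_entry { const char *title; const char *resource_id; };

static bool accept_cached_root(const struct root_entry *e)
{
    if (strcmp(e->title, "drive") != 0)
        return false;                       /* incompatible format */
    if (strcmp(e->resource_id, "folder:root") != 0)
        return false;                       /* older format: discard it */
    return true;
}

int main(void)
{
    struct root_entry old = { "drive", "" };
    struct root_entry cur = { "drive", "folder:root" };
    printf("old=%d cur=%d\n",
           accept_cached_root(&old), accept_cached_root(&cur));
    return 0;
}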
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: bool SampleTable::isValid() const {
return mChunkOffsetOffset >= 0
&& mSampleToChunkOffset >= 0
&& mSampleSizeOffset >= 0
&& !mTimeToSample.empty();
}
Commit Message: SampleTable.cpp: Fixed a regression caused by a fix for bug
28076789.
Detail: Before the original fix
(Id207f369ab7b27787d83f5d8fc48dc53ed9fcdc9) for 28076789, the
code allowed a time-to-sample table size to be 0. The change
made in that fix disallowed such situation, which in fact should
be allowed. This current patch allows it again while maintaining
the security of the previous fix.
Bug: 28288202
Bug: 28076789
Change-Id: I1c9a60c7f0cfcbd3d908f24998dde15d5136a295
CWE ID: CWE-20 | bool SampleTable::isValid() const {
return mChunkOffsetOffset >= 0
&& mSampleToChunkOffset >= 0
&& mSampleSizeOffset >= 0
&& mHasTimeToSample;
}
| 13,572 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: RenderFrameObserverNatives::RenderFrameObserverNatives(ScriptContext* context)
: ObjectBackedNativeHandler(context), weak_ptr_factory_(this) {
RouteFunction(
"OnDocumentElementCreated",
base::Bind(&RenderFrameObserverNatives::OnDocumentElementCreated,
base::Unretained(this)));
}
Commit Message: [Extensions] Expand bindings access checks
BUG=601149
BUG=601073
Review URL: https://codereview.chromium.org/1866103002
Cr-Commit-Position: refs/heads/master@{#387710}
CWE ID: CWE-284 | RenderFrameObserverNatives::RenderFrameObserverNatives(ScriptContext* context)
: ObjectBackedNativeHandler(context), weak_ptr_factory_(this) {
RouteFunction(
"OnDocumentElementCreated", "app.window",
base::Bind(&RenderFrameObserverNatives::OnDocumentElementCreated,
base::Unretained(this)));
}
| 14,997 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
{
struct sock *sk = sock->sk;
struct ipx_sock *ipxs = ipx_sk(sk);
struct sockaddr_ipx *sipx = (struct sockaddr_ipx *)msg->msg_name;
struct ipxhdr *ipx = NULL;
struct sk_buff *skb;
int copied, rc;
lock_sock(sk);
/* put the autobinding in */
if (!ipxs->port) {
struct sockaddr_ipx uaddr;
uaddr.sipx_port = 0;
uaddr.sipx_network = 0;
#ifdef CONFIG_IPX_INTERN
rc = -ENETDOWN;
if (!ipxs->intrfc)
goto out; /* Someone zonked the iface */
memcpy(uaddr.sipx_node, ipxs->intrfc->if_node, IPX_NODE_LEN);
#endif /* CONFIG_IPX_INTERN */
rc = __ipx_bind(sock, (struct sockaddr *)&uaddr,
sizeof(struct sockaddr_ipx));
if (rc)
goto out;
}
rc = -ENOTCONN;
if (sock_flag(sk, SOCK_ZAPPED))
goto out;
skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
flags & MSG_DONTWAIT, &rc);
if (!skb)
goto out;
ipx = ipx_hdr(skb);
copied = ntohs(ipx->ipx_pktsize) - sizeof(struct ipxhdr);
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
rc = skb_copy_datagram_iovec(skb, sizeof(struct ipxhdr), msg->msg_iov,
copied);
if (rc)
goto out_free;
if (skb->tstamp.tv64)
sk->sk_stamp = skb->tstamp;
msg->msg_namelen = sizeof(*sipx);
if (sipx) {
sipx->sipx_family = AF_IPX;
sipx->sipx_port = ipx->ipx_source.sock;
memcpy(sipx->sipx_node, ipx->ipx_source.node, IPX_NODE_LEN);
sipx->sipx_network = IPX_SKB_CB(skb)->ipx_source_net;
sipx->sipx_type = ipx->ipx_type;
sipx->sipx_zero = 0;
}
rc = copied;
out_free:
skb_free_datagram(sk, skb);
out:
release_sock(sk);
return rc;
}
Commit Message: net: rework recvmsg handler msg_name and msg_namelen logic
This patch now always passes msg->msg_namelen as 0. recvmsg handlers must
set msg_namelen to the proper size <= sizeof(struct sockaddr_storage)
to return msg_name to the user.
This prevents numerous uninitialized memory leaks we had in the
recvmsg handlers and makes it harder for new code to accidentally leak
uninitialized memory.
Optimize for the case recvfrom is called with NULL as address. We don't
need to copy the address at all, so set it to NULL before invoking the
recvmsg handler. We can do so, because all the recvmsg handlers must
cope with the case a plain read() is called on them. read() also sets
msg_name to NULL.
Also document these changes in include/linux/net.h as suggested by David
Miller.
Changes since RFC:
Set msg->msg_name = NULL if user specified a NULL in msg_name but had a
non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't
affect sendto as it would bail out earlier while trying to copy-in the
address. It also more naturally reflects the logic by the callers of
verify_iovec.
With this change in place I could remove "
if (!uaddr || msg_sys->msg_namelen == 0)
msg->msg_name = NULL
".
This change does not alter the user visible error logic as we ignore
msg_namelen as long as msg_name is NULL.
Also remove two unnecessary curly brackets in ___sys_recvmsg and change
comments to netdev style.
Cc: David Miller <[email protected]>
Suggested-by: Eric Dumazet <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
CWE ID: CWE-20 | static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
{
struct sock *sk = sock->sk;
struct ipx_sock *ipxs = ipx_sk(sk);
struct sockaddr_ipx *sipx = (struct sockaddr_ipx *)msg->msg_name;
struct ipxhdr *ipx = NULL;
struct sk_buff *skb;
int copied, rc;
lock_sock(sk);
/* put the autobinding in */
if (!ipxs->port) {
struct sockaddr_ipx uaddr;
uaddr.sipx_port = 0;
uaddr.sipx_network = 0;
#ifdef CONFIG_IPX_INTERN
rc = -ENETDOWN;
if (!ipxs->intrfc)
goto out; /* Someone zonked the iface */
memcpy(uaddr.sipx_node, ipxs->intrfc->if_node, IPX_NODE_LEN);
#endif /* CONFIG_IPX_INTERN */
rc = __ipx_bind(sock, (struct sockaddr *)&uaddr,
sizeof(struct sockaddr_ipx));
if (rc)
goto out;
}
rc = -ENOTCONN;
if (sock_flag(sk, SOCK_ZAPPED))
goto out;
skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
flags & MSG_DONTWAIT, &rc);
if (!skb)
goto out;
ipx = ipx_hdr(skb);
copied = ntohs(ipx->ipx_pktsize) - sizeof(struct ipxhdr);
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
rc = skb_copy_datagram_iovec(skb, sizeof(struct ipxhdr), msg->msg_iov,
copied);
if (rc)
goto out_free;
if (skb->tstamp.tv64)
sk->sk_stamp = skb->tstamp;
if (sipx) {
sipx->sipx_family = AF_IPX;
sipx->sipx_port = ipx->ipx_source.sock;
memcpy(sipx->sipx_node, ipx->ipx_source.node, IPX_NODE_LEN);
sipx->sipx_network = IPX_SKB_CB(skb)->ipx_source_net;
sipx->sipx_type = ipx->ipx_type;
sipx->sipx_zero = 0;
msg->msg_namelen = sizeof(*sipx);
}
rc = copied;
out_free:
skb_free_datagram(sk, skb);
out:
release_sock(sk);
return rc;
}
| 2,955 |
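Illustrative aside, not part of the dataset row above: a userspace C model of the recvmsg contract the commit describes, in which msg_namelen stays 0 unless the handler really wrote a source address into msg_name, so an uninitialized length can never be reported back to the caller. The fake_* types are stand-ins, not the kernel's struct msghdr or sockaddr_ipx.

#include <string.h>
#include <stdio.h>

struct fake_addr   { unsigned short family, port; unsigned char node[6]; };
struct fake_msghdr { void *msg_name; int msg_namelen; };

static void fill_source_address(struct fake_msghdr *msg,
                                unsigned short port,
                                const unsigned char node[6])
{
    struct fake_addr *a = msg->msg_name;

    if (!a)                 /* caller passed NULL: never touch msg_namelen */
        return;

    memset(a, 0, sizeof(*a));
    a->family = 4;          /* pretend AF_IPX */
    a->port = port;
    memcpy(a->node, node, 6);
    msg->msg_namelen = sizeof(*a);   /* set only after the data is written */
}

int main(void)
{
    unsigned char node[6] = { 1, 2, 3, 4, 5, 6 };
    struct fake_addr addr;
    struct fake_msghdr with = { &addr, 0 }, without = { NULL, 0 };

    fill_source_address(&with, 0x4545, node);
    fill_source_address(&without, 0x4545, node);
    printf("with=%d without=%d\n", with.msg_namelen, without.msg_namelen);
    return 0;
}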
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static inline int map_from_unicode(unsigned code, enum entity_charset charset, unsigned *res)
{
unsigned char found;
const uni_to_enc *table;
size_t table_size;
switch (charset) {
case cs_8859_1:
/* identity mapping of code points to unicode */
if (code > 0xFF) {
return FAILURE;
}
*res = code;
break;
case cs_8859_5:
if (code <= 0xA0 || code == 0xAD /* soft hyphen */) {
*res = code;
} else if (code == 0x2116) {
*res = 0xF0; /* numero sign */
} else if (code == 0xA7) {
*res = 0xFD; /* section sign */
} else if (code >= 0x0401 && code <= 0x044F) {
if (code == 0x040D || code == 0x0450 || code == 0x045D)
return FAILURE;
*res = code - 0x360;
} else {
return FAILURE;
}
break;
case cs_8859_15:
if (code < 0xA4 || (code > 0xBE && code <= 0xFF)) {
*res = code;
} else { /* between A4 and 0xBE */
found = unimap_bsearch(unimap_iso885915,
code, sizeof(unimap_iso885915) / sizeof(*unimap_iso885915));
if (found)
*res = found;
else
return FAILURE;
}
break;
case cs_cp1252:
if (code <= 0x7F || (code >= 0xA0 && code <= 0xFF)) {
*res = code;
} else {
found = unimap_bsearch(unimap_win1252,
code, sizeof(unimap_win1252) / sizeof(*unimap_win1252));
if (found)
*res = found;
else
return FAILURE;
}
break;
case cs_macroman:
if (code == 0x7F)
return FAILURE;
table = unimap_macroman;
table_size = sizeof(unimap_macroman) / sizeof(*unimap_macroman);
goto table_over_7F;
case cs_cp1251:
table = unimap_win1251;
table_size = sizeof(unimap_win1251) / sizeof(*unimap_win1251);
goto table_over_7F;
case cs_koi8r:
table = unimap_koi8r;
table_size = sizeof(unimap_koi8r) / sizeof(*unimap_koi8r);
goto table_over_7F;
case cs_cp866:
table = unimap_cp866;
table_size = sizeof(unimap_cp866) / sizeof(*unimap_cp866);
table_over_7F:
if (code <= 0x7F) {
*res = code;
} else {
found = unimap_bsearch(table, code, table_size);
if (found)
*res = found;
else
return FAILURE;
}
break;
/* from here on, only map the possible characters in the ASCII range.
* to improve support here, it's a matter of building the unicode mappings.
* See <http://www.unicode.org/Public/6.0.0/ucd/Unihan.zip> */
case cs_sjis:
case cs_eucjp:
/* we interpret 0x5C as the Yen symbol. This is not universal.
* See <http://www.w3.org/Submission/japanese-xml/#ambiguity_of_yen> */
if (code >= 0x20 && code <= 0x7D) {
if (code == 0x5C)
return FAILURE;
*res = code;
} else {
return FAILURE;
}
break;
case cs_big5:
case cs_big5hkscs:
case cs_gb2312:
if (code >= 0x20 && code <= 0x7D) {
*res = code;
} else {
return FAILURE;
}
break;
default:
return FAILURE;
}
return SUCCESS;
}
Commit Message: Fix bug #72135 - don't create strings with lengths outside int range
CWE ID: CWE-190 | static inline int map_from_unicode(unsigned code, enum entity_charset charset, unsigned *res)
{
unsigned char found;
const uni_to_enc *table;
size_t table_size;
switch (charset) {
case cs_8859_1:
/* identity mapping of code points to unicode */
if (code > 0xFF) {
return FAILURE;
}
*res = code;
break;
case cs_8859_5:
if (code <= 0xA0 || code == 0xAD /* soft hyphen */) {
*res = code;
} else if (code == 0x2116) {
*res = 0xF0; /* numero sign */
} else if (code == 0xA7) {
*res = 0xFD; /* section sign */
} else if (code >= 0x0401 && code <= 0x044F) {
if (code == 0x040D || code == 0x0450 || code == 0x045D)
return FAILURE;
*res = code - 0x360;
} else {
return FAILURE;
}
break;
case cs_8859_15:
if (code < 0xA4 || (code > 0xBE && code <= 0xFF)) {
*res = code;
} else { /* between A4 and 0xBE */
found = unimap_bsearch(unimap_iso885915,
code, sizeof(unimap_iso885915) / sizeof(*unimap_iso885915));
if (found)
*res = found;
else
return FAILURE;
}
break;
case cs_cp1252:
if (code <= 0x7F || (code >= 0xA0 && code <= 0xFF)) {
*res = code;
} else {
found = unimap_bsearch(unimap_win1252,
code, sizeof(unimap_win1252) / sizeof(*unimap_win1252));
if (found)
*res = found;
else
return FAILURE;
}
break;
case cs_macroman:
if (code == 0x7F)
return FAILURE;
table = unimap_macroman;
table_size = sizeof(unimap_macroman) / sizeof(*unimap_macroman);
goto table_over_7F;
case cs_cp1251:
table = unimap_win1251;
table_size = sizeof(unimap_win1251) / sizeof(*unimap_win1251);
goto table_over_7F;
case cs_koi8r:
table = unimap_koi8r;
table_size = sizeof(unimap_koi8r) / sizeof(*unimap_koi8r);
goto table_over_7F;
case cs_cp866:
table = unimap_cp866;
table_size = sizeof(unimap_cp866) / sizeof(*unimap_cp866);
table_over_7F:
if (code <= 0x7F) {
*res = code;
} else {
found = unimap_bsearch(table, code, table_size);
if (found)
*res = found;
else
return FAILURE;
}
break;
/* from here on, only map the possible characters in the ASCII range.
* to improve support here, it's a matter of building the unicode mappings.
* See <http://www.unicode.org/Public/6.0.0/ucd/Unihan.zip> */
case cs_sjis:
case cs_eucjp:
/* we interpret 0x5C as the Yen symbol. This is not universal.
* See <http://www.w3.org/Submission/japanese-xml/#ambiguity_of_yen> */
if (code >= 0x20 && code <= 0x7D) {
if (code == 0x5C)
return FAILURE;
*res = code;
} else {
return FAILURE;
}
break;
case cs_big5:
case cs_big5hkscs:
case cs_gb2312:
if (code >= 0x20 && code <= 0x7D) {
*res = code;
} else {
return FAILURE;
}
break;
default:
return FAILURE;
}
return SUCCESS;
}
| 24,476 |
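Illustrative aside, not part of the dataset row above: the "fixed" function in this row is unchanged, so the following C sketch is only a guess at the shape of the guard the commit title describes, namely refusing to build a string whose length cannot be represented as a signed int. alloc_string_checked is a hypothetical helper, not a PHP API.

#include <limits.h>
#include <stdlib.h>
#include <stdio.h>

static char *alloc_string_checked(size_t len)
{
    if (len > INT_MAX)          /* length must stay in signed int range */
        return NULL;
    return calloc(len + 1, 1);  /* the +1 cannot overflow after the check */
}

int main(void)
{
    char *ok = alloc_string_checked(16);
    char *bad = alloc_string_checked((size_t)INT_MAX + 7);
    printf("ok=%p bad=%p\n", (void *)ok, (void *)bad);
    free(ok);
    return 0;
}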
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: OMXNodeInstance::OMXNodeInstance(
OMX *owner, const sp<IOMXObserver> &observer, const char *name)
: mOwner(owner),
mNodeID(0),
mHandle(NULL),
mObserver(observer),
mDying(false),
mBufferIDCount(0)
{
mName = ADebug::GetDebugName(name);
DEBUG = ADebug::GetDebugLevelFromProperty(name, "debug.stagefright.omx-debug");
ALOGV("debug level for %s is %d", name, DEBUG);
DEBUG_BUMP = DEBUG;
mNumPortBuffers[0] = 0;
mNumPortBuffers[1] = 0;
mDebugLevelBumpPendingBuffers[0] = 0;
mDebugLevelBumpPendingBuffers[1] = 0;
mMetadataType[0] = kMetadataBufferTypeInvalid;
mMetadataType[1] = kMetadataBufferTypeInvalid;
mSecureBufferType[0] = kSecureBufferTypeUnknown;
mSecureBufferType[1] = kSecureBufferTypeUnknown;
mIsSecure = AString(name).endsWith(".secure");
}
Commit Message: DO NOT MERGE: IOMX: work against metadata buffer spoofing
- Prohibit direct set/getParam/Settings for extensions meant for
OMXNodeInstance alone. This disallows enabling metadata mode
without the knowledge of OMXNodeInstance.
- Use a backup buffer for metadata mode buffers and do not directly
share with clients.
- Disallow setting up metadata mode/tunneling/input surface
after first sendCommand.
- Disallow store-meta for input cross process.
- Disallow emptyBuffer for surface input (via IOMX).
- Fix checking for input surface.
Bug: 29422020
Change-Id: I801c77b80e703903f62e42d76fd2e76a34e4bc8e
(cherry picked from commit 7c3c2fa3e233c656fc8c2fc2a6634b3ecf8a23e8)
CWE ID: CWE-200 | OMXNodeInstance::OMXNodeInstance(
OMX *owner, const sp<IOMXObserver> &observer, const char *name)
: mOwner(owner),
mNodeID(0),
mHandle(NULL),
mObserver(observer),
mDying(false),
mSailed(false),
mQueriedProhibitedExtensions(false),
mBufferIDCount(0)
{
mName = ADebug::GetDebugName(name);
DEBUG = ADebug::GetDebugLevelFromProperty(name, "debug.stagefright.omx-debug");
ALOGV("debug level for %s is %d", name, DEBUG);
DEBUG_BUMP = DEBUG;
mNumPortBuffers[0] = 0;
mNumPortBuffers[1] = 0;
mDebugLevelBumpPendingBuffers[0] = 0;
mDebugLevelBumpPendingBuffers[1] = 0;
mMetadataType[0] = kMetadataBufferTypeInvalid;
mMetadataType[1] = kMetadataBufferTypeInvalid;
mSecureBufferType[0] = kSecureBufferTypeUnknown;
mSecureBufferType[1] = kSecureBufferTypeUnknown;
mIsSecure = AString(name).endsWith(".secure");
}
| 5,209 |
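Illustrative aside, not part of the dataset row above: the fixed constructor only adds the mSailed and mQueriedProhibitedExtensions flags, so this C sketch shows one plausible reading of how such a flag is used, rejecting buffer-mode reconfiguration once the first command has been sent. The node/command model is invented and is not the OMX API.

#include <stdbool.h>
#include <stdio.h>

struct node {
    bool sailed;            /* set after the first command is issued */
    int  metadata_mode;
};

static int send_command(struct node *n, int cmd)
{
    n->sailed = true;       /* the configuration window is now closed */
    return cmd;
}

static int enable_metadata_mode(struct node *n, int mode)
{
    if (n->sailed) {        /* too late: refuse the reconfiguration */
        fprintf(stderr, "rejecting late metadata-mode change\n");
        return -1;
    }
    n->metadata_mode = mode;
    return 0;
}

int main(void)
{
    struct node n = { false, 0 };
    enable_metadata_mode(&n, 1);    /* allowed before any command */
    send_command(&n, 42);
    return enable_metadata_mode(&n, 2) == -1 ? 0 : 1;
}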
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: const Cluster* Segment::GetLast() const
{
if ((m_clusters == NULL) || (m_clusterCount <= 0))
return &m_eos;
const long idx = m_clusterCount - 1;
Cluster* const pCluster = m_clusters[idx];
assert(pCluster);
return pCluster;
}
Commit Message: libwebm: Pull from upstream
Rolling mkvparser from upstream. Primarily for fixing a bug on parsing
failures with certain Opus WebM files.
Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae
The diff is so huge because there were some style clean ups upstream.
But it was ensured that there were no breaking changes when the style
clean ups was done upstream.
Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039
CWE ID: CWE-119 | const Cluster* Segment::GetLast() const
const long idx = m_clusterCount - 1;
Cluster* const pCluster = m_clusters[idx];
assert(pCluster);
return pCluster;
}
| 29,788 |

Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
{
struct hexline *hx;
u8 reset;
int ret,pos=0;
hx = kmalloc(sizeof(*hx), GFP_KERNEL);
if (!hx)
return -ENOMEM;
/* stop the CPU */
reset = 1;
if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
err("could not stop the USB controller CPU.");
while ((ret = dvb_usb_get_hexline(fw, hx, &pos)) > 0) {
deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n", hx->addr, hx->len, hx->chk);
ret = usb_cypress_writemem(udev, hx->addr, hx->data, hx->len);
if (ret != hx->len) {
err("error while transferring firmware (transferred size: %d, block size: %d)",
ret, hx->len);
ret = -EINVAL;
break;
}
}
if (ret < 0) {
err("firmware download failed at %d with %d",pos,ret);
kfree(hx);
return ret;
}
if (ret == 0) {
/* restart the CPU */
reset = 0;
if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
err("could not restart the USB controller CPU.");
ret = -EINVAL;
}
} else
ret = -EIO;
kfree(hx);
return ret;
}
Commit Message: [media] dvb-usb-firmware: don't do DMA on stack
The buffer allocation for the firmware data was changed in
commit 43fab9793c1f ("[media] dvb-usb: don't use stack for firmware load")
but the same applies for the reset value.
Fixes: 43fab9793c1f ("[media] dvb-usb: don't use stack for firmware load")
Cc: [email protected]
Signed-off-by: Stefan Brüns <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]>
CWE ID: CWE-119 | int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
{
struct hexline *hx;
u8 *buf;
int ret, pos = 0;
u16 cpu_cs_register = cypress[type].cpu_cs_register;
buf = kmalloc(sizeof(*hx), GFP_KERNEL);
if (!buf)
return -ENOMEM;
hx = (struct hexline *)buf;
/* stop the CPU */
buf[0] = 1;
if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1)
err("could not stop the USB controller CPU.");
while ((ret = dvb_usb_get_hexline(fw, hx, &pos)) > 0) {
deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n", hx->addr, hx->len, hx->chk);
ret = usb_cypress_writemem(udev, hx->addr, hx->data, hx->len);
if (ret != hx->len) {
err("error while transferring firmware (transferred size: %d, block size: %d)",
ret, hx->len);
ret = -EINVAL;
break;
}
}
if (ret < 0) {
err("firmware download failed at %d with %d",pos,ret);
kfree(buf);
return ret;
}
if (ret == 0) {
/* restart the CPU */
buf[0] = 0;
if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1) {
err("could not restart the USB controller CPU.");
ret = -EINVAL;
}
} else
ret = -EIO;
kfree(buf);
return ret;
}
| 826 |
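Illustrative aside, not part of the dataset row above: a minimal C sketch of the rule the commit enforces, namely that data handed to a DMA-capable transfer routine must live in heap memory rather than in a local variable such as the old u8 reset. fake_transfer and write_reset_value are invented stand-ins, not USB core functions.

#include <stdlib.h>

/* Pretend this hands 'buf' to hardware that requires DMA-safe memory. */
static int fake_transfer(const unsigned char *buf, size_t len)
{
    (void)buf;
    return (int)len;
}

static int write_reset_value(unsigned char value)
{
    unsigned char *buf = malloc(1);   /* heap buffer instead of &value */
    int ret;

    if (!buf)
        return -1;
    buf[0] = value;
    ret = (fake_transfer(buf, 1) == 1) ? 0 : -1;
    free(buf);
    return ret;
}

int main(void)
{
    return write_reset_value(1);
}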
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: get_uncompressed_data(struct archive_read *a, const void **buff, size_t size,
size_t minimum)
{
struct _7zip *zip = (struct _7zip *)a->format->data;
ssize_t bytes_avail;
if (zip->codec == _7Z_COPY && zip->codec2 == (unsigned long)-1) {
/* Copy mode. */
/*
* Note: '1' here is a performance optimization.
* Recall that the decompression layer returns a count of
* available bytes; asking for more than that forces the
* decompressor to combine reads by copying data.
*/
*buff = __archive_read_ahead(a, 1, &bytes_avail);
if (bytes_avail <= 0) {
archive_set_error(&a->archive,
ARCHIVE_ERRNO_FILE_FORMAT,
"Truncated 7-Zip file data");
return (ARCHIVE_FATAL);
}
if ((size_t)bytes_avail >
zip->uncompressed_buffer_bytes_remaining)
bytes_avail = (ssize_t)
zip->uncompressed_buffer_bytes_remaining;
if ((size_t)bytes_avail > size)
bytes_avail = (ssize_t)size;
zip->pack_stream_bytes_unconsumed = bytes_avail;
} else if (zip->uncompressed_buffer_pointer == NULL) {
/* Decompression has failed. */
archive_set_error(&(a->archive),
ARCHIVE_ERRNO_MISC, "Damaged 7-Zip archive");
return (ARCHIVE_FATAL);
} else {
/* Packed mode. */
if (minimum > zip->uncompressed_buffer_bytes_remaining) {
/*
* If remaining uncompressed data size is less than
* the minimum size, fill the buffer up to the
* minimum size.
*/
if (extract_pack_stream(a, minimum) < 0)
return (ARCHIVE_FATAL);
}
if (size > zip->uncompressed_buffer_bytes_remaining)
bytes_avail = (ssize_t)
zip->uncompressed_buffer_bytes_remaining;
else
bytes_avail = (ssize_t)size;
*buff = zip->uncompressed_buffer_pointer;
zip->uncompressed_buffer_pointer += bytes_avail;
}
zip->uncompressed_buffer_bytes_remaining -= bytes_avail;
return (bytes_avail);
}
Commit Message: 7zip: fix crash when parsing certain archives
Fuzzing with CRCs disabled revealed that a call to get_uncompressed_data()
would sometimes fail to return at least 'minimum' bytes. This can cause
the crc32() invocation in header_bytes to read off into invalid memory.
A specially crafted archive can use this to cause a crash.
An ASAN trace is below, but ASAN is not required - an uninstrumented
binary will also crash.
==7719==ERROR: AddressSanitizer: SEGV on unknown address 0x631000040000 (pc 0x7fbdb3b3ec1d bp 0x7ffe77a51310 sp 0x7ffe77a51150 T0)
==7719==The signal is caused by a READ memory access.
#0 0x7fbdb3b3ec1c in crc32_z (/lib/x86_64-linux-gnu/libz.so.1+0x2c1c)
#1 0x84f5eb in header_bytes (/tmp/libarchive/bsdtar+0x84f5eb)
#2 0x856156 in read_Header (/tmp/libarchive/bsdtar+0x856156)
#3 0x84e134 in slurp_central_directory (/tmp/libarchive/bsdtar+0x84e134)
#4 0x849690 in archive_read_format_7zip_read_header (/tmp/libarchive/bsdtar+0x849690)
#5 0x5713b7 in _archive_read_next_header2 (/tmp/libarchive/bsdtar+0x5713b7)
#6 0x570e63 in _archive_read_next_header (/tmp/libarchive/bsdtar+0x570e63)
#7 0x6f08bd in archive_read_next_header (/tmp/libarchive/bsdtar+0x6f08bd)
#8 0x52373f in read_archive (/tmp/libarchive/bsdtar+0x52373f)
#9 0x5257be in tar_mode_x (/tmp/libarchive/bsdtar+0x5257be)
#10 0x51daeb in main (/tmp/libarchive/bsdtar+0x51daeb)
#11 0x7fbdb27cab96 in __libc_start_main /build/glibc-OTsEL5/glibc-2.27/csu/../csu/libc-start.c:310
#12 0x41dd09 in _start (/tmp/libarchive/bsdtar+0x41dd09)
This was primarly done with afl and FairFuzz. Some early corpus entries
may have been generated by qsym.
CWE ID: CWE-125 | get_uncompressed_data(struct archive_read *a, const void **buff, size_t size,
size_t minimum)
{
struct _7zip *zip = (struct _7zip *)a->format->data;
ssize_t bytes_avail;
if (zip->codec == _7Z_COPY && zip->codec2 == (unsigned long)-1) {
/* Copy mode. */
*buff = __archive_read_ahead(a, minimum, &bytes_avail);
if (bytes_avail <= 0) {
archive_set_error(&a->archive,
ARCHIVE_ERRNO_FILE_FORMAT,
"Truncated 7-Zip file data");
return (ARCHIVE_FATAL);
}
if ((size_t)bytes_avail >
zip->uncompressed_buffer_bytes_remaining)
bytes_avail = (ssize_t)
zip->uncompressed_buffer_bytes_remaining;
if ((size_t)bytes_avail > size)
bytes_avail = (ssize_t)size;
zip->pack_stream_bytes_unconsumed = bytes_avail;
} else if (zip->uncompressed_buffer_pointer == NULL) {
/* Decompression has failed. */
archive_set_error(&(a->archive),
ARCHIVE_ERRNO_MISC, "Damaged 7-Zip archive");
return (ARCHIVE_FATAL);
} else {
/* Packed mode. */
if (minimum > zip->uncompressed_buffer_bytes_remaining) {
/*
* If remaining uncompressed data size is less than
* the minimum size, fill the buffer up to the
* minimum size.
*/
if (extract_pack_stream(a, minimum) < 0)
return (ARCHIVE_FATAL);
}
if (size > zip->uncompressed_buffer_bytes_remaining)
bytes_avail = (ssize_t)
zip->uncompressed_buffer_bytes_remaining;
else
bytes_avail = (ssize_t)size;
*buff = zip->uncompressed_buffer_pointer;
zip->uncompressed_buffer_pointer += bytes_avail;
}
zip->uncompressed_buffer_bytes_remaining -= bytes_avail;
return (bytes_avail);
}
| 1,298 |
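Illustrative aside, not part of the dataset row above: a standalone C sketch of the copy-mode change, where the read-ahead layer is asked for at least 'minimum' bytes up front (instead of 1) so that a later fixed-size consumer, such as the crc32() call mentioned in the commit, can never read past the returned window. read_ahead and the sample stream are invented; this is not the libarchive API.

#include <stddef.h>
#include <stdio.h>

static const unsigned char stream[] = "not quite enough data for a header";

/* Returns a window of at least 'want' bytes at 'offset', or NULL if the
 * stream is too short; '*avail' reports how many bytes are really there. */
static const unsigned char *read_ahead(size_t offset, size_t want,
                                       size_t *avail)
{
    if (offset >= sizeof(stream) || sizeof(stream) - offset < want)
        return NULL;
    *avail = sizeof(stream) - offset;
    return stream + offset;
}

int main(void)
{
    size_t avail = 0;
    const size_t minimum = 64;              /* e.g. a header to checksum */
    const unsigned char *p = read_ahead(0, minimum, &avail);

    if (!p) {                               /* truncated input caught early */
        puts("truncated input detected before any fixed-size read");
        return 0;
    }
    printf("got %zu bytes, safe to hash %zu of them\n", avail, minimum);
    return 0;
}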
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: xmlParseEntityValue(xmlParserCtxtPtr ctxt, xmlChar **orig) {
xmlChar *buf = NULL;
int len = 0;
int size = XML_PARSER_BUFFER_SIZE;
int c, l;
xmlChar stop;
xmlChar *ret = NULL;
const xmlChar *cur = NULL;
xmlParserInputPtr input;
if (RAW == '"') stop = '"';
else if (RAW == '\'') stop = '\'';
else {
xmlFatalErr(ctxt, XML_ERR_ENTITY_NOT_STARTED, NULL);
return(NULL);
}
buf = (xmlChar *) xmlMallocAtomic(size * sizeof(xmlChar));
if (buf == NULL) {
xmlErrMemory(ctxt, NULL);
return(NULL);
}
/*
* The content of the entity definition is copied in a buffer.
*/
ctxt->instate = XML_PARSER_ENTITY_VALUE;
input = ctxt->input;
GROW;
NEXT;
c = CUR_CHAR(l);
/*
* NOTE: 4.4.5 Included in Literal
* When a parameter entity reference appears in a literal entity
* value, ... a single or double quote character in the replacement
* text is always treated as a normal data character and will not
* terminate the literal.
* In practice it means we stop the loop only when back at parsing
* the initial entity and the quote is found
*/
while ((IS_CHAR(c)) && ((c != stop) || /* checked */
(ctxt->input != input))) {
if (len + 5 >= size) {
xmlChar *tmp;
size *= 2;
tmp = (xmlChar *) xmlRealloc(buf, size * sizeof(xmlChar));
if (tmp == NULL) {
xmlErrMemory(ctxt, NULL);
xmlFree(buf);
return(NULL);
}
buf = tmp;
}
COPY_BUF(l,buf,len,c);
NEXTL(l);
/*
* Pop-up of finished entities.
*/
while ((RAW == 0) && (ctxt->inputNr > 1)) /* non input consuming */
xmlPopInput(ctxt);
GROW;
c = CUR_CHAR(l);
if (c == 0) {
GROW;
c = CUR_CHAR(l);
}
}
buf[len] = 0;
/*
* Raise problem w.r.t. '&' and '%' being used in non-entities
* reference constructs. Note Charref will be handled in
* xmlStringDecodeEntities()
*/
cur = buf;
while (*cur != 0) { /* non input consuming */
if ((*cur == '%') || ((*cur == '&') && (cur[1] != '#'))) {
xmlChar *name;
xmlChar tmp = *cur;
cur++;
name = xmlParseStringName(ctxt, &cur);
if ((name == NULL) || (*cur != ';')) {
xmlFatalErrMsgInt(ctxt, XML_ERR_ENTITY_CHAR_ERROR,
"EntityValue: '%c' forbidden except for entities references\n",
tmp);
}
if ((tmp == '%') && (ctxt->inSubset == 1) &&
(ctxt->inputNr == 1)) {
xmlFatalErr(ctxt, XML_ERR_ENTITY_PE_INTERNAL, NULL);
}
if (name != NULL)
xmlFree(name);
if (*cur == 0)
break;
}
cur++;
}
/*
* Then PEReference entities are substituted.
*/
if (c != stop) {
xmlFatalErr(ctxt, XML_ERR_ENTITY_NOT_FINISHED, NULL);
xmlFree(buf);
} else {
NEXT;
/*
* NOTE: 4.4.7 Bypassed
* When a general entity reference appears in the EntityValue in
* an entity declaration, it is bypassed and left as is.
* so XML_SUBSTITUTE_REF is not set here.
*/
ret = xmlStringDecodeEntities(ctxt, buf, XML_SUBSTITUTE_PEREF,
0, 0, 0);
if (orig != NULL)
*orig = buf;
else
xmlFree(buf);
}
return(ret);
}
Commit Message: libxml: XML_PARSER_EOF checks from upstream
BUG=229019
TBR=cpu
Review URL: https://chromiumcodereview.appspot.com/14053009
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@196804 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-119 | xmlParseEntityValue(xmlParserCtxtPtr ctxt, xmlChar **orig) {
xmlChar *buf = NULL;
int len = 0;
int size = XML_PARSER_BUFFER_SIZE;
int c, l;
xmlChar stop;
xmlChar *ret = NULL;
const xmlChar *cur = NULL;
xmlParserInputPtr input;
if (RAW == '"') stop = '"';
else if (RAW == '\'') stop = '\'';
else {
xmlFatalErr(ctxt, XML_ERR_ENTITY_NOT_STARTED, NULL);
return(NULL);
}
buf = (xmlChar *) xmlMallocAtomic(size * sizeof(xmlChar));
if (buf == NULL) {
xmlErrMemory(ctxt, NULL);
return(NULL);
}
/*
* The content of the entity definition is copied in a buffer.
*/
ctxt->instate = XML_PARSER_ENTITY_VALUE;
input = ctxt->input;
GROW;
if (ctxt->instate == XML_PARSER_EOF) {
xmlFree(buf);
return(NULL);
}
NEXT;
c = CUR_CHAR(l);
/*
* NOTE: 4.4.5 Included in Literal
* When a parameter entity reference appears in a literal entity
* value, ... a single or double quote character in the replacement
* text is always treated as a normal data character and will not
* terminate the literal.
* In practice it means we stop the loop only when back at parsing
* the initial entity and the quote is found
*/
while (((IS_CHAR(c)) && ((c != stop) || /* checked */
(ctxt->input != input))) && (ctxt->instate != XML_PARSER_EOF)) {
if (len + 5 >= size) {
xmlChar *tmp;
size *= 2;
tmp = (xmlChar *) xmlRealloc(buf, size * sizeof(xmlChar));
if (tmp == NULL) {
xmlErrMemory(ctxt, NULL);
xmlFree(buf);
return(NULL);
}
buf = tmp;
}
COPY_BUF(l,buf,len,c);
NEXTL(l);
/*
* Pop-up of finished entities.
*/
while ((RAW == 0) && (ctxt->inputNr > 1)) /* non input consuming */
xmlPopInput(ctxt);
GROW;
c = CUR_CHAR(l);
if (c == 0) {
GROW;
c = CUR_CHAR(l);
}
}
buf[len] = 0;
if (ctxt->instate == XML_PARSER_EOF) {
xmlFree(buf);
return(NULL);
}
/*
* Raise problem w.r.t. '&' and '%' being used in non-entities
* reference constructs. Note Charref will be handled in
* xmlStringDecodeEntities()
*/
cur = buf;
while (*cur != 0) { /* non input consuming */
if ((*cur == '%') || ((*cur == '&') && (cur[1] != '#'))) {
xmlChar *name;
xmlChar tmp = *cur;
cur++;
name = xmlParseStringName(ctxt, &cur);
if ((name == NULL) || (*cur != ';')) {
xmlFatalErrMsgInt(ctxt, XML_ERR_ENTITY_CHAR_ERROR,
"EntityValue: '%c' forbidden except for entities references\n",
tmp);
}
if ((tmp == '%') && (ctxt->inSubset == 1) &&
(ctxt->inputNr == 1)) {
xmlFatalErr(ctxt, XML_ERR_ENTITY_PE_INTERNAL, NULL);
}
if (name != NULL)
xmlFree(name);
if (*cur == 0)
break;
}
cur++;
}
/*
* Then PEReference entities are substituted.
*/
if (c != stop) {
xmlFatalErr(ctxt, XML_ERR_ENTITY_NOT_FINISHED, NULL);
xmlFree(buf);
} else {
NEXT;
/*
* NOTE: 4.4.7 Bypassed
* When a general entity reference appears in the EntityValue in
* an entity declaration, it is bypassed and left as is.
* so XML_SUBSTITUTE_REF is not set here.
*/
ret = xmlStringDecodeEntities(ctxt, buf, XML_SUBSTITUTE_PEREF,
0, 0, 0);
if (orig != NULL)
*orig = buf;
else
xmlFree(buf);
}
return(ret);
}
| 1,008 |
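Illustrative aside, not part of the dataset row above: a simplified C model of the upstream hardening, in which the parser's EOF state is re-checked after any operation that can refill or abort the input (GROW in libxml2) and the routine bails out instead of continuing to consume input. The parser struct, grow() and collect_value() are invented stand-ins, not libxml2 functions.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum state { PARSING, PARSER_EOF };

struct parser {
    enum state st;
    const char *cur;
    const char *end;
};

/* May flip the parser into the EOF state (e.g. on a fatal input error). */
static void grow(struct parser *p)
{
    if (p->cur >= p->end)
        p->st = PARSER_EOF;
}

static char *collect_value(struct parser *p, char stop)
{
    size_t len = 0, size = 16;
    char *buf = malloc(size);

    if (!buf)
        return NULL;
    while (p->st != PARSER_EOF && *p->cur != stop) {
        if (len + 1 >= size) {
            char *tmp = realloc(buf, size *= 2);
            if (!tmp) { free(buf); return NULL; }
            buf = tmp;
        }
        buf[len++] = *p->cur++;
        grow(p);                      /* may enter PARSER_EOF */
    }
    if (p->st == PARSER_EOF) {        /* abort rather than keep reading */
        free(buf);
        return NULL;
    }
    buf[len] = '\0';
    return buf;
}

int main(void)
{
    const char *text = "value without closing quote";
    struct parser p = { PARSING, text, text + strlen(text) };
    char *v = collect_value(&p, '"');
    puts(v ? v : "(aborted at EOF)");
    free(v);
    return 0;
}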
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int phar_verify_signature(php_stream *fp, size_t end_of_phar, php_uint32 sig_type, char *sig, int sig_len, char *fname, char **signature, int *signature_len, char **error) /* {{{ */
{
int read_size, len;
zend_off_t read_len;
unsigned char buf[1024];
php_stream_rewind(fp);
switch (sig_type) {
case PHAR_SIG_OPENSSL: {
#ifdef PHAR_HAVE_OPENSSL
BIO *in;
EVP_PKEY *key;
EVP_MD *mdtype = (EVP_MD *) EVP_sha1();
EVP_MD_CTX md_ctx;
#else
int tempsig;
#endif
zend_string *pubkey = NULL;
char *pfile;
php_stream *pfp;
#ifndef PHAR_HAVE_OPENSSL
if (!zend_hash_str_exists(&module_registry, "openssl", sizeof("openssl")-1)) {
if (error) {
spprintf(error, 0, "openssl not loaded");
}
return FAILURE;
}
#endif
/* use __FILE__ . '.pubkey' for public key file */
spprintf(&pfile, 0, "%s.pubkey", fname);
pfp = php_stream_open_wrapper(pfile, "rb", 0, NULL);
efree(pfile);
if (!pfp || !(pubkey = php_stream_copy_to_mem(pfp, PHP_STREAM_COPY_ALL, 0)) || !ZSTR_LEN(pubkey)) {
if (pfp) {
php_stream_close(pfp);
}
if (error) {
spprintf(error, 0, "openssl public key could not be read");
}
return FAILURE;
}
php_stream_close(pfp);
#ifndef PHAR_HAVE_OPENSSL
tempsig = sig_len;
if (FAILURE == phar_call_openssl_signverify(0, fp, end_of_phar, pubkey ? ZSTR_VAL(pubkey) : NULL, pubkey ? ZSTR_LEN(pubkey) : 0, &sig, &tempsig)) {
if (pubkey) {
zend_string_release(pubkey);
}
if (error) {
spprintf(error, 0, "openssl signature could not be verified");
}
return FAILURE;
}
if (pubkey) {
zend_string_release(pubkey);
}
sig_len = tempsig;
#else
in = BIO_new_mem_buf(pubkey ? ZSTR_VAL(pubkey) : NULL, pubkey ? ZSTR_LEN(pubkey) : 0);
if (NULL == in) {
zend_string_release(pubkey);
if (error) {
spprintf(error, 0, "openssl signature could not be processed");
}
return FAILURE;
}
key = PEM_read_bio_PUBKEY(in, NULL,NULL, NULL);
BIO_free(in);
zend_string_release(pubkey);
if (NULL == key) {
if (error) {
spprintf(error, 0, "openssl signature could not be processed");
}
return FAILURE;
}
EVP_VerifyInit(&md_ctx, mdtype);
read_len = end_of_phar;
if (read_len > sizeof(buf)) {
read_size = sizeof(buf);
} else {
read_size = (int)read_len;
}
php_stream_seek(fp, 0, SEEK_SET);
while (read_size && (len = php_stream_read(fp, (char*)buf, read_size)) > 0) {
EVP_VerifyUpdate (&md_ctx, buf, len);
read_len -= (zend_off_t)len;
if (read_len < read_size) {
read_size = (int)read_len;
}
}
if (EVP_VerifyFinal(&md_ctx, (unsigned char *)sig, sig_len, key) != 1) {
/* 1: signature verified, 0: signature does not match, -1: failed signature operation */
EVP_MD_CTX_cleanup(&md_ctx);
if (error) {
spprintf(error, 0, "broken openssl signature");
}
return FAILURE;
}
EVP_MD_CTX_cleanup(&md_ctx);
#endif
*signature_len = phar_hex_str((const char*)sig, sig_len, signature);
}
break;
#ifdef PHAR_HASH_OK
case PHAR_SIG_SHA512: {
unsigned char digest[64];
PHP_SHA512_CTX context;
PHP_SHA512Init(&context);
read_len = end_of_phar;
if (read_len > sizeof(buf)) {
read_size = sizeof(buf);
} else {
read_size = (int)read_len;
}
while ((len = php_stream_read(fp, (char*)buf, read_size)) > 0) {
PHP_SHA512Update(&context, buf, len);
read_len -= (zend_off_t)len;
if (read_len < read_size) {
read_size = (int)read_len;
}
}
PHP_SHA512Final(digest, &context);
if (memcmp(digest, sig, sizeof(digest))) {
if (error) {
spprintf(error, 0, "broken signature");
}
return FAILURE;
}
*signature_len = phar_hex_str((const char*)digest, sizeof(digest), signature);
break;
}
case PHAR_SIG_SHA256: {
unsigned char digest[32];
PHP_SHA256_CTX context;
PHP_SHA256Init(&context);
read_len = end_of_phar;
if (read_len > sizeof(buf)) {
read_size = sizeof(buf);
} else {
read_size = (int)read_len;
}
while ((len = php_stream_read(fp, (char*)buf, read_size)) > 0) {
PHP_SHA256Update(&context, buf, len);
read_len -= (zend_off_t)len;
if (read_len < read_size) {
read_size = (int)read_len;
}
}
PHP_SHA256Final(digest, &context);
if (memcmp(digest, sig, sizeof(digest))) {
if (error) {
spprintf(error, 0, "broken signature");
}
return FAILURE;
}
*signature_len = phar_hex_str((const char*)digest, sizeof(digest), signature);
break;
}
#else
case PHAR_SIG_SHA512:
case PHAR_SIG_SHA256:
if (error) {
spprintf(error, 0, "unsupported signature");
}
return FAILURE;
#endif
case PHAR_SIG_SHA1: {
unsigned char digest[20];
PHP_SHA1_CTX context;
PHP_SHA1Init(&context);
read_len = end_of_phar;
if (read_len > sizeof(buf)) {
read_size = sizeof(buf);
} else {
read_size = (int)read_len;
}
while ((len = php_stream_read(fp, (char*)buf, read_size)) > 0) {
PHP_SHA1Update(&context, buf, len);
read_len -= (zend_off_t)len;
if (read_len < read_size) {
read_size = (int)read_len;
}
}
PHP_SHA1Final(digest, &context);
if (memcmp(digest, sig, sizeof(digest))) {
if (error) {
spprintf(error, 0, "broken signature");
}
return FAILURE;
}
*signature_len = phar_hex_str((const char*)digest, sizeof(digest), signature);
break;
}
case PHAR_SIG_MD5: {
unsigned char digest[16];
PHP_MD5_CTX context;
PHP_MD5Init(&context);
read_len = end_of_phar;
if (read_len > sizeof(buf)) {
read_size = sizeof(buf);
} else {
read_size = (int)read_len;
}
while ((len = php_stream_read(fp, (char*)buf, read_size)) > 0) {
PHP_MD5Update(&context, buf, len);
read_len -= (zend_off_t)len;
if (read_len < read_size) {
read_size = (int)read_len;
}
}
PHP_MD5Final(digest, &context);
if (memcmp(digest, sig, sizeof(digest))) {
if (error) {
spprintf(error, 0, "broken signature");
}
return FAILURE;
}
*signature_len = phar_hex_str((const char*)digest, sizeof(digest), signature);
break;
}
default:
if (error) {
spprintf(error, 0, "broken or unsupported signature");
}
return FAILURE;
}
return SUCCESS;
}
/* }}} */
Commit Message: Fix bug #72928 - Out of bound when verify signature of zip phar in phar_parse_zipfile
(cherry picked from commit 19484ab77466f99c78fc0e677f7e03da0584d6a2)
CWE ID: CWE-119 | int phar_verify_signature(php_stream *fp, size_t end_of_phar, php_uint32 sig_type, char *sig, int sig_len, char *fname, char **signature, int *signature_len, char **error) /* {{{ */
{
int read_size, len;
zend_off_t read_len;
unsigned char buf[1024];
php_stream_rewind(fp);
switch (sig_type) {
case PHAR_SIG_OPENSSL: {
#ifdef PHAR_HAVE_OPENSSL
BIO *in;
EVP_PKEY *key;
EVP_MD *mdtype = (EVP_MD *) EVP_sha1();
EVP_MD_CTX md_ctx;
#else
int tempsig;
#endif
zend_string *pubkey = NULL;
char *pfile;
php_stream *pfp;
#ifndef PHAR_HAVE_OPENSSL
if (!zend_hash_str_exists(&module_registry, "openssl", sizeof("openssl")-1)) {
if (error) {
spprintf(error, 0, "openssl not loaded");
}
return FAILURE;
}
#endif
/* use __FILE__ . '.pubkey' for public key file */
spprintf(&pfile, 0, "%s.pubkey", fname);
pfp = php_stream_open_wrapper(pfile, "rb", 0, NULL);
efree(pfile);
if (!pfp || !(pubkey = php_stream_copy_to_mem(pfp, PHP_STREAM_COPY_ALL, 0)) || !ZSTR_LEN(pubkey)) {
if (pfp) {
php_stream_close(pfp);
}
if (error) {
spprintf(error, 0, "openssl public key could not be read");
}
return FAILURE;
}
php_stream_close(pfp);
#ifndef PHAR_HAVE_OPENSSL
tempsig = sig_len;
if (FAILURE == phar_call_openssl_signverify(0, fp, end_of_phar, pubkey ? ZSTR_VAL(pubkey) : NULL, pubkey ? ZSTR_LEN(pubkey) : 0, &sig, &tempsig)) {
if (pubkey) {
zend_string_release(pubkey);
}
if (error) {
spprintf(error, 0, "openssl signature could not be verified");
}
return FAILURE;
}
if (pubkey) {
zend_string_release(pubkey);
}
sig_len = tempsig;
#else
in = BIO_new_mem_buf(pubkey ? ZSTR_VAL(pubkey) : NULL, pubkey ? ZSTR_LEN(pubkey) : 0);
if (NULL == in) {
zend_string_release(pubkey);
if (error) {
spprintf(error, 0, "openssl signature could not be processed");
}
return FAILURE;
}
key = PEM_read_bio_PUBKEY(in, NULL,NULL, NULL);
BIO_free(in);
zend_string_release(pubkey);
if (NULL == key) {
if (error) {
spprintf(error, 0, "openssl signature could not be processed");
}
return FAILURE;
}
EVP_VerifyInit(&md_ctx, mdtype);
read_len = end_of_phar;
if (read_len > sizeof(buf)) {
read_size = sizeof(buf);
} else {
read_size = (int)read_len;
}
php_stream_seek(fp, 0, SEEK_SET);
while (read_size && (len = php_stream_read(fp, (char*)buf, read_size)) > 0) {
EVP_VerifyUpdate (&md_ctx, buf, len);
read_len -= (zend_off_t)len;
if (read_len < read_size) {
read_size = (int)read_len;
}
}
if (EVP_VerifyFinal(&md_ctx, (unsigned char *)sig, sig_len, key) != 1) {
/* 1: signature verified, 0: signature does not match, -1: failed signature operation */
EVP_MD_CTX_cleanup(&md_ctx);
if (error) {
spprintf(error, 0, "broken openssl signature");
}
return FAILURE;
}
EVP_MD_CTX_cleanup(&md_ctx);
#endif
*signature_len = phar_hex_str((const char*)sig, sig_len, signature);
}
break;
#ifdef PHAR_HASH_OK
case PHAR_SIG_SHA512: {
unsigned char digest[64];
PHP_SHA512_CTX context;
if (sig_len < sizeof(digest)) {
if (error) {
spprintf(error, 0, "broken signature");
}
return FAILURE;
}
PHP_SHA512Init(&context);
read_len = end_of_phar;
if (read_len > sizeof(buf)) {
read_size = sizeof(buf);
} else {
read_size = (int)read_len;
}
while ((len = php_stream_read(fp, (char*)buf, read_size)) > 0) {
PHP_SHA512Update(&context, buf, len);
read_len -= (zend_off_t)len;
if (read_len < read_size) {
read_size = (int)read_len;
}
}
PHP_SHA512Final(digest, &context);
if (memcmp(digest, sig, sizeof(digest))) {
if (error) {
spprintf(error, 0, "broken signature");
}
return FAILURE;
}
*signature_len = phar_hex_str((const char*)digest, sizeof(digest), signature);
break;
}
case PHAR_SIG_SHA256: {
unsigned char digest[32];
PHP_SHA256_CTX context;
if (sig_len < sizeof(digest)) {
if (error) {
spprintf(error, 0, "broken signature");
}
return FAILURE;
}
PHP_SHA256Init(&context);
read_len = end_of_phar;
if (read_len > sizeof(buf)) {
read_size = sizeof(buf);
} else {
read_size = (int)read_len;
}
while ((len = php_stream_read(fp, (char*)buf, read_size)) > 0) {
PHP_SHA256Update(&context, buf, len);
read_len -= (zend_off_t)len;
if (read_len < read_size) {
read_size = (int)read_len;
}
}
PHP_SHA256Final(digest, &context);
if (memcmp(digest, sig, sizeof(digest))) {
if (error) {
spprintf(error, 0, "broken signature");
}
return FAILURE;
}
*signature_len = phar_hex_str((const char*)digest, sizeof(digest), signature);
break;
}
#else
case PHAR_SIG_SHA512:
case PHAR_SIG_SHA256:
if (error) {
spprintf(error, 0, "unsupported signature");
}
return FAILURE;
#endif
case PHAR_SIG_SHA1: {
unsigned char digest[20];
PHP_SHA1_CTX context;
if (sig_len < sizeof(digest)) {
if (error) {
spprintf(error, 0, "broken signature");
}
return FAILURE;
}
PHP_SHA1Init(&context);
read_len = end_of_phar;
if (read_len > sizeof(buf)) {
read_size = sizeof(buf);
} else {
read_size = (int)read_len;
}
while ((len = php_stream_read(fp, (char*)buf, read_size)) > 0) {
PHP_SHA1Update(&context, buf, len);
read_len -= (zend_off_t)len;
if (read_len < read_size) {
read_size = (int)read_len;
}
}
PHP_SHA1Final(digest, &context);
if (memcmp(digest, sig, sizeof(digest))) {
if (error) {
spprintf(error, 0, "broken signature");
}
return FAILURE;
}
*signature_len = phar_hex_str((const char*)digest, sizeof(digest), signature);
break;
}
case PHAR_SIG_MD5: {
unsigned char digest[16];
PHP_MD5_CTX context;
if (sig_len < sizeof(digest)) {
if (error) {
spprintf(error, 0, "broken signature");
}
return FAILURE;
}
PHP_MD5Init(&context);
read_len = end_of_phar;
if (read_len > sizeof(buf)) {
read_size = sizeof(buf);
} else {
read_size = (int)read_len;
}
while ((len = php_stream_read(fp, (char*)buf, read_size)) > 0) {
PHP_MD5Update(&context, buf, len);
read_len -= (zend_off_t)len;
if (read_len < read_size) {
read_size = (int)read_len;
}
}
PHP_MD5Final(digest, &context);
if (memcmp(digest, sig, sizeof(digest))) {
if (error) {
spprintf(error, 0, "broken signature");
}
return FAILURE;
}
*signature_len = phar_hex_str((const char*)digest, sizeof(digest), signature);
break;
}
default:
if (error) {
spprintf(error, 0, "broken or unsupported signature");
}
return FAILURE;
}
return SUCCESS;
}
/* }}} */
| 10,805 |
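The clamped read loop above is repeated once per digest algorithm; the same bounded-digest pattern can be expressed once against OpenSSL's EVP interface. A minimal C sketch (OpenSSL 1.1+ function names; the stream and length handling here are illustrative, not phar's):

#include <openssl/evp.h>
#include <stdio.h>

/* Digest exactly `remaining` bytes of `fp` with SHA-256.  Mirrors the
 * read_size clamp above: never read past the signed region, and fail if
 * the stream ends early. */
static int digest_prefix(FILE *fp, size_t remaining, unsigned char out[32])
{
    unsigned char buf[1024];
    unsigned int out_len = 0;
    EVP_MD_CTX *ctx = EVP_MD_CTX_new();

    if (ctx == NULL || EVP_DigestInit_ex(ctx, EVP_sha256(), NULL) != 1)
        goto fail;
    while (remaining > 0) {
        size_t want = remaining < sizeof(buf) ? remaining : sizeof(buf);
        size_t got = fread(buf, 1, want, fp);
        if (got == 0 || EVP_DigestUpdate(ctx, buf, got) != 1)
            goto fail;
        remaining -= got;
    }
    if (EVP_DigestFinal_ex(ctx, out, &out_len) != 1)
        goto fail;
    EVP_MD_CTX_free(ctx);
    return out_len == 32;
fail:
    EVP_MD_CTX_free(ctx);
    return 0;
}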
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void RenderWidgetHostViewAura::CopyFromCompositingSurface(
const gfx::Rect& src_subrect,
const gfx::Size& dst_size,
const base::Callback<void(bool)>& callback,
skia::PlatformBitmap* output) {
base::ScopedClosureRunner scoped_callback_runner(base::Bind(callback, false));
std::map<uint64, scoped_refptr<ui::Texture> >::iterator it =
image_transport_clients_.find(current_surface_);
if (it == image_transport_clients_.end())
return;
ui::Texture* container = it->second;
DCHECK(container);
gfx::Size dst_size_in_pixel = ConvertSizeToPixel(this, dst_size);
if (!output->Allocate(
dst_size_in_pixel.width(), dst_size_in_pixel.height(), true))
return;
ImageTransportFactory* factory = ImageTransportFactory::GetInstance();
GLHelper* gl_helper = factory->GetGLHelper();
if (!gl_helper)
return;
unsigned char* addr = static_cast<unsigned char*>(
output->GetBitmap().getPixels());
scoped_callback_runner.Release();
base::Callback<void(bool)> wrapper_callback = base::Bind(
&RenderWidgetHostViewAura::CopyFromCompositingSurfaceFinished,
AsWeakPtr(),
callback);
++pending_thumbnail_tasks_;
gfx::Rect src_subrect_in_gl = src_subrect;
src_subrect_in_gl.set_y(GetViewBounds().height() - src_subrect.bottom());
gfx::Rect src_subrect_in_pixel = ConvertRectToPixel(this, src_subrect_in_gl);
gl_helper->CropScaleReadbackAndCleanTexture(container->PrepareTexture(),
container->size(),
src_subrect_in_pixel,
dst_size_in_pixel,
addr,
wrapper_callback);
}
Commit Message: Implement TextureImageTransportSurface using texture mailbox
This has a couple of advantages:
- allow tearing down and recreating the UI parent context without
losing the renderer contexts
- do not require a context to be able to generate textures when
creating the GLSurfaceHandle
- clearer ownership semantics that potentially allows for more
robust and easier lost context handling/thumbnailing/etc., since a texture is at
any given time owned by either: UI parent, mailbox, or
TextureImageTransportSurface
- simplify frontbuffer protection logic;
the frontbuffer textures are now owned by RWHV where they are refcounted
The TextureImageTransportSurface informs RenderWidgetHostView of the
mailbox names for the front- and backbuffer textures by
associating them with a surface_handle (1 or 2) in the AcceleratedSurfaceNew message.
During SwapBuffers() or PostSubBuffer() cycles, it then uses
produceTextureCHROMIUM() and consumeTextureCHROMIUM()
to transfer ownership between renderer and browser compositor.
RWHV sends back the surface_handle of the buffer being returned with the Swap ACK
(or 0 if no buffer is being returned in which case TextureImageTransportSurface will
allocate a new texture - note that this could be used to
simply keep textures for thumbnailing).
BUG=154815,139616
[email protected]
Review URL: https://chromiumcodereview.appspot.com/11194042
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@171569 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: | void RenderWidgetHostViewAura::CopyFromCompositingSurface(
const gfx::Rect& src_subrect,
const gfx::Size& dst_size,
const base::Callback<void(bool)>& callback,
skia::PlatformBitmap* output) {
base::ScopedClosureRunner scoped_callback_runner(base::Bind(callback, false));
std::map<uint64, scoped_refptr<ui::Texture> >::iterator it =
image_transport_clients_.find(current_surface_);
if (it == image_transport_clients_.end())
return;
ui::Texture* container = it->second;
DCHECK(container);
gfx::Size dst_size_in_pixel = ConvertSizeToPixel(this, dst_size);
if (!output->Allocate(
dst_size_in_pixel.width(), dst_size_in_pixel.height(), true))
return;
ImageTransportFactory* factory = ImageTransportFactory::GetInstance();
GLHelper* gl_helper = factory->GetGLHelper();
if (!gl_helper)
return;
unsigned char* addr = static_cast<unsigned char*>(
output->GetBitmap().getPixels());
scoped_callback_runner.Release();
// own completion handlers (where we can try to free the frontbuffer).
base::Callback<void(bool)> wrapper_callback = base::Bind(
&RenderWidgetHostViewAura::CopyFromCompositingSurfaceFinished,
AsWeakPtr(),
callback);
++pending_thumbnail_tasks_;
gfx::Rect src_subrect_in_gl = src_subrect;
src_subrect_in_gl.set_y(GetViewBounds().height() - src_subrect.bottom());
gfx::Rect src_subrect_in_pixel = ConvertRectToPixel(this, src_subrect_in_gl);
gl_helper->CropScaleReadbackAndCleanTexture(container->PrepareTexture(),
container->size(),
src_subrect_in_pixel,
dst_size_in_pixel,
addr,
wrapper_callback);
}
| 3,316 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int sequencer_write(int dev, struct file *file, const char __user *buf, int count)
{
unsigned char event_rec[EV_SZ], ev_code;
int p = 0, c, ev_size;
int mode = translate_mode(file);
dev = dev >> 4;
DEB(printk("sequencer_write(dev=%d, count=%d)\n", dev, count));
if (mode == OPEN_READ)
return -EIO;
c = count;
while (c >= 4)
{
if (copy_from_user((char *) event_rec, &(buf)[p], 4))
goto out;
ev_code = event_rec[0];
if (ev_code == SEQ_FULLSIZE)
{
int err, fmt;
dev = *(unsigned short *) &event_rec[2];
if (dev < 0 || dev >= max_synthdev || synth_devs[dev] == NULL)
return -ENXIO;
if (!(synth_open_mask & (1 << dev)))
return -ENXIO;
fmt = (*(short *) &event_rec[0]) & 0xffff;
err = synth_devs[dev]->load_patch(dev, fmt, buf, p + 4, c, 0);
if (err < 0)
return err;
return err;
}
if (ev_code >= 128)
{
if (seq_mode == SEQ_2 && ev_code == SEQ_EXTENDED)
{
printk(KERN_WARNING "Sequencer: Invalid level 2 event %x\n", ev_code);
return -EINVAL;
}
ev_size = 8;
if (c < ev_size)
{
if (!seq_playing)
seq_startplay();
return count - c;
}
if (copy_from_user((char *)&event_rec[4],
&(buf)[p + 4], 4))
goto out;
}
else
{
if (seq_mode == SEQ_2)
{
printk(KERN_WARNING "Sequencer: 4 byte event in level 2 mode\n");
return -EINVAL;
}
ev_size = 4;
if (event_rec[0] != SEQ_MIDIPUTC)
obsolete_api_used = 1;
}
if (event_rec[0] == SEQ_MIDIPUTC)
{
if (!midi_opened[event_rec[2]])
{
int err, mode;
int dev = event_rec[2];
if (dev >= max_mididev || midi_devs[dev]==NULL)
{
/*printk("Sequencer Error: Nonexistent MIDI device %d\n", dev);*/
return -ENXIO;
}
mode = translate_mode(file);
if ((err = midi_devs[dev]->open(dev, mode,
sequencer_midi_input, sequencer_midi_output)) < 0)
{
seq_reset();
printk(KERN_WARNING "Sequencer Error: Unable to open Midi #%d\n", dev);
return err;
}
midi_opened[dev] = 1;
}
}
if (!seq_queue(event_rec, (file->f_flags & (O_NONBLOCK) ? 1 : 0)))
{
int processed = count - c;
if (!seq_playing)
seq_startplay();
if (!processed && (file->f_flags & O_NONBLOCK))
return -EAGAIN;
else
return processed;
}
p += ev_size;
c -= ev_size;
}
if (!seq_playing)
seq_startplay();
out:
return count;
}
Commit Message: sound/oss: remove offset from load_patch callbacks
Was: [PATCH] sound/oss/midi_synth: prevent underflow, use of
uninitialized value, and signedness issue
The offset passed to midi_synth_load_patch() can be essentially
arbitrary. If it's greater than the header length, this will result in
a copy_from_user(dst, src, negative_val). While this will just return
-EFAULT on x86, on other architectures this may cause memory corruption.
Additionally, the length field of the sysex_info structure may not be
initialized prior to its use. Finally, a signed comparison may result
in an unintentionally large loop.
On suggestion by Takashi Iwai, version two removes the offset argument
from the load_patch callbacks entirely, which also resolves similar
issues in opl3. Compile tested only.
v3 adjusts comments and hopefully gets copy offsets right.
Signed-off-by: Dan Rosenberg <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]>
CWE ID: CWE-189 | int sequencer_write(int dev, struct file *file, const char __user *buf, int count)
{
unsigned char event_rec[EV_SZ], ev_code;
int p = 0, c, ev_size;
int mode = translate_mode(file);
dev = dev >> 4;
DEB(printk("sequencer_write(dev=%d, count=%d)\n", dev, count));
if (mode == OPEN_READ)
return -EIO;
c = count;
while (c >= 4)
{
if (copy_from_user((char *) event_rec, &(buf)[p], 4))
goto out;
ev_code = event_rec[0];
if (ev_code == SEQ_FULLSIZE)
{
int err, fmt;
dev = *(unsigned short *) &event_rec[2];
if (dev < 0 || dev >= max_synthdev || synth_devs[dev] == NULL)
return -ENXIO;
if (!(synth_open_mask & (1 << dev)))
return -ENXIO;
fmt = (*(short *) &event_rec[0]) & 0xffff;
err = synth_devs[dev]->load_patch(dev, fmt, buf + p, c, 0);
if (err < 0)
return err;
return err;
}
if (ev_code >= 128)
{
if (seq_mode == SEQ_2 && ev_code == SEQ_EXTENDED)
{
printk(KERN_WARNING "Sequencer: Invalid level 2 event %x\n", ev_code);
return -EINVAL;
}
ev_size = 8;
if (c < ev_size)
{
if (!seq_playing)
seq_startplay();
return count - c;
}
if (copy_from_user((char *)&event_rec[4],
&(buf)[p + 4], 4))
goto out;
}
else
{
if (seq_mode == SEQ_2)
{
printk(KERN_WARNING "Sequencer: 4 byte event in level 2 mode\n");
return -EINVAL;
}
ev_size = 4;
if (event_rec[0] != SEQ_MIDIPUTC)
obsolete_api_used = 1;
}
if (event_rec[0] == SEQ_MIDIPUTC)
{
if (!midi_opened[event_rec[2]])
{
int err, mode;
int dev = event_rec[2];
if (dev >= max_mididev || midi_devs[dev]==NULL)
{
/*printk("Sequencer Error: Nonexistent MIDI device %d\n", dev);*/
return -ENXIO;
}
mode = translate_mode(file);
if ((err = midi_devs[dev]->open(dev, mode,
sequencer_midi_input, sequencer_midi_output)) < 0)
{
seq_reset();
printk(KERN_WARNING "Sequencer Error: Unable to open Midi #%d\n", dev);
return err;
}
midi_opened[dev] = 1;
}
}
if (!seq_queue(event_rec, (file->f_flags & (O_NONBLOCK) ? 1 : 0)))
{
int processed = count - c;
if (!seq_playing)
seq_startplay();
if (!processed && (file->f_flags & O_NONBLOCK))
return -EAGAIN;
else
return processed;
}
p += ev_size;
c -= ev_size;
}
if (!seq_playing)
seq_startplay();
out:
return count;
}
| 9,199 |
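The root cause described in the commit is attacker-influenced offset arithmetic that can go negative before reaching copy_from_user(). The kernel fix removes the offset parameter entirely; a small user-space C sketch of the bounds check that would otherwise have to come first (names are hypothetical):

#include <stddef.h>
#include <string.h>

/* Copy `count` payload bytes starting at `offset` inside a `total`-byte
 * source buffer.  The offset is validated before any subtraction so the
 * arithmetic cannot wrap or go negative. */
static int copy_patch(void *dst, const unsigned char *src, size_t total,
                      size_t offset, size_t count)
{
    if (offset > total || count > total - offset)
        return -1;
    memcpy(dst, src + offset, count);
    return 0;
}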
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: bool ParamTraits<FilePath>::Read(const Message* m,
PickleIterator* iter,
param_type* r) {
FilePath::StringType value;
if (!ParamTraits<FilePath::StringType>::Read(m, iter, &value))
return false;
*r = FilePath(value);
return true;
}
Commit Message: Validate that paths don't contain embedded NULLs at deserialization.
BUG=166867
Review URL: https://chromiumcodereview.appspot.com/11743009
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@174935 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: | bool ParamTraits<FilePath>::Read(const Message* m,
PickleIterator* iter,
param_type* r) {
FilePath::StringType value;
if (!ParamTraits<FilePath::StringType>::Read(m, iter, &value))
return false;
// Reject embedded NULs as they can cause security checks to go awry.
if (value.find(FILE_PATH_LITERAL('\0')) != FilePath::StringType::npos)
return false;
*r = FilePath(value);
return true;
}
| 3,750 |
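The added check rejects FILE_PATH_LITERAL('\0') inside the deserialized string. The same idea in plain C, for any length-prefixed string arriving from an untrusted message, before it is turned into a path (names are illustrative):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* `value` holds `len` bytes read from an untrusted message.  A hidden NUL
 * would make later C-string path handling silently truncate, so reject it
 * outright before constructing a path object. */
static bool path_bytes_ok(const char *value, size_t len)
{
    return memchr(value, '\0', len) == NULL;
}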
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static int store_xauthority(void) {
fs_build_mnt_dir();
char *src;
char *dest = RUN_XAUTHORITY_FILE;
FILE *fp = fopen(dest, "w");
if (fp) {
fprintf(fp, "\n");
SET_PERMS_STREAM(fp, getuid(), getgid(), 0600);
fclose(fp);
}
if (asprintf(&src, "%s/.Xauthority", cfg.homedir) == -1)
errExit("asprintf");
struct stat s;
if (stat(src, &s) == 0) {
if (is_link(src)) {
fprintf(stderr, "Warning: invalid .Xauthority file\n");
return 0;
}
copy_file_as_user(src, dest, getuid(), getgid(), 0600);
fs_logger2("clone", dest);
return 1; // file copied
}
return 0;
}
Commit Message: security fix
CWE ID: CWE-269 | static int store_xauthority(void) {
fs_build_mnt_dir();
char *src;
char *dest = RUN_XAUTHORITY_FILE;
// create an empty file as root, and change ownership to user
FILE *fp = fopen(dest, "w");
if (fp) {
fprintf(fp, "\n");
SET_PERMS_STREAM(fp, getuid(), getgid(), 0600);
fclose(fp);
}
if (asprintf(&src, "%s/.Xauthority", cfg.homedir) == -1)
errExit("asprintf");
struct stat s;
if (stat(src, &s) == 0) {
if (is_link(src)) {
fprintf(stderr, "Warning: invalid .Xauthority file\n");
return 0;
}
copy_file_as_user(src, dest, getuid(), getgid(), 0600);
fs_logger2("clone", dest);
return 1; // file copied
}
return 0;
}
| 13,954 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: vmnc_handle_wmvi_rectangle (GstVMncDec * dec, struct RfbRectangle *rect,
const guint8 * data, int len, gboolean decode)
{
GstVideoFormat format;
gint bpp, tc;
guint32 redmask, greenmask, bluemask;
guint32 endianness, dataendianness;
GstVideoCodecState *state;
/* A WMVi rectangle has a 16byte payload */
if (len < 16) {
GST_DEBUG_OBJECT (dec, "Bad WMVi rect: too short");
return ERROR_INSUFFICIENT_DATA;
}
/* We only compare 13 bytes; ignoring the 3 padding bytes at the end */
if (dec->have_format && memcmp (data, dec->format.descriptor, 13) == 0) {
/* Nothing changed, so just exit */
return 16;
}
/* Store the whole block for simple comparison later */
memcpy (dec->format.descriptor, data, 16);
if (rect->x != 0 || rect->y != 0) {
GST_WARNING_OBJECT (dec, "Bad WMVi rect: wrong coordinates");
return ERROR_INVALID;
}
bpp = data[0];
dec->format.depth = data[1];
dec->format.big_endian = data[2];
dataendianness = data[2] ? G_BIG_ENDIAN : G_LITTLE_ENDIAN;
tc = data[3];
if (bpp != 8 && bpp != 16 && bpp != 32) {
GST_WARNING_OBJECT (dec, "Bad bpp value: %d", bpp);
return ERROR_INVALID;
}
if (!tc) {
GST_WARNING_OBJECT (dec, "Paletted video not supported");
return ERROR_INVALID;
}
dec->format.bytes_per_pixel = bpp / 8;
dec->format.width = rect->width;
dec->format.height = rect->height;
redmask = (guint32) (RFB_GET_UINT16 (data + 4)) << data[10];
greenmask = (guint32) (RFB_GET_UINT16 (data + 6)) << data[11];
bluemask = (guint32) (RFB_GET_UINT16 (data + 8)) << data[12];
GST_DEBUG_OBJECT (dec, "Red: mask %d, shift %d",
RFB_GET_UINT16 (data + 4), data[10]);
GST_DEBUG_OBJECT (dec, "Green: mask %d, shift %d",
RFB_GET_UINT16 (data + 6), data[11]);
GST_DEBUG_OBJECT (dec, "Blue: mask %d, shift %d",
RFB_GET_UINT16 (data + 8), data[12]);
GST_DEBUG_OBJECT (dec, "BPP: %d. endianness: %s", bpp,
data[2] ? "big" : "little");
/* GStreamer's RGB caps are a bit weird. */
if (bpp == 8) {
endianness = G_BYTE_ORDER; /* Doesn't matter */
} else if (bpp == 16) {
/* We require host-endian. */
endianness = G_BYTE_ORDER;
} else { /* bpp == 32 */
/* We require big endian */
endianness = G_BIG_ENDIAN;
if (endianness != dataendianness) {
redmask = GUINT32_SWAP_LE_BE (redmask);
greenmask = GUINT32_SWAP_LE_BE (greenmask);
bluemask = GUINT32_SWAP_LE_BE (bluemask);
}
}
format = gst_video_format_from_masks (dec->format.depth, bpp, endianness,
redmask, greenmask, bluemask, 0);
GST_DEBUG_OBJECT (dec, "From depth: %d bpp: %u endianess: %s redmask: %X "
"greenmask: %X bluemask: %X got format %s",
dec->format.depth, bpp, endianness == G_BIG_ENDIAN ? "BE" : "LE",
GUINT32_FROM_BE (redmask), GUINT32_FROM_BE (greenmask),
GUINT32_FROM_BE (bluemask),
format == GST_VIDEO_FORMAT_UNKNOWN ? "UNKOWN" :
gst_video_format_to_string (format));
if (format == GST_VIDEO_FORMAT_UNKNOWN) {
GST_WARNING_OBJECT (dec, "Video format unknown to GStreamer");
return ERROR_INVALID;
}
dec->have_format = TRUE;
if (!decode) {
GST_LOG_OBJECT (dec, "Parsing, not setting caps");
return 16;
}
state = gst_video_decoder_set_output_state (GST_VIDEO_DECODER (dec), format,
rect->width, rect->height, dec->input_state);
gst_video_codec_state_unref (state);
g_free (dec->imagedata);
dec->imagedata = g_malloc (dec->format.width * dec->format.height *
dec->format.bytes_per_pixel);
GST_DEBUG_OBJECT (dec, "Allocated image data at %p", dec->imagedata);
dec->format.stride = dec->format.width * dec->format.bytes_per_pixel;
return 16;
}
Commit Message:
CWE ID: CWE-200 | vmnc_handle_wmvi_rectangle (GstVMncDec * dec, struct RfbRectangle *rect,
const guint8 * data, int len, gboolean decode)
{
GstVideoFormat format;
gint bpp, tc;
guint32 redmask, greenmask, bluemask;
guint32 endianness, dataendianness;
GstVideoCodecState *state;
/* A WMVi rectangle has a 16byte payload */
if (len < 16) {
GST_DEBUG_OBJECT (dec, "Bad WMVi rect: too short");
return ERROR_INSUFFICIENT_DATA;
}
/* We only compare 13 bytes; ignoring the 3 padding bytes at the end */
if (dec->have_format && memcmp (data, dec->format.descriptor, 13) == 0) {
/* Nothing changed, so just exit */
return 16;
}
/* Store the whole block for simple comparison later */
memcpy (dec->format.descriptor, data, 16);
if (rect->x != 0 || rect->y != 0) {
GST_WARNING_OBJECT (dec, "Bad WMVi rect: wrong coordinates");
return ERROR_INVALID;
}
bpp = data[0];
dec->format.depth = data[1];
dec->format.big_endian = data[2];
dataendianness = data[2] ? G_BIG_ENDIAN : G_LITTLE_ENDIAN;
tc = data[3];
if (bpp != 8 && bpp != 16 && bpp != 32) {
GST_WARNING_OBJECT (dec, "Bad bpp value: %d", bpp);
return ERROR_INVALID;
}
if (!tc) {
GST_WARNING_OBJECT (dec, "Paletted video not supported");
return ERROR_INVALID;
}
dec->format.bytes_per_pixel = bpp / 8;
dec->format.width = rect->width;
dec->format.height = rect->height;
redmask = (guint32) (RFB_GET_UINT16 (data + 4)) << data[10];
greenmask = (guint32) (RFB_GET_UINT16 (data + 6)) << data[11];
bluemask = (guint32) (RFB_GET_UINT16 (data + 8)) << data[12];
GST_DEBUG_OBJECT (dec, "Red: mask %d, shift %d",
RFB_GET_UINT16 (data + 4), data[10]);
GST_DEBUG_OBJECT (dec, "Green: mask %d, shift %d",
RFB_GET_UINT16 (data + 6), data[11]);
GST_DEBUG_OBJECT (dec, "Blue: mask %d, shift %d",
RFB_GET_UINT16 (data + 8), data[12]);
GST_DEBUG_OBJECT (dec, "BPP: %d. endianness: %s", bpp,
data[2] ? "big" : "little");
/* GStreamer's RGB caps are a bit weird. */
if (bpp == 8) {
endianness = G_BYTE_ORDER; /* Doesn't matter */
} else if (bpp == 16) {
/* We require host-endian. */
endianness = G_BYTE_ORDER;
} else { /* bpp == 32 */
/* We require big endian */
endianness = G_BIG_ENDIAN;
if (endianness != dataendianness) {
redmask = GUINT32_SWAP_LE_BE (redmask);
greenmask = GUINT32_SWAP_LE_BE (greenmask);
bluemask = GUINT32_SWAP_LE_BE (bluemask);
}
}
format = gst_video_format_from_masks (dec->format.depth, bpp, endianness,
redmask, greenmask, bluemask, 0);
GST_DEBUG_OBJECT (dec, "From depth: %d bpp: %u endianess: %s redmask: %X "
"greenmask: %X bluemask: %X got format %s",
dec->format.depth, bpp, endianness == G_BIG_ENDIAN ? "BE" : "LE",
GUINT32_FROM_BE (redmask), GUINT32_FROM_BE (greenmask),
GUINT32_FROM_BE (bluemask),
format == GST_VIDEO_FORMAT_UNKNOWN ? "UNKOWN" :
gst_video_format_to_string (format));
if (format == GST_VIDEO_FORMAT_UNKNOWN) {
GST_WARNING_OBJECT (dec, "Video format unknown to GStreamer");
return ERROR_INVALID;
}
dec->have_format = TRUE;
if (!decode) {
GST_LOG_OBJECT (dec, "Parsing, not setting caps");
return 16;
}
state = gst_video_decoder_set_output_state (GST_VIDEO_DECODER (dec), format,
rect->width, rect->height, dec->input_state);
gst_video_codec_state_unref (state);
g_free (dec->imagedata);
dec->imagedata = g_malloc0 (dec->format.width * dec->format.height *
dec->format.bytes_per_pixel);
GST_DEBUG_OBJECT (dec, "Allocated image data at %p", dec->imagedata);
dec->format.stride = dec->format.width * dec->format.bytes_per_pixel;
return 16;
}
| 18,302 |
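The fix is the g_malloc → g_malloc0 swap, so rectangles that are never decoded into the frame can only expose zeros rather than stale heap bytes. An equivalent stand-alone C sketch using calloc, which also guards the size multiplication:

#include <stdint.h>
#include <stdlib.h>

/* Zero-filled frame buffer: regions that are never written can only ever
 * leak zeros.  calloc additionally checks the count*size multiplication
 * for overflow, unlike malloc(w * h * bpp). */
static unsigned char *alloc_frame(size_t width, size_t height,
                                  size_t bytes_per_pixel)
{
    if (height != 0 && width > SIZE_MAX / height)
        return NULL;
    return calloc(width * height, bytes_per_pixel);
}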
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: user_change_icon_file_authorized_cb (Daemon *daemon,
User *user,
GDBusMethodInvocation *context,
gpointer data)
{
g_autofree gchar *filename = NULL;
g_autoptr(GFile) file = NULL;
g_autoptr(GFileInfo) info = NULL;
guint32 mode;
GFileType type;
guint64 size;
filename = g_strdup (data);
if (filename == NULL ||
*filename == '\0') {
g_autofree gchar *dest_path = NULL;
g_autoptr(GFile) dest = NULL;
g_autoptr(GError) error = NULL;
g_clear_pointer (&filename, g_free);
dest_path = g_build_filename (ICONDIR, accounts_user_get_user_name (ACCOUNTS_USER (user)), NULL);
dest = g_file_new_for_path (dest_path);
if (!g_file_delete (dest, NULL, &error) &&
!g_error_matches (error, G_IO_ERROR, G_IO_ERROR_NOT_FOUND)) {
throw_error (context, ERROR_FAILED, "failed to remove user icon, %s", error->message);
return;
}
goto icon_saved;
}
file = g_file_new_for_path (filename);
info = g_file_query_info (file, G_FILE_ATTRIBUTE_UNIX_MODE ","
G_FILE_ATTRIBUTE_STANDARD_TYPE ","
G_FILE_ATTRIBUTE_STANDARD_SIZE,
return;
}
Commit Message:
CWE ID: CWE-22 | user_change_icon_file_authorized_cb (Daemon *daemon,
User *user,
GDBusMethodInvocation *context,
gpointer data)
{
g_autofree gchar *filename = NULL;
g_autoptr(GFile) file = NULL;
g_autoptr(GFileInfo) info = NULL;
guint32 mode;
GFileType type;
guint64 size;
filename = g_strdup (data);
if (filename == NULL ||
*filename == '\0') {
g_autofree gchar *dest_path = NULL;
g_autoptr(GFile) dest = NULL;
g_autoptr(GError) error = NULL;
g_clear_pointer (&filename, g_free);
dest_path = g_build_filename (ICONDIR, accounts_user_get_user_name (ACCOUNTS_USER (user)), NULL);
dest = g_file_new_for_path (dest_path);
if (!g_file_delete (dest, NULL, &error) &&
!g_error_matches (error, G_IO_ERROR, G_IO_ERROR_NOT_FOUND)) {
throw_error (context, ERROR_FAILED, "failed to remove user icon, %s", error->message);
return;
}
goto icon_saved;
}
file = g_file_new_for_path (filename);
g_clear_pointer (&filename, g_free);
/* Canonicalize path so we can call g_str_has_prefix on it
* below without concern for ../ path components moving outside
* the prefix
*/
filename = g_file_get_path (file);
info = g_file_query_info (file, G_FILE_ATTRIBUTE_UNIX_MODE ","
G_FILE_ATTRIBUTE_STANDARD_TYPE ","
G_FILE_ATTRIBUTE_STANDARD_SIZE,
return;
}
| 22,159 |
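The key line in the fix is filename = g_file_get_path (file), which canonicalizes the caller-supplied path before any later prefix comparison against ICONDIR, so "../" components cannot escape the allowed directory. A C sketch of the same canonicalize-then-check pattern with POSIX realpath(); the directory layout is hypothetical:

#include <limits.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

/* True only if `untrusted` resolves to something under `root`
 * (e.g. "/var/lib/icons/").  `root` must end in '/' so that
 * "/var/lib/icons-evil" cannot pass the prefix test. */
static bool path_is_under(const char *untrusted, const char *root)
{
    char resolved[PATH_MAX];

    /* realpath() follows symlinks and collapses "..", so the check runs
     * against the file that would actually be opened. */
    if (realpath(untrusted, resolved) == NULL)
        return false;
    return strncmp(resolved, root, strlen(root)) == 0;
}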
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: devzvol_readdir(struct vnode *dvp, struct uio *uiop, struct cred *cred,
int *eofp, caller_context_t *ct_unused, int flags_unused)
{
struct sdev_node *sdvp = VTOSDEV(dvp);
char *ptr;
sdcmn_err13(("zv readdir of '%s' %s'", sdvp->sdev_path,
sdvp->sdev_name));
if (strcmp(sdvp->sdev_path, ZVOL_DIR) == 0) {
struct vnode *vp;
rw_exit(&sdvp->sdev_contents);
(void) devname_lookup_func(sdvp, "dsk", &vp, cred,
devzvol_create_dir, SDEV_VATTR);
VN_RELE(vp);
(void) devname_lookup_func(sdvp, "rdsk", &vp, cred,
devzvol_create_dir, SDEV_VATTR);
VN_RELE(vp);
rw_enter(&sdvp->sdev_contents, RW_READER);
return (devname_readdir_func(dvp, uiop, cred, eofp, 0));
}
if (uiop->uio_offset == 0)
devzvol_prunedir(sdvp);
ptr = sdvp->sdev_path + strlen(ZVOL_DIR);
if ((strcmp(ptr, "/dsk") == 0) || (strcmp(ptr, "/rdsk") == 0)) {
rw_exit(&sdvp->sdev_contents);
devzvol_create_pool_dirs(dvp);
rw_enter(&sdvp->sdev_contents, RW_READER);
return (devname_readdir_func(dvp, uiop, cred, eofp, 0));
}
ptr = strchr(ptr + 1, '/') + 1;
rw_exit(&sdvp->sdev_contents);
sdev_iter_datasets(dvp, ZFS_IOC_DATASET_LIST_NEXT, ptr);
rw_enter(&sdvp->sdev_contents, RW_READER);
return (devname_readdir_func(dvp, uiop, cred, eofp, 0));
}
Commit Message: 5421 devzvol_readdir() needs to be more careful with strchr
Reviewed by: Keith Wesolowski <[email protected]>
Reviewed by: Jerry Jelinek <[email protected]>
Approved by: Dan McDonald <[email protected]>
CWE ID: | devzvol_readdir(struct vnode *dvp, struct uio *uiop, struct cred *cred,
int *eofp, caller_context_t *ct_unused, int flags_unused)
{
struct sdev_node *sdvp = VTOSDEV(dvp);
char *ptr;
sdcmn_err13(("zv readdir of '%s' %s'", sdvp->sdev_path,
sdvp->sdev_name));
if (strcmp(sdvp->sdev_path, ZVOL_DIR) == 0) {
struct vnode *vp;
rw_exit(&sdvp->sdev_contents);
(void) devname_lookup_func(sdvp, "dsk", &vp, cred,
devzvol_create_dir, SDEV_VATTR);
VN_RELE(vp);
(void) devname_lookup_func(sdvp, "rdsk", &vp, cred,
devzvol_create_dir, SDEV_VATTR);
VN_RELE(vp);
rw_enter(&sdvp->sdev_contents, RW_READER);
return (devname_readdir_func(dvp, uiop, cred, eofp, 0));
}
if (uiop->uio_offset == 0)
devzvol_prunedir(sdvp);
ptr = sdvp->sdev_path + strlen(ZVOL_DIR);
if ((strcmp(ptr, "/dsk") == 0) || (strcmp(ptr, "/rdsk") == 0)) {
rw_exit(&sdvp->sdev_contents);
devzvol_create_pool_dirs(dvp);
rw_enter(&sdvp->sdev_contents, RW_READER);
return (devname_readdir_func(dvp, uiop, cred, eofp, 0));
}
ptr = strchr(ptr + 1, '/');
if (ptr == NULL)
return (ENOENT);
ptr++;
rw_exit(&sdvp->sdev_contents);
sdev_iter_datasets(dvp, ZFS_IOC_DATASET_LIST_NEXT, ptr);
rw_enter(&sdvp->sdev_contents, RW_READER);
return (devname_readdir_func(dvp, uiop, cred, eofp, 0));
}
| 20,013 |
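The entire patch is a NULL check on the strchr() result before the "+ 1", since strchr(...) + 1 turns a missing '/' into a pointer computed from NULL. A tiny helper makes the contract explicit:

#include <string.h>

/* Return the component after the first '/', or NULL when there is no '/'
 * at all, forcing the caller to handle the missing-separator case. */
static const char *after_first_slash(const char *path)
{
    const char *slash = strchr(path, '/');
    return slash ? slash + 1 : NULL;
}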
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: status_t BnGraphicBufferProducer::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
switch(code) {
case REQUEST_BUFFER: {
CHECK_INTERFACE(IGraphicBufferProducer, data, reply);
int bufferIdx = data.readInt32();
sp<GraphicBuffer> buffer;
int result = requestBuffer(bufferIdx, &buffer);
reply->writeInt32(buffer != 0);
if (buffer != 0) {
reply->write(*buffer);
}
reply->writeInt32(result);
return NO_ERROR;
}
case SET_BUFFER_COUNT: {
CHECK_INTERFACE(IGraphicBufferProducer, data, reply);
int bufferCount = data.readInt32();
int result = setBufferCount(bufferCount);
reply->writeInt32(result);
return NO_ERROR;
}
case DEQUEUE_BUFFER: {
CHECK_INTERFACE(IGraphicBufferProducer, data, reply);
bool async = static_cast<bool>(data.readInt32());
uint32_t width = data.readUint32();
uint32_t height = data.readUint32();
PixelFormat format = static_cast<PixelFormat>(data.readInt32());
uint32_t usage = data.readUint32();
int buf = 0;
sp<Fence> fence;
int result = dequeueBuffer(&buf, &fence, async, width, height,
format, usage);
reply->writeInt32(buf);
reply->writeInt32(fence != NULL);
if (fence != NULL) {
reply->write(*fence);
}
reply->writeInt32(result);
return NO_ERROR;
}
case DETACH_BUFFER: {
CHECK_INTERFACE(IGraphicBufferProducer, data, reply);
int slot = data.readInt32();
int result = detachBuffer(slot);
reply->writeInt32(result);
return NO_ERROR;
}
case DETACH_NEXT_BUFFER: {
CHECK_INTERFACE(IGraphicBufferProducer, data, reply);
sp<GraphicBuffer> buffer;
sp<Fence> fence;
int32_t result = detachNextBuffer(&buffer, &fence);
reply->writeInt32(result);
if (result == NO_ERROR) {
reply->writeInt32(buffer != NULL);
if (buffer != NULL) {
reply->write(*buffer);
}
reply->writeInt32(fence != NULL);
if (fence != NULL) {
reply->write(*fence);
}
}
return NO_ERROR;
}
case ATTACH_BUFFER: {
CHECK_INTERFACE(IGraphicBufferProducer, data, reply);
sp<GraphicBuffer> buffer = new GraphicBuffer();
data.read(*buffer.get());
int slot = 0;
int result = attachBuffer(&slot, buffer);
reply->writeInt32(slot);
reply->writeInt32(result);
return NO_ERROR;
}
case QUEUE_BUFFER: {
CHECK_INTERFACE(IGraphicBufferProducer, data, reply);
int buf = data.readInt32();
QueueBufferInput input(data);
QueueBufferOutput* const output =
reinterpret_cast<QueueBufferOutput *>(
reply->writeInplace(sizeof(QueueBufferOutput)));
memset(output, 0, sizeof(QueueBufferOutput));
status_t result = queueBuffer(buf, input, output);
reply->writeInt32(result);
return NO_ERROR;
}
case CANCEL_BUFFER: {
CHECK_INTERFACE(IGraphicBufferProducer, data, reply);
int buf = data.readInt32();
sp<Fence> fence = new Fence();
data.read(*fence.get());
cancelBuffer(buf, fence);
return NO_ERROR;
}
case QUERY: {
CHECK_INTERFACE(IGraphicBufferProducer, data, reply);
int value = 0;
int what = data.readInt32();
int res = query(what, &value);
reply->writeInt32(value);
reply->writeInt32(res);
return NO_ERROR;
}
case CONNECT: {
CHECK_INTERFACE(IGraphicBufferProducer, data, reply);
sp<IProducerListener> listener;
if (data.readInt32() == 1) {
listener = IProducerListener::asInterface(data.readStrongBinder());
}
int api = data.readInt32();
bool producerControlledByApp = data.readInt32();
QueueBufferOutput* const output =
reinterpret_cast<QueueBufferOutput *>(
reply->writeInplace(sizeof(QueueBufferOutput)));
status_t res = connect(listener, api, producerControlledByApp, output);
reply->writeInt32(res);
return NO_ERROR;
}
case DISCONNECT: {
CHECK_INTERFACE(IGraphicBufferProducer, data, reply);
int api = data.readInt32();
status_t res = disconnect(api);
reply->writeInt32(res);
return NO_ERROR;
}
case SET_SIDEBAND_STREAM: {
CHECK_INTERFACE(IGraphicBufferProducer, data, reply);
sp<NativeHandle> stream;
if (data.readInt32()) {
stream = NativeHandle::create(data.readNativeHandle(), true);
}
status_t result = setSidebandStream(stream);
reply->writeInt32(result);
return NO_ERROR;
}
case ALLOCATE_BUFFERS: {
CHECK_INTERFACE(IGraphicBufferProducer, data, reply);
bool async = static_cast<bool>(data.readInt32());
uint32_t width = data.readUint32();
uint32_t height = data.readUint32();
PixelFormat format = static_cast<PixelFormat>(data.readInt32());
uint32_t usage = data.readUint32();
allocateBuffers(async, width, height, format, usage);
return NO_ERROR;
}
case ALLOW_ALLOCATION: {
CHECK_INTERFACE(IGraphicBufferProducer, data, reply);
bool allow = static_cast<bool>(data.readInt32());
status_t result = allowAllocation(allow);
reply->writeInt32(result);
return NO_ERROR;
}
case SET_GENERATION_NUMBER: {
CHECK_INTERFACE(IGraphicBufferProducer, data, reply);
uint32_t generationNumber = data.readUint32();
status_t result = setGenerationNumber(generationNumber);
reply->writeInt32(result);
return NO_ERROR;
}
case GET_CONSUMER_NAME: {
CHECK_INTERFACE(IGraphicBufferProducer, data, reply);
reply->writeString8(getConsumerName());
return NO_ERROR;
}
}
return BBinder::onTransact(code, data, reply, flags);
}
Commit Message: BQ: fix some uninitialized variables
Bug 27555981
Bug 27556038
Change-Id: I436b6fec589677d7e36c0e980f6e59808415dc0e
CWE ID: CWE-200 | status_t BnGraphicBufferProducer::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
switch(code) {
case REQUEST_BUFFER: {
CHECK_INTERFACE(IGraphicBufferProducer, data, reply);
int bufferIdx = data.readInt32();
sp<GraphicBuffer> buffer;
int result = requestBuffer(bufferIdx, &buffer);
reply->writeInt32(buffer != 0);
if (buffer != 0) {
reply->write(*buffer);
}
reply->writeInt32(result);
return NO_ERROR;
}
case SET_BUFFER_COUNT: {
CHECK_INTERFACE(IGraphicBufferProducer, data, reply);
int bufferCount = data.readInt32();
int result = setBufferCount(bufferCount);
reply->writeInt32(result);
return NO_ERROR;
}
case DEQUEUE_BUFFER: {
CHECK_INTERFACE(IGraphicBufferProducer, data, reply);
bool async = static_cast<bool>(data.readInt32());
uint32_t width = data.readUint32();
uint32_t height = data.readUint32();
PixelFormat format = static_cast<PixelFormat>(data.readInt32());
uint32_t usage = data.readUint32();
int buf = 0;
sp<Fence> fence;
int result = dequeueBuffer(&buf, &fence, async, width, height,
format, usage);
reply->writeInt32(buf);
reply->writeInt32(fence != NULL);
if (fence != NULL) {
reply->write(*fence);
}
reply->writeInt32(result);
return NO_ERROR;
}
case DETACH_BUFFER: {
CHECK_INTERFACE(IGraphicBufferProducer, data, reply);
int slot = data.readInt32();
int result = detachBuffer(slot);
reply->writeInt32(result);
return NO_ERROR;
}
case DETACH_NEXT_BUFFER: {
CHECK_INTERFACE(IGraphicBufferProducer, data, reply);
sp<GraphicBuffer> buffer;
sp<Fence> fence;
int32_t result = detachNextBuffer(&buffer, &fence);
reply->writeInt32(result);
if (result == NO_ERROR) {
reply->writeInt32(buffer != NULL);
if (buffer != NULL) {
reply->write(*buffer);
}
reply->writeInt32(fence != NULL);
if (fence != NULL) {
reply->write(*fence);
}
}
return NO_ERROR;
}
case ATTACH_BUFFER: {
CHECK_INTERFACE(IGraphicBufferProducer, data, reply);
sp<GraphicBuffer> buffer = new GraphicBuffer();
data.read(*buffer.get());
int slot = 0;
int result = attachBuffer(&slot, buffer);
reply->writeInt32(slot);
reply->writeInt32(result);
return NO_ERROR;
}
case QUEUE_BUFFER: {
CHECK_INTERFACE(IGraphicBufferProducer, data, reply);
int buf = data.readInt32();
QueueBufferInput input(data);
QueueBufferOutput* const output =
reinterpret_cast<QueueBufferOutput *>(
reply->writeInplace(sizeof(QueueBufferOutput)));
memset(output, 0, sizeof(QueueBufferOutput));
status_t result = queueBuffer(buf, input, output);
reply->writeInt32(result);
return NO_ERROR;
}
case CANCEL_BUFFER: {
CHECK_INTERFACE(IGraphicBufferProducer, data, reply);
int buf = data.readInt32();
sp<Fence> fence = new Fence();
data.read(*fence.get());
cancelBuffer(buf, fence);
return NO_ERROR;
}
case QUERY: {
CHECK_INTERFACE(IGraphicBufferProducer, data, reply);
int value = 0;
int what = data.readInt32();
int res = query(what, &value);
reply->writeInt32(value);
reply->writeInt32(res);
return NO_ERROR;
}
case CONNECT: {
CHECK_INTERFACE(IGraphicBufferProducer, data, reply);
sp<IProducerListener> listener;
if (data.readInt32() == 1) {
listener = IProducerListener::asInterface(data.readStrongBinder());
}
int api = data.readInt32();
bool producerControlledByApp = data.readInt32();
QueueBufferOutput* const output =
reinterpret_cast<QueueBufferOutput *>(
reply->writeInplace(sizeof(QueueBufferOutput)));
memset(output, 0, sizeof(QueueBufferOutput));
status_t res = connect(listener, api, producerControlledByApp, output);
reply->writeInt32(res);
return NO_ERROR;
}
case DISCONNECT: {
CHECK_INTERFACE(IGraphicBufferProducer, data, reply);
int api = data.readInt32();
status_t res = disconnect(api);
reply->writeInt32(res);
return NO_ERROR;
}
case SET_SIDEBAND_STREAM: {
CHECK_INTERFACE(IGraphicBufferProducer, data, reply);
sp<NativeHandle> stream;
if (data.readInt32()) {
stream = NativeHandle::create(data.readNativeHandle(), true);
}
status_t result = setSidebandStream(stream);
reply->writeInt32(result);
return NO_ERROR;
}
case ALLOCATE_BUFFERS: {
CHECK_INTERFACE(IGraphicBufferProducer, data, reply);
bool async = static_cast<bool>(data.readInt32());
uint32_t width = data.readUint32();
uint32_t height = data.readUint32();
PixelFormat format = static_cast<PixelFormat>(data.readInt32());
uint32_t usage = data.readUint32();
allocateBuffers(async, width, height, format, usage);
return NO_ERROR;
}
case ALLOW_ALLOCATION: {
CHECK_INTERFACE(IGraphicBufferProducer, data, reply);
bool allow = static_cast<bool>(data.readInt32());
status_t result = allowAllocation(allow);
reply->writeInt32(result);
return NO_ERROR;
}
case SET_GENERATION_NUMBER: {
CHECK_INTERFACE(IGraphicBufferProducer, data, reply);
uint32_t generationNumber = data.readUint32();
status_t result = setGenerationNumber(generationNumber);
reply->writeInt32(result);
return NO_ERROR;
}
case GET_CONSUMER_NAME: {
CHECK_INTERFACE(IGraphicBufferProducer, data, reply);
reply->writeString8(getConsumerName());
return NO_ERROR;
}
}
return BBinder::onTransact(code, data, reply, flags);
}
| 3,641 |
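The CONNECT handler gains a memset() of the in-place QueueBufferOutput, so a partial fill can no longer leak process memory across the Binder boundary. A plain C sketch of the pattern with a hypothetical reply structure:

#include <string.h>

struct reply_out {
    unsigned width, height, transform_hint, num_pending_buffers;
};

/* Stand-in for the real connect logic: it may bail out after setting only
 * part of the structure. */
static int fill_output(struct reply_out *out, int fail_early)
{
    out->width = 1920;
    out->height = 1080;
    if (fail_early)
        return -1;              /* remaining fields never written */
    out->transform_hint = 0;
    out->num_pending_buffers = 0;
    return 0;
}

/* `out` is written into a reply read by another process, so zero it first:
 * even on the early-failure path the peer sees deterministic zeros rather
 * than whatever happened to be in that memory. */
static int handle_connect(struct reply_out *out, int fail_early)
{
    memset(out, 0, sizeof(*out));
    return fill_output(out, fail_early);
}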
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: bool AppCacheDatabase::FindCache(int64_t cache_id, CacheRecord* record) {
DCHECK(record);
if (!LazyOpen(kDontCreate))
return false;
static const char kSql[] =
"SELECT cache_id, group_id, online_wildcard, update_time, cache_size"
" FROM Caches WHERE cache_id = ?";
sql::Statement statement(db_->GetCachedStatement(SQL_FROM_HERE, kSql));
statement.BindInt64(0, cache_id);
if (!statement.Step())
return false;
ReadCacheRecord(statement, record);
return true;
}
Commit Message: Reland "AppCache: Add padding to cross-origin responses."
This is a reland of 85b389caa7d725cdd31f59e9a2b79ff54804b7b7
Initialized CacheRecord::padding_size to 0.
Original change's description:
> AppCache: Add padding to cross-origin responses.
>
> Bug: 918293
> Change-Id: I4f16640f06feac009d6bbbb624951da6d2669f6c
> Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1488059
> Commit-Queue: Staphany Park <[email protected]>
> Reviewed-by: Victor Costan <[email protected]>
> Reviewed-by: Marijn Kruisselbrink <[email protected]>
> Cr-Commit-Position: refs/heads/master@{#644624}
Bug: 918293
Change-Id: Ie1d3f99c7e8a854d33255a4d66243da2ce16441c
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1539906
Reviewed-by: Victor Costan <[email protected]>
Commit-Queue: Staphany Park <[email protected]>
Cr-Commit-Position: refs/heads/master@{#644719}
CWE ID: CWE-200 | bool AppCacheDatabase::FindCache(int64_t cache_id, CacheRecord* record) {
DCHECK(record);
if (!LazyOpen(kDontCreate))
return false;
static const char kSql[] =
"SELECT cache_id, group_id, online_wildcard, update_time, cache_size, "
"padding_size"
" FROM Caches WHERE cache_id = ?";
sql::Statement statement(db_->GetCachedStatement(SQL_FROM_HERE, kSql));
statement.BindInt64(0, cache_id);
if (!statement.Step())
return false;
ReadCacheRecord(statement, record);
return true;
}
| 13,437 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static void __exit pf_exit(void)
{
struct pf_unit *pf;
int unit;
unregister_blkdev(major, name);
for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
if (pf->present)
del_gendisk(pf->disk);
blk_cleanup_queue(pf->disk->queue);
blk_mq_free_tag_set(&pf->tag_set);
put_disk(pf->disk);
if (pf->present)
pi_release(pf->pi);
}
}
Commit Message: paride/pf: Fix potential NULL pointer dereference
Syzkaller reported this:
pf: pf version 1.04, major 47, cluster 64, nice 0
pf: No ATAPI disk detected
kasan: CONFIG_KASAN_INLINE enabled
kasan: GPF could be caused by NULL-ptr deref or user memory access
general protection fault: 0000 [#1] SMP KASAN PTI
CPU: 0 PID: 9887 Comm: syz-executor.0 Tainted: G C 5.1.0-rc3+ #8
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1ubuntu1 04/01/2014
RIP: 0010:pf_init+0x7af/0x1000 [pf]
Code: 46 77 d2 48 89 d8 48 c1 e8 03 80 3c 28 00 74 08 48 89 df e8 03 25 a6 d2 4c 8b 23 49 8d bc 24 80 05 00 00 48 89 f8 48 c1 e8 03 <80> 3c 28 00 74 05 e8 e6 24 a6 d2 49 8b bc 24 80 05 00 00 e8 79 34
RSP: 0018:ffff8881abcbf998 EFLAGS: 00010202
RAX: 00000000000000b0 RBX: ffffffffc1e4a8a8 RCX: ffffffffaec50788
RDX: 0000000000039b10 RSI: ffffc9000153c000 RDI: 0000000000000580
RBP: dffffc0000000000 R08: ffffed103ee44e59 R09: ffffed103ee44e59
R10: 0000000000000001 R11: ffffed103ee44e58 R12: 0000000000000000
R13: ffffffffc1e4b028 R14: 0000000000000000 R15: 0000000000000020
FS: 00007f1b78a91700(0000) GS:ffff8881f7200000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007f6d72b207f8 CR3: 00000001d5790004 CR4: 00000000007606f0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
PKRU: 55555554
Call Trace:
? 0xffffffffc1e50000
do_one_initcall+0xbc/0x47d init/main.c:901
do_init_module+0x1b5/0x547 kernel/module.c:3456
load_module+0x6405/0x8c10 kernel/module.c:3804
__do_sys_finit_module+0x162/0x190 kernel/module.c:3898
do_syscall_64+0x9f/0x450 arch/x86/entry/common.c:290
entry_SYSCALL_64_after_hwframe+0x49/0xbe
RIP: 0033:0x462e99
Code: f7 d8 64 89 02 b8 ff ff ff ff c3 66 0f 1f 44 00 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 bc ff ff ff f7 d8 64 89 01 48
RSP: 002b:00007f1b78a90c58 EFLAGS: 00000246 ORIG_RAX: 0000000000000139
RAX: ffffffffffffffda RBX: 000000000073bf00 RCX: 0000000000462e99
RDX: 0000000000000000 RSI: 0000000020000180 RDI: 0000000000000003
RBP: 00007f1b78a90c70 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 00007f1b78a916bc
R13: 00000000004bcefa R14: 00000000006f6fb0 R15: 0000000000000004
Modules linked in: pf(+) paride gpio_tps65218 tps65218 i2c_cht_wc ati_remote dc395x act_meta_skbtcindex act_ife ife ecdh_generic rc_xbox_dvd sky81452_regulator v4l2_fwnode leds_blinkm snd_usb_hiface comedi(C) aes_ti slhc cfi_cmdset_0020 mtd cfi_util sx8654 mdio_gpio of_mdio fixed_phy mdio_bitbang libphy alcor_pci matrix_keymap hid_uclogic usbhid scsi_transport_fc videobuf2_v4l2 videobuf2_dma_sg snd_soc_pcm179x_spi snd_soc_pcm179x_codec i2c_demux_pinctrl mdev snd_indigodj isl6405 mii enc28j60 cmac adt7316_i2c(C) adt7316(C) fmc_trivial fmc nf_reject_ipv4 authenc rc_dtt200u rtc_ds1672 dvb_usb_dibusb_mc dvb_usb_dibusb_mc_common dib3000mc dibx000_common dvb_usb_dibusb_common dvb_usb dvb_core videobuf2_common videobuf2_vmalloc videobuf2_memops regulator_haptic adf7242 mac802154 ieee802154 s5h1409 da9034_ts snd_intel8x0m wmi cx24120 usbcore sdhci_cadence sdhci_pltfm sdhci mmc_core joydev i2c_algo_bit scsi_transport_iscsi iscsi_boot_sysfs ves1820 lockd grace nfs_acl auth_rpcgss sunrpc
 ip_vs snd_soc_adau7002 snd_cs4281 snd_rawmidi gameport snd_opl3_lib snd_seq_device snd_hwdep snd_ac97_codec ad7418 hid_primax hid snd_soc_cs4265 snd_soc_core snd_pcm_dmaengine snd_pcm snd_timer ac97_bus snd_compress snd soundcore ti_adc108s102 eeprom_93cx6 i2c_algo_pca mlxreg_hotplug st_pressure st_sensors industrialio_triggered_buffer kfifo_buf industrialio v4l2_common videodev media snd_soc_adau_utils rc_pinnacle_grey rc_core pps_gpio leds_lm3692x nandcore ledtrig_pattern iptable_security iptable_raw iptable_mangle iptable_nat nf_nat nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 iptable_filter bpfilter ip6_vti ip_vti ip_gre ipip sit tunnel4 ip_tunnel hsr veth netdevsim vxcan batman_adv cfg80211 rfkill chnl_net caif nlmon dummy team bonding vcan bridge stp llc ip6_gre gre ip6_tunnel tunnel6 tun mousedev ppdev tpm kvm_intel kvm irqbypass crct10dif_pclmul crc32_pclmul crc32c_intel ghash_clmulni_intel aesni_intel ide_pci_generic aes_x86_64 piix crypto_simd input_leds psmouse cryptd glue_helper ide_core intel_agp serio_raw intel_gtt agpgart ata_generic i2c_piix4 pata_acpi parport_pc parport rtc_cmos floppy sch_fq_codel ip_tables x_tables sha1_ssse3 sha1_generic ipv6 [last unloaded: paride]
Dumping ftrace buffer:
(ftrace buffer empty)
---[ end trace 7a818cf5f210d79e ]---
If alloc_disk fails in pf_init_units, pf->disk will be
NULL, however in pf_detect and pf_exit, it's not check
this before free.It may result a NULL pointer dereference.
Also when register_blkdev failed, blk_cleanup_queue() and
blk_mq_free_tag_set() should be called to free resources.
Reported-by: Hulk Robot <[email protected]>
Fixes: 6ce59025f118 ("paride/pf: cleanup queues when detection fails")
Signed-off-by: YueHaibing <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
CWE ID: CWE-476 | static void __exit pf_exit(void)
{
struct pf_unit *pf;
int unit;
unregister_blkdev(major, name);
for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
if (!pf->disk)
continue;
if (pf->present)
del_gendisk(pf->disk);
blk_cleanup_queue(pf->disk->queue);
blk_mq_free_tag_set(&pf->tag_set);
put_disk(pf->disk);
if (pf->present)
pi_release(pf->pi);
}
}
| 4,184 |
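Beyond the added NULL check, the pattern in the fix is that module teardown has to mirror a possibly partial init. A generic C sketch of that shape:

#include <stdlib.h>

#define NUNITS 4

struct unit {
    char *disk;     /* NULL when allocation failed during init */
    char *queue;    /* only valid when disk != NULL */
};

/* Teardown that mirrors a partial init: skip slots whose allocation failed
 * instead of unconditionally dereferencing them, as the buggy exit path
 * did. */
static void units_exit(struct unit units[NUNITS])
{
    for (int i = 0; i < NUNITS; i++) {
        if (!units[i].disk)
            continue;
        free(units[i].queue);
        free(units[i].disk);
        units[i].disk = NULL;
        units[i].queue = NULL;
    }
}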
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void FolderHeaderView::Update() {
if (!folder_item_)
return;
folder_name_view_->SetVisible(folder_name_visible_);
if (folder_name_visible_)
folder_name_view_->SetText(base::UTF8ToUTF16(folder_item_->name()));
Layout();
}
Commit Message: Enforce the maximum length of the folder name in UI.
BUG=355797
[email protected]
Review URL: https://codereview.chromium.org/203863005
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@260156 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-399 | void FolderHeaderView::Update() {
if (!folder_item_)
return;
folder_name_view_->SetVisible(folder_name_visible_);
if (folder_name_visible_) {
folder_name_view_->SetText(base::UTF8ToUTF16(folder_item_->name()));
folder_name_view_->Update();
}
Layout();
}
| 3,545 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void ServiceWorkerDevToolsAgentHost::WorkerRestarted(int worker_process_id,
int worker_route_id) {
DCHECK_EQ(WORKER_TERMINATED, state_);
state_ = WORKER_NOT_READY;
worker_process_id_ = worker_process_id;
worker_route_id_ = worker_route_id;
RenderProcessHost* host = RenderProcessHost::FromID(worker_process_id_);
for (DevToolsSession* session : sessions())
session->SetRenderer(host, nullptr);
}
Commit Message: DevTools: speculative fix for crash in NetworkHandler::Disable
This keeps BrowserContext* and StoragePartition* instead of
RenderProcessHost* in an attempt to resolve UAF of RenderProcessHost
upon closure of DevTools front-end.
Bug: 801117, 783067, 780694
Change-Id: I6c2cca60cc0c29f0949d189cf918769059f80c1b
Reviewed-on: https://chromium-review.googlesource.com/876657
Commit-Queue: Andrey Kosyakov <[email protected]>
Reviewed-by: Dmitry Gozman <[email protected]>
Cr-Commit-Position: refs/heads/master@{#531157}
CWE ID: CWE-20 | void ServiceWorkerDevToolsAgentHost::WorkerRestarted(int worker_process_id,
int worker_route_id) {
DCHECK_EQ(WORKER_TERMINATED, state_);
state_ = WORKER_NOT_READY;
worker_process_id_ = worker_process_id;
worker_route_id_ = worker_route_id;
for (DevToolsSession* session : sessions())
session->SetRenderer(worker_process_id_, nullptr);
}
| 19,627 |
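The fix stops caching a RenderProcessHost* across worker restarts and passes the process id instead, resolving it only at the point of use. A C sketch of id-based re-resolution with a hypothetical registry:

#include <stddef.h>

struct host { int id; /* ... */ };

#define MAX_HOSTS 8
static struct host *g_hosts[MAX_HOSTS];   /* slot == id; NULL once torn down */

/* Re-resolve the id at every point of use instead of caching the pointer:
 * a host that died in the meantime yields NULL, never a dangling pointer
 * into freed memory. */
static struct host *host_from_id(int id)
{
    if (id < 0 || id >= MAX_HOSTS)
        return NULL;
    return g_hosts[id];
}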
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void BiquadDSPKernel::process(const float* source, float* destination, size_t framesToProcess)
{
ASSERT(source && destination && biquadProcessor());
updateCoefficientsIfNecessary(true, false);
m_biquad.process(source, destination, framesToProcess);
}
Commit Message: Initialize value since calculateFinalValues may fail to do so.
Fix threading issue where updateCoefficientsIfNecessary was not always
called from the audio thread. This causes the value not to be
initialized.
Thus,
o Initialize the variable to some value, just in case.
o Split updateCoefficientsIfNecessary into two functions with the code
that sets the coefficients pulled out in to the new function
updateCoefficients.
o Simplify updateCoefficientsIfNecessary since useSmoothing was always
true, and forceUpdate is no longer needed.
o Add process lock to prevent the audio thread from updating the
coefficients while they are being read in the main thread. The audio
thread will update them the next time around.
o Make getFrequencyResponse set the lock while reading the
coefficients of the biquad in preparation for computing the
frequency response.
BUG=389219
Review URL: https://codereview.chromium.org/354213002
git-svn-id: svn://svn.chromium.org/blink/trunk@177250 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: CWE-119 | void BiquadDSPKernel::process(const float* source, float* destination, size_t framesToProcess)
{
ASSERT(source && destination && biquadProcessor());
// The audio thread can't block on this lock; skip updating the coefficients for this block if
// necessary. We'll get them the next time around.
{
MutexTryLocker tryLocker(m_processLock);
if (tryLocker.locked())
updateCoefficientsIfNecessary();
}
m_biquad.process(source, destination, framesToProcess);
}
| 4,972 |
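The fix guards the coefficient update with a MutexTryLocker so the real-time audio thread never blocks; if the lock is contended, the block is processed with last time's coefficients. The same pattern with POSIX threads (stand-in coefficient, hypothetical names):

#include <pthread.h>

static pthread_mutex_t coef_lock = PTHREAD_MUTEX_INITIALIZER;
static double shared_b0 = 1.0;          /* stands in for the filter coefficients */

/* Main thread: may block while publishing new coefficients. */
static void set_gain(double b0)
{
    pthread_mutex_lock(&coef_lock);
    shared_b0 = b0;
    pthread_mutex_unlock(&coef_lock);
}

/* Real-time audio thread: must never block, so only trylock.  If the main
 * thread holds the lock, this block keeps the previous coefficients and
 * picks up the new ones on the next callback. */
static void process_block(float *dst, const float *src, int frames)
{
    static double b0 = 1.0;             /* audio-thread copy */
    if (pthread_mutex_trylock(&coef_lock) == 0) {
        b0 = shared_b0;
        pthread_mutex_unlock(&coef_lock);
    }
    for (int i = 0; i < frames; i++)
        dst[i] = (float)(b0 * src[i]);
}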
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static inline int mount_entry_on_systemfs(struct mntent *mntent)
{
return mount_entry_on_generic(mntent, mntent->mnt_dir);
}
Commit Message: CVE-2015-1335: Protect container mounts against symlinks
When a container starts up, lxc sets up the container's inital fstree
by doing a bunch of mounting, guided by the container configuration
file. The container config is owned by the admin or user on the host,
so we do not try to guard against bad entries. However, since the
mount target is in the container, it's possible that the container admin
could divert the mount with symbolic links. This could bypass proper
container startup (i.e. confinement of a root-owned container by the
restrictive apparmor policy, by diverting the required write to
/proc/self/attr/current), or bypass the (path-based) apparmor policy
by diverting, say, /proc to /mnt in the container.
To prevent this,
1. do not allow mounts to paths containing symbolic links
2. do not allow bind mounts from relative paths containing symbolic
links.
Details:
Define safe_mount which ensures that the container has not inserted any
symbolic links into any mount targets for mounts to be done during
container setup.
The host's mount path may contain symbolic links. As it is under the
control of the administrator, that's ok. So safe_mount begins the check
for symbolic links after the rootfs->mount, by opening that directory.
It opens each directory along the path using openat() relative to the
parent directory using O_NOFOLLOW. When the target is reached, it
mounts onto /proc/self/fd/<targetfd>.
Use safe_mount() in mount_entry(), when mounting container proc,
and when needed. In particular, safe_mount() need not be used in
any case where:
1. the mount is done in the container's namespace
2. the mount is for the container's rootfs
3. the mount is relative to a tmpfs or proc/sysfs which we have
just safe_mount()ed ourselves
Since we were using proc/net as a temporary placeholder for /proc/sys/net
during container startup, and proc/net is a symbolic link, use proc/tty
instead.
Update the lxc.container.conf manpage with details about the new
restrictions.
Finally, add a testcase to test some symbolic link possibilities.
Reported-by: Roman Fiedler
Signed-off-by: Serge Hallyn <[email protected]>
Acked-by: Stéphane Graber <[email protected]>
CWE ID: CWE-59 | static inline int mount_entry_on_systemfs(struct mntent *mntent)
{
return mount_entry_on_generic(mntent, mntent->mnt_dir, NULL);
}
| 21,951 |
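The safe_mount() described in the commit defeats container-planted symlinks by walking the target path one component at a time with openat(..., O_NOFOLLOW) and then mounting onto /proc/self/fd/<fd>. A simplified, Linux-only C sketch of just the traversal step (the real implementation differs in details such as open flags, error reporting, and the final mount call):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* Open `relpath` beneath `rootfd`, refusing to follow a symlink in any
 * component.  The returned fd can then be used via "/proc/self/fd/%d" as
 * a mount target that the container cannot redirect. */
static int open_beneath_nofollow(int rootfd, const char *relpath)
{
    char buf[4096];
    char *save = NULL;
    int fd = dup(rootfd);

    if (fd < 0 || strlen(relpath) >= sizeof(buf))
        goto fail;
    strcpy(buf, relpath);

    for (char *part = strtok_r(buf, "/", &save); part != NULL;
         part = strtok_r(NULL, "/", &save)) {
        int next = openat(fd, part, O_RDONLY | O_NOFOLLOW | O_CLOEXEC);
        close(fd);
        if (next < 0)
            return -1;
        fd = next;
    }
    return fd;
fail:
    if (fd >= 0)
        close(fd);
    return -1;
}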
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void Reinitialize(ReinitTestCase test_case) {
feature_list_.InitAndEnableFeature(network::features::kNetworkService);
ASSERT_TRUE(temp_directory_.CreateUniqueTempDir());
AppCacheDatabase db(temp_directory_.GetPath().AppendASCII("Index"));
EXPECT_TRUE(db.LazyOpen(true));
if (test_case == CORRUPT_CACHE_ON_INSTALL ||
test_case == CORRUPT_CACHE_ON_LOAD_EXISTING) {
const std::string kCorruptData("deadbeef");
base::FilePath disk_cache_directory =
temp_directory_.GetPath().AppendASCII("Cache");
ASSERT_TRUE(base::CreateDirectory(disk_cache_directory));
base::FilePath index_file = disk_cache_directory.AppendASCII("index");
EXPECT_EQ(static_cast<int>(kCorruptData.length()),
base::WriteFile(index_file, kCorruptData.data(),
kCorruptData.length()));
base::FilePath entry_file =
disk_cache_directory.AppendASCII("01234567_0");
EXPECT_EQ(static_cast<int>(kCorruptData.length()),
base::WriteFile(entry_file, kCorruptData.data(),
kCorruptData.length()));
}
if (test_case == CORRUPT_CACHE_ON_LOAD_EXISTING) {
AppCacheDatabase db(temp_directory_.GetPath().AppendASCII("Index"));
GURL manifest_url = GetMockUrl("manifest");
AppCacheDatabase::GroupRecord group_record;
group_record.group_id = 1;
group_record.manifest_url = manifest_url;
group_record.origin = url::Origin::Create(manifest_url);
EXPECT_TRUE(db.InsertGroup(&group_record));
AppCacheDatabase::CacheRecord cache_record;
cache_record.cache_id = 1;
cache_record.group_id = 1;
cache_record.online_wildcard = false;
cache_record.update_time = kZeroTime;
cache_record.cache_size = kDefaultEntrySize;
EXPECT_TRUE(db.InsertCache(&cache_record));
AppCacheDatabase::EntryRecord entry_record;
entry_record.cache_id = 1;
entry_record.url = manifest_url;
entry_record.flags = AppCacheEntry::MANIFEST;
entry_record.response_id = 1;
entry_record.response_size = kDefaultEntrySize;
EXPECT_TRUE(db.InsertEntry(&entry_record));
}
service_.reset(new AppCacheServiceImpl(nullptr));
auto loader_factory_getter = base::MakeRefCounted<URLLoaderFactoryGetter>();
loader_factory_getter->SetNetworkFactoryForTesting(
&mock_url_loader_factory_, /* is_corb_enabled = */ true);
service_->set_url_loader_factory_getter(loader_factory_getter.get());
service_->Initialize(temp_directory_.GetPath());
mock_quota_manager_proxy_ = new MockQuotaManagerProxy();
service_->quota_manager_proxy_ = mock_quota_manager_proxy_;
delegate_.reset(new MockStorageDelegate(this));
observer_.reset(new MockServiceObserver(this));
service_->AddObserver(observer_.get());
FlushAllTasks();
base::SequencedTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
base::BindOnce(&AppCacheStorageImplTest::Continue_Reinitialize,
base::Unretained(this), test_case));
}
Commit Message: Reland "AppCache: Add padding to cross-origin responses."
This is a reland of 85b389caa7d725cdd31f59e9a2b79ff54804b7b7
Initialized CacheRecord::padding_size to 0.
Original change's description:
> AppCache: Add padding to cross-origin responses.
>
> Bug: 918293
> Change-Id: I4f16640f06feac009d6bbbb624951da6d2669f6c
> Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1488059
> Commit-Queue: Staphany Park <[email protected]>
> Reviewed-by: Victor Costan <[email protected]>
> Reviewed-by: Marijn Kruisselbrink <[email protected]>
> Cr-Commit-Position: refs/heads/master@{#644624}
Bug: 918293
Change-Id: Ie1d3f99c7e8a854d33255a4d66243da2ce16441c
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1539906
Reviewed-by: Victor Costan <[email protected]>
Commit-Queue: Staphany Park <[email protected]>
Cr-Commit-Position: refs/heads/master@{#644719}
CWE ID: CWE-200 | void Reinitialize(ReinitTestCase test_case) {
feature_list_.InitAndEnableFeature(network::features::kNetworkService);
ASSERT_TRUE(temp_directory_.CreateUniqueTempDir());
AppCacheDatabase db(temp_directory_.GetPath().AppendASCII("Index"));
EXPECT_TRUE(db.LazyOpen(true));
if (test_case == CORRUPT_CACHE_ON_INSTALL ||
test_case == CORRUPT_CACHE_ON_LOAD_EXISTING) {
const std::string kCorruptData("deadbeef");
base::FilePath disk_cache_directory =
temp_directory_.GetPath().AppendASCII("Cache");
ASSERT_TRUE(base::CreateDirectory(disk_cache_directory));
base::FilePath index_file = disk_cache_directory.AppendASCII("index");
EXPECT_EQ(static_cast<int>(kCorruptData.length()),
base::WriteFile(index_file, kCorruptData.data(),
kCorruptData.length()));
base::FilePath entry_file =
disk_cache_directory.AppendASCII("01234567_0");
EXPECT_EQ(static_cast<int>(kCorruptData.length()),
base::WriteFile(entry_file, kCorruptData.data(),
kCorruptData.length()));
}
if (test_case == CORRUPT_CACHE_ON_LOAD_EXISTING) {
AppCacheDatabase db(temp_directory_.GetPath().AppendASCII("Index"));
GURL manifest_url = GetMockUrl("manifest");
AppCacheDatabase::GroupRecord group_record;
group_record.group_id = 1;
group_record.manifest_url = manifest_url;
group_record.origin = url::Origin::Create(manifest_url);
EXPECT_TRUE(db.InsertGroup(&group_record));
AppCacheDatabase::CacheRecord cache_record;
cache_record.cache_id = 1;
cache_record.group_id = 1;
cache_record.online_wildcard = false;
cache_record.update_time = kZeroTime;
cache_record.cache_size = kDefaultEntrySize;
cache_record.padding_size = 0;
EXPECT_TRUE(db.InsertCache(&cache_record));
AppCacheDatabase::EntryRecord entry_record;
entry_record.cache_id = 1;
entry_record.url = manifest_url;
entry_record.flags = AppCacheEntry::MANIFEST;
entry_record.response_id = 1;
entry_record.response_size = kDefaultEntrySize;
entry_record.padding_size = 0;
EXPECT_TRUE(db.InsertEntry(&entry_record));
}
service_.reset(new AppCacheServiceImpl(nullptr));
auto loader_factory_getter = base::MakeRefCounted<URLLoaderFactoryGetter>();
loader_factory_getter->SetNetworkFactoryForTesting(
&mock_url_loader_factory_, /* is_corb_enabled = */ true);
service_->set_url_loader_factory_getter(loader_factory_getter.get());
service_->Initialize(temp_directory_.GetPath());
mock_quota_manager_proxy_ = new MockQuotaManagerProxy();
service_->quota_manager_proxy_ = mock_quota_manager_proxy_;
delegate_.reset(new MockStorageDelegate(this));
observer_.reset(new MockServiceObserver(this));
service_->AddObserver(observer_.get());
FlushAllTasks();
base::SequencedTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
base::BindOnce(&AppCacheStorageImplTest::Continue_Reinitialize,
base::Unretained(this), test_case));
}
| 14,577 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: xmlParsePEReference(xmlParserCtxtPtr ctxt)
{
const xmlChar *name;
xmlEntityPtr entity = NULL;
xmlParserInputPtr input;
if (RAW != '%')
return;
NEXT;
name = xmlParseName(ctxt);
if (name == NULL) {
xmlFatalErrMsg(ctxt, XML_ERR_PEREF_NO_NAME, "PEReference: no name\n");
return;
}
if (xmlParserDebugEntities)
xmlGenericError(xmlGenericErrorContext,
"PEReference: %s\n", name);
if (RAW != ';') {
xmlFatalErr(ctxt, XML_ERR_PEREF_SEMICOL_MISSING, NULL);
return;
}
NEXT;
/*
* Increate the number of entity references parsed
*/
ctxt->nbentities++;
/*
* Request the entity from SAX
*/
if ((ctxt->sax != NULL) &&
(ctxt->sax->getParameterEntity != NULL))
entity = ctxt->sax->getParameterEntity(ctxt->userData, name);
if (ctxt->instate == XML_PARSER_EOF)
return;
if (entity == NULL) {
/*
* [ WFC: Entity Declared ]
* In a document without any DTD, a document with only an
* internal DTD subset which contains no parameter entity
* references, or a document with "standalone='yes'", ...
* ... The declaration of a parameter entity must precede
* any reference to it...
*/
if ((ctxt->standalone == 1) ||
((ctxt->hasExternalSubset == 0) &&
(ctxt->hasPErefs == 0))) {
xmlFatalErrMsgStr(ctxt, XML_ERR_UNDECLARED_ENTITY,
"PEReference: %%%s; not found\n",
name);
} else {
/*
* [ VC: Entity Declared ]
* In a document with an external subset or external
* parameter entities with "standalone='no'", ...
* ... The declaration of a parameter entity must
* precede any reference to it...
*/
if ((ctxt->validate) && (ctxt->vctxt.error != NULL)) {
xmlValidityError(ctxt, XML_WAR_UNDECLARED_ENTITY,
"PEReference: %%%s; not found\n",
name, NULL);
} else
xmlWarningMsg(ctxt, XML_WAR_UNDECLARED_ENTITY,
"PEReference: %%%s; not found\n",
name, NULL);
ctxt->valid = 0;
}
xmlParserEntityCheck(ctxt, 0, NULL, 0);
} else {
/*
* Internal checking in case the entity quest barfed
*/
if ((entity->etype != XML_INTERNAL_PARAMETER_ENTITY) &&
(entity->etype != XML_EXTERNAL_PARAMETER_ENTITY)) {
xmlWarningMsg(ctxt, XML_WAR_UNDECLARED_ENTITY,
"Internal: %%%s; is not a parameter entity\n",
name, NULL);
} else {
xmlChar start[4];
xmlCharEncoding enc;
if ((entity->etype == XML_EXTERNAL_PARAMETER_ENTITY) &&
((ctxt->options & XML_PARSE_NOENT) == 0) &&
((ctxt->options & XML_PARSE_DTDVALID) == 0) &&
((ctxt->options & XML_PARSE_DTDLOAD) == 0) &&
((ctxt->options & XML_PARSE_DTDATTR) == 0) &&
(ctxt->replaceEntities == 0) &&
(ctxt->validate == 0))
return;
input = xmlNewEntityInputStream(ctxt, entity);
if (xmlPushInput(ctxt, input) < 0)
return;
if (entity->etype == XML_EXTERNAL_PARAMETER_ENTITY) {
/*
* Get the 4 first bytes and decode the charset
* if enc != XML_CHAR_ENCODING_NONE
* plug some encoding conversion routines.
* Note that, since we may have some non-UTF8
* encoding (like UTF16, bug 135229), the 'length'
* is not known, but we can calculate based upon
* the amount of data in the buffer.
*/
GROW
if (ctxt->instate == XML_PARSER_EOF)
return;
if ((ctxt->input->end - ctxt->input->cur)>=4) {
start[0] = RAW;
start[1] = NXT(1);
start[2] = NXT(2);
start[3] = NXT(3);
enc = xmlDetectCharEncoding(start, 4);
if (enc != XML_CHAR_ENCODING_NONE) {
xmlSwitchEncoding(ctxt, enc);
}
}
if ((CMP5(CUR_PTR, '<', '?', 'x', 'm', 'l')) &&
(IS_BLANK_CH(NXT(5)))) {
xmlParseTextDecl(ctxt);
}
}
}
}
ctxt->hasPErefs = 1;
}
Commit Message: Detect infinite recursion in parameter entities
When expanding a parameter entity in a DTD, infinite recursion could
lead to an infinite loop or memory exhaustion.
Thanks to Wei Lei for the first of many reports.
Fixes bug 759579.
CWE ID: CWE-835 | xmlParsePEReference(xmlParserCtxtPtr ctxt)
{
const xmlChar *name;
xmlEntityPtr entity = NULL;
xmlParserInputPtr input;
if (RAW != '%')
return;
NEXT;
name = xmlParseName(ctxt);
if (name == NULL) {
xmlFatalErrMsg(ctxt, XML_ERR_PEREF_NO_NAME, "PEReference: no name\n");
return;
}
if (xmlParserDebugEntities)
xmlGenericError(xmlGenericErrorContext,
"PEReference: %s\n", name);
if (RAW != ';') {
xmlFatalErr(ctxt, XML_ERR_PEREF_SEMICOL_MISSING, NULL);
return;
}
NEXT;
/*
* Increate the number of entity references parsed
*/
ctxt->nbentities++;
/*
* Request the entity from SAX
*/
if ((ctxt->sax != NULL) &&
(ctxt->sax->getParameterEntity != NULL))
entity = ctxt->sax->getParameterEntity(ctxt->userData, name);
if (ctxt->instate == XML_PARSER_EOF)
return;
if (entity == NULL) {
/*
* [ WFC: Entity Declared ]
* In a document without any DTD, a document with only an
* internal DTD subset which contains no parameter entity
* references, or a document with "standalone='yes'", ...
* ... The declaration of a parameter entity must precede
* any reference to it...
*/
if ((ctxt->standalone == 1) ||
((ctxt->hasExternalSubset == 0) &&
(ctxt->hasPErefs == 0))) {
xmlFatalErrMsgStr(ctxt, XML_ERR_UNDECLARED_ENTITY,
"PEReference: %%%s; not found\n",
name);
} else {
/*
* [ VC: Entity Declared ]
* In a document with an external subset or external
* parameter entities with "standalone='no'", ...
* ... The declaration of a parameter entity must
* precede any reference to it...
*/
if ((ctxt->validate) && (ctxt->vctxt.error != NULL)) {
xmlValidityError(ctxt, XML_WAR_UNDECLARED_ENTITY,
"PEReference: %%%s; not found\n",
name, NULL);
} else
xmlWarningMsg(ctxt, XML_WAR_UNDECLARED_ENTITY,
"PEReference: %%%s; not found\n",
name, NULL);
ctxt->valid = 0;
}
xmlParserEntityCheck(ctxt, 0, NULL, 0);
} else {
/*
* Internal checking in case the entity quest barfed
*/
if ((entity->etype != XML_INTERNAL_PARAMETER_ENTITY) &&
(entity->etype != XML_EXTERNAL_PARAMETER_ENTITY)) {
xmlWarningMsg(ctxt, XML_WAR_UNDECLARED_ENTITY,
"Internal: %%%s; is not a parameter entity\n",
name, NULL);
} else {
xmlChar start[4];
xmlCharEncoding enc;
if ((entity->etype == XML_EXTERNAL_PARAMETER_ENTITY) &&
((ctxt->options & XML_PARSE_NOENT) == 0) &&
((ctxt->options & XML_PARSE_DTDVALID) == 0) &&
((ctxt->options & XML_PARSE_DTDLOAD) == 0) &&
((ctxt->options & XML_PARSE_DTDATTR) == 0) &&
(ctxt->replaceEntities == 0) &&
(ctxt->validate == 0))
return;
input = xmlNewEntityInputStream(ctxt, entity);
if (xmlPushInput(ctxt, input) < 0) {
xmlFreeInputStream(input);
return;
}
if (entity->etype == XML_EXTERNAL_PARAMETER_ENTITY) {
/*
* Get the 4 first bytes and decode the charset
* if enc != XML_CHAR_ENCODING_NONE
* plug some encoding conversion routines.
* Note that, since we may have some non-UTF8
* encoding (like UTF16, bug 135229), the 'length'
* is not known, but we can calculate based upon
* the amount of data in the buffer.
*/
GROW
if (ctxt->instate == XML_PARSER_EOF)
return;
if ((ctxt->input->end - ctxt->input->cur)>=4) {
start[0] = RAW;
start[1] = NXT(1);
start[2] = NXT(2);
start[3] = NXT(3);
enc = xmlDetectCharEncoding(start, 4);
if (enc != XML_CHAR_ENCODING_NONE) {
xmlSwitchEncoding(ctxt, enc);
}
}
if ((CMP5(CUR_PTR, '<', '?', 'x', 'm', 'l')) &&
(IS_BLANK_CH(NXT(5)))) {
xmlParseTextDecl(ctxt);
}
}
}
}
ctxt->hasPErefs = 1;
}
| 1,201 |
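The libxml2 fix in the record above adds xmlFreeInputStream() on the xmlPushInput() failure path; the commit message explains why that path now fires — the parser refuses to push further inputs once parameter-entity expansion recurses without bound. As a rough illustration of that pattern only (the names below are invented and are not libxml2's API), a depth-capped push plus caller-side cleanup looks like this:
// Illustrative sketch: cap how deeply entity expansion may nest, and let the
// caller release the input it created when the push is refused.
#include <cstddef>
struct Input { /* parser input state */ };
struct Ctxt {
    static const std::size_t kMaxEntityDepth = 40;  // assumed limit
    std::size_t input_depth = 0;
};
// Refuses the push (returns -1) once expansion nests too deeply.
int push_input(Ctxt* ctxt, Input* in) {
    if (ctxt->input_depth >= Ctxt::kMaxEntityDepth)
        return -1;                  // likely runaway recursion
    ++ctxt->input_depth;
    (void)in;                       // ... push `in` onto the input stack ...
    return 0;
}
void expand_entity(Ctxt* ctxt) {
    Input* in = new Input();        // stands in for xmlNewEntityInputStream()
    if (push_input(ctxt, in) < 0) {
        delete in;                  // caller owns it until the push succeeds
        return;
    }
    // ... parse the pushed content, then pop and delete it ...
}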
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: error::Error GLES2DecoderImpl::HandleReadPixels(
uint32 immediate_data_size, const gles2::ReadPixels& c) {
GLint x = c.x;
GLint y = c.y;
GLsizei width = c.width;
GLsizei height = c.height;
GLenum format = c.format;
GLenum type = c.type;
if (width < 0 || height < 0) {
SetGLError(GL_INVALID_VALUE, "glReadPixels", "dimensions < 0");
return error::kNoError;
}
typedef gles2::ReadPixels::Result Result;
uint32 pixels_size;
if (!GLES2Util::ComputeImageDataSizes(
width, height, format, type, pack_alignment_, &pixels_size, NULL, NULL)) {
return error::kOutOfBounds;
}
void* pixels = GetSharedMemoryAs<void*>(
c.pixels_shm_id, c.pixels_shm_offset, pixels_size);
Result* result = GetSharedMemoryAs<Result*>(
c.result_shm_id, c.result_shm_offset, sizeof(*result));
if (!pixels || !result) {
return error::kOutOfBounds;
}
if (!validators_->read_pixel_format.IsValid(format)) {
SetGLErrorInvalidEnum("glReadPixels", format, "format");
return error::kNoError;
}
if (!validators_->pixel_type.IsValid(type)) {
SetGLErrorInvalidEnum("glReadPixels", type, "type");
return error::kNoError;
}
if (width == 0 || height == 0) {
return error::kNoError;
}
gfx::Size max_size = GetBoundReadFrameBufferSize();
GLint max_x;
GLint max_y;
if (!SafeAdd(x, width, &max_x) || !SafeAdd(y, height, &max_y)) {
SetGLError(GL_INVALID_VALUE, "glReadPixels", "dimensions out of range");
return error::kNoError;
}
if (!CheckBoundFramebuffersValid("glReadPixels")) {
return error::kNoError;
}
CopyRealGLErrorsToWrapper();
ScopedResolvedFrameBufferBinder binder(this, false, true);
if (x < 0 || y < 0 || max_x > max_size.width() || max_y > max_size.height()) {
uint32 temp_size;
uint32 unpadded_row_size;
uint32 padded_row_size;
if (!GLES2Util::ComputeImageDataSizes(
width, 2, format, type, pack_alignment_, &temp_size,
&unpadded_row_size, &padded_row_size)) {
SetGLError(GL_INVALID_VALUE, "glReadPixels", "dimensions out of range");
return error::kNoError;
}
GLint dest_x_offset = std::max(-x, 0);
uint32 dest_row_offset;
if (!GLES2Util::ComputeImageDataSizes(
dest_x_offset, 1, format, type, pack_alignment_, &dest_row_offset, NULL,
NULL)) {
SetGLError(GL_INVALID_VALUE, "glReadPixels", "dimensions out of range");
return error::kNoError;
}
int8* dst = static_cast<int8*>(pixels);
GLint read_x = std::max(0, x);
GLint read_end_x = std::max(0, std::min(max_size.width(), max_x));
GLint read_width = read_end_x - read_x;
for (GLint yy = 0; yy < height; ++yy) {
GLint ry = y + yy;
memset(dst, 0, unpadded_row_size);
if (ry >= 0 && ry < max_size.height() && read_width > 0) {
glReadPixels(
read_x, ry, read_width, 1, format, type, dst + dest_row_offset);
}
dst += padded_row_size;
}
} else {
glReadPixels(x, y, width, height, format, type, pixels);
}
GLenum error = PeekGLError();
if (error == GL_NO_ERROR) {
*result = true;
GLenum read_format = GetBoundReadFrameBufferInternalFormat();
uint32 channels_exist = GLES2Util::GetChannelsForFormat(read_format);
if ((channels_exist & 0x0008) == 0 &&
!feature_info_->feature_flags().disable_workarounds) {
uint32 temp_size;
uint32 unpadded_row_size;
uint32 padded_row_size;
if (!GLES2Util::ComputeImageDataSizes(
width, 2, format, type, pack_alignment_, &temp_size,
&unpadded_row_size, &padded_row_size)) {
SetGLError(GL_INVALID_VALUE, "glReadPixels", "dimensions out of range");
return error::kNoError;
}
if (type != GL_UNSIGNED_BYTE) {
SetGLError(
GL_INVALID_OPERATION, "glReadPixels",
"unsupported readPixel format");
return error::kNoError;
}
switch (format) {
case GL_RGBA:
case GL_BGRA_EXT:
case GL_ALPHA: {
int offset = (format == GL_ALPHA) ? 0 : 3;
int step = (format == GL_ALPHA) ? 1 : 4;
uint8* dst = static_cast<uint8*>(pixels) + offset;
for (GLint yy = 0; yy < height; ++yy) {
uint8* end = dst + unpadded_row_size;
for (uint8* d = dst; d < end; d += step) {
*d = 255;
}
dst += padded_row_size;
}
break;
}
default:
break;
}
}
}
return error::kNoError;
}
Commit Message: Fix SafeAdd and SafeMultiply
BUG=145648,145544
Review URL: https://chromiumcodereview.appspot.com/10916165
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@155478 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-189 | error::Error GLES2DecoderImpl::HandleReadPixels(
uint32 immediate_data_size, const gles2::ReadPixels& c) {
GLint x = c.x;
GLint y = c.y;
GLsizei width = c.width;
GLsizei height = c.height;
GLenum format = c.format;
GLenum type = c.type;
if (width < 0 || height < 0) {
SetGLError(GL_INVALID_VALUE, "glReadPixels", "dimensions < 0");
return error::kNoError;
}
typedef gles2::ReadPixels::Result Result;
uint32 pixels_size;
if (!GLES2Util::ComputeImageDataSizes(
width, height, format, type, pack_alignment_, &pixels_size, NULL, NULL)) {
return error::kOutOfBounds;
}
void* pixels = GetSharedMemoryAs<void*>(
c.pixels_shm_id, c.pixels_shm_offset, pixels_size);
Result* result = GetSharedMemoryAs<Result*>(
c.result_shm_id, c.result_shm_offset, sizeof(*result));
if (!pixels || !result) {
return error::kOutOfBounds;
}
if (!validators_->read_pixel_format.IsValid(format)) {
SetGLErrorInvalidEnum("glReadPixels", format, "format");
return error::kNoError;
}
if (!validators_->pixel_type.IsValid(type)) {
SetGLErrorInvalidEnum("glReadPixels", type, "type");
return error::kNoError;
}
if (width == 0 || height == 0) {
return error::kNoError;
}
gfx::Size max_size = GetBoundReadFrameBufferSize();
int32 max_x;
int32 max_y;
if (!SafeAddInt32(x, width, &max_x) || !SafeAddInt32(y, height, &max_y)) {
SetGLError(GL_INVALID_VALUE, "glReadPixels", "dimensions out of range");
return error::kNoError;
}
if (!CheckBoundFramebuffersValid("glReadPixels")) {
return error::kNoError;
}
CopyRealGLErrorsToWrapper();
ScopedResolvedFrameBufferBinder binder(this, false, true);
if (x < 0 || y < 0 || max_x > max_size.width() || max_y > max_size.height()) {
uint32 temp_size;
uint32 unpadded_row_size;
uint32 padded_row_size;
if (!GLES2Util::ComputeImageDataSizes(
width, 2, format, type, pack_alignment_, &temp_size,
&unpadded_row_size, &padded_row_size)) {
SetGLError(GL_INVALID_VALUE, "glReadPixels", "dimensions out of range");
return error::kNoError;
}
GLint dest_x_offset = std::max(-x, 0);
uint32 dest_row_offset;
if (!GLES2Util::ComputeImageDataSizes(
dest_x_offset, 1, format, type, pack_alignment_, &dest_row_offset, NULL,
NULL)) {
SetGLError(GL_INVALID_VALUE, "glReadPixels", "dimensions out of range");
return error::kNoError;
}
int8* dst = static_cast<int8*>(pixels);
GLint read_x = std::max(0, x);
GLint read_end_x = std::max(0, std::min(max_size.width(), max_x));
GLint read_width = read_end_x - read_x;
for (GLint yy = 0; yy < height; ++yy) {
GLint ry = y + yy;
memset(dst, 0, unpadded_row_size);
if (ry >= 0 && ry < max_size.height() && read_width > 0) {
glReadPixels(
read_x, ry, read_width, 1, format, type, dst + dest_row_offset);
}
dst += padded_row_size;
}
} else {
glReadPixels(x, y, width, height, format, type, pixels);
}
GLenum error = PeekGLError();
if (error == GL_NO_ERROR) {
*result = true;
GLenum read_format = GetBoundReadFrameBufferInternalFormat();
uint32 channels_exist = GLES2Util::GetChannelsForFormat(read_format);
if ((channels_exist & 0x0008) == 0 &&
!feature_info_->feature_flags().disable_workarounds) {
uint32 temp_size;
uint32 unpadded_row_size;
uint32 padded_row_size;
if (!GLES2Util::ComputeImageDataSizes(
width, 2, format, type, pack_alignment_, &temp_size,
&unpadded_row_size, &padded_row_size)) {
SetGLError(GL_INVALID_VALUE, "glReadPixels", "dimensions out of range");
return error::kNoError;
}
if (type != GL_UNSIGNED_BYTE) {
SetGLError(
GL_INVALID_OPERATION, "glReadPixels",
"unsupported readPixel format");
return error::kNoError;
}
switch (format) {
case GL_RGBA:
case GL_BGRA_EXT:
case GL_ALPHA: {
int offset = (format == GL_ALPHA) ? 0 : 3;
int step = (format == GL_ALPHA) ? 1 : 4;
uint8* dst = static_cast<uint8*>(pixels) + offset;
for (GLint yy = 0; yy < height; ++yy) {
uint8* end = dst + unpadded_row_size;
for (uint8* d = dst; d < end; d += step) {
*d = 255;
}
dst += padded_row_size;
}
break;
}
default:
break;
}
}
}
return error::kNoError;
}
| 17,829 |
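The only functional change in the fixed decoder above is that max_x/max_y become int32 and the additions go through SafeAddInt32, so x+width and y+height are overflow-checked before being compared against the framebuffer size. A generic version of such a check — a sketch, not Chromium's base/numerics implementation — is:
#include <cstdint>
#include <limits>
// Returns false instead of overflowing; on success *result holds a + b.
bool SafeAddInt32(int32_t a, int32_t b, int32_t* result) {
    if (b > 0 && a > std::numeric_limits<int32_t>::max() - b)
        return false;   // would wrap past INT32_MAX
    if (b < 0 && a < std::numeric_limits<int32_t>::min() - b)
        return false;   // would wrap past INT32_MIN
    *result = a + b;
    return true;
}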
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: QuotaTask::QuotaTask(QuotaTaskObserver* observer)
: observer_(observer),
original_task_runner_(base::MessageLoopProxy::current()) {
}
Commit Message: Quota double-delete fix
BUG=142310
Review URL: https://chromiumcodereview.appspot.com/10832407
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@152532 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-399 | QuotaTask::QuotaTask(QuotaTaskObserver* observer)
: observer_(observer),
original_task_runner_(base::MessageLoopProxy::current()),
delete_scheduled_(false) {
}
| 27,161 |
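The visible diff above is only a new delete_scheduled_(false) initialisation; per the commit title, the flag's job is to make scheduling the task's deletion idempotent. A minimal sketch of that guard, using an invented class rather than the real QuotaTask:
// Sketch: a second deletion request becomes a no-op instead of queuing a
// second "delete this", which is what double-deleted the object.
class Task {
 public:
  void ScheduleDeletion() {
    if (delete_scheduled_)
      return;                      // already queued once
    delete_scheduled_ = true;
    // ... post deletion to the original task runner exactly once ...
  }
 private:
  bool delete_scheduled_ = false;
};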
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void OneClickSigninHelper::ShowInfoBarIfPossible(net::URLRequest* request,
ProfileIOData* io_data,
int child_id,
int route_id) {
std::string google_chrome_signin_value;
std::string google_accounts_signin_value;
request->GetResponseHeaderByName("Google-Chrome-SignIn",
&google_chrome_signin_value);
request->GetResponseHeaderByName("Google-Accounts-SignIn",
&google_accounts_signin_value);
if (!google_accounts_signin_value.empty() ||
!google_chrome_signin_value.empty()) {
VLOG(1) << "OneClickSigninHelper::ShowInfoBarIfPossible:"
<< " g-a-s='" << google_accounts_signin_value << "'"
<< " g-c-s='" << google_chrome_signin_value << "'";
}
if (!gaia::IsGaiaSignonRealm(request->original_url().GetOrigin()))
return;
std::vector<std::pair<std::string, std::string> > pairs;
base::SplitStringIntoKeyValuePairs(google_accounts_signin_value, '=', ',',
&pairs);
std::string session_index;
std::string email;
for (size_t i = 0; i < pairs.size(); ++i) {
const std::pair<std::string, std::string>& pair = pairs[i];
const std::string& key = pair.first;
const std::string& value = pair.second;
if (key == "email") {
TrimString(value, "\"", &email);
} else if (key == "sessionindex") {
session_index = value;
}
}
if (!email.empty())
io_data->set_reverse_autologin_pending_email(email);
if (!email.empty() || !session_index.empty()) {
VLOG(1) << "OneClickSigninHelper::ShowInfoBarIfPossible:"
<< " email=" << email
<< " sessionindex=" << session_index;
}
AutoAccept auto_accept = AUTO_ACCEPT_NONE;
signin::Source source = signin::SOURCE_UNKNOWN;
GURL continue_url;
std::vector<std::string> tokens;
base::SplitString(google_chrome_signin_value, ',', &tokens);
for (size_t i = 0; i < tokens.size(); ++i) {
const std::string& token = tokens[i];
if (token == "accepted") {
auto_accept = AUTO_ACCEPT_ACCEPTED;
} else if (token == "configure") {
auto_accept = AUTO_ACCEPT_CONFIGURE;
} else if (token == "rejected-for-profile") {
auto_accept = AUTO_ACCEPT_REJECTED_FOR_PROFILE;
}
}
source = GetSigninSource(request->url(), &continue_url);
if (source != signin::SOURCE_UNKNOWN)
auto_accept = AUTO_ACCEPT_EXPLICIT;
if (auto_accept != AUTO_ACCEPT_NONE) {
VLOG(1) << "OneClickSigninHelper::ShowInfoBarIfPossible:"
<< " auto_accept=" << auto_accept;
}
if (session_index.empty() && email.empty() &&
auto_accept == AUTO_ACCEPT_NONE && !continue_url.is_valid()) {
return;
}
content::BrowserThread::PostTask(
content::BrowserThread::UI, FROM_HERE,
base::Bind(&OneClickSigninHelper::ShowInfoBarUIThread, session_index,
email, auto_accept, source, continue_url, child_id, route_id));
}
Commit Message: During redirects in the one click sign in flow, check the current URL
instead of original URL to validate gaia http headers.
BUG=307159
Review URL: https://codereview.chromium.org/77343002
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@236563 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-287 | void OneClickSigninHelper::ShowInfoBarIfPossible(net::URLRequest* request,
ProfileIOData* io_data,
int child_id,
int route_id) {
std::string google_chrome_signin_value;
std::string google_accounts_signin_value;
request->GetResponseHeaderByName("Google-Chrome-SignIn",
&google_chrome_signin_value);
request->GetResponseHeaderByName("Google-Accounts-SignIn",
&google_accounts_signin_value);
if (!google_accounts_signin_value.empty() ||
!google_chrome_signin_value.empty()) {
VLOG(1) << "OneClickSigninHelper::ShowInfoBarIfPossible:"
<< " g-a-s='" << google_accounts_signin_value << "'"
<< " g-c-s='" << google_chrome_signin_value << "'";
}
if (!gaia::IsGaiaSignonRealm(request->url().GetOrigin()))
return;
std::vector<std::pair<std::string, std::string> > pairs;
base::SplitStringIntoKeyValuePairs(google_accounts_signin_value, '=', ',',
&pairs);
std::string session_index;
std::string email;
for (size_t i = 0; i < pairs.size(); ++i) {
const std::pair<std::string, std::string>& pair = pairs[i];
const std::string& key = pair.first;
const std::string& value = pair.second;
if (key == "email") {
TrimString(value, "\"", &email);
} else if (key == "sessionindex") {
session_index = value;
}
}
if (!email.empty())
io_data->set_reverse_autologin_pending_email(email);
if (!email.empty() || !session_index.empty()) {
VLOG(1) << "OneClickSigninHelper::ShowInfoBarIfPossible:"
<< " email=" << email
<< " sessionindex=" << session_index;
}
AutoAccept auto_accept = AUTO_ACCEPT_NONE;
signin::Source source = signin::SOURCE_UNKNOWN;
GURL continue_url;
std::vector<std::string> tokens;
base::SplitString(google_chrome_signin_value, ',', &tokens);
for (size_t i = 0; i < tokens.size(); ++i) {
const std::string& token = tokens[i];
if (token == "accepted") {
auto_accept = AUTO_ACCEPT_ACCEPTED;
} else if (token == "configure") {
auto_accept = AUTO_ACCEPT_CONFIGURE;
} else if (token == "rejected-for-profile") {
auto_accept = AUTO_ACCEPT_REJECTED_FOR_PROFILE;
}
}
source = GetSigninSource(request->url(), &continue_url);
if (source != signin::SOURCE_UNKNOWN)
auto_accept = AUTO_ACCEPT_EXPLICIT;
if (auto_accept != AUTO_ACCEPT_NONE) {
VLOG(1) << "OneClickSigninHelper::ShowInfoBarIfPossible:"
<< " auto_accept=" << auto_accept;
}
if (session_index.empty() && email.empty() &&
auto_accept == AUTO_ACCEPT_NONE && !continue_url.is_valid()) {
return;
}
content::BrowserThread::PostTask(
content::BrowserThread::UI, FROM_HERE,
base::Bind(&OneClickSigninHelper::ShowInfoBarUIThread, session_index,
email, auto_accept, source, continue_url, child_id, route_id));
}
| 5,713 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static int handle_wrmsr(struct kvm_vcpu *vcpu)
{
struct msr_data msr;
u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
| ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
msr.data = data;
msr.index = ecx;
msr.host_initiated = false;
if (vmx_set_msr(vcpu, &msr) != 0) {
trace_kvm_msr_write_ex(ecx, data);
kvm_inject_gp(vcpu, 0);
return 1;
}
trace_kvm_msr_write(ecx, data);
skip_emulated_instruction(vcpu);
return 1;
}
Commit Message: KVM: x86: Check non-canonical addresses upon WRMSR
Upon WRMSR, the CPU should inject #GP if a non-canonical value (address) is
written to certain MSRs. The behavior is "almost" identical for AMD and Intel
(ignoring MSRs that are not implemented in either architecture since they would
anyhow #GP). However, IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
non-canonical address is written on Intel but not on AMD (which ignores the top
32-bits).
Accordingly, this patch injects a #GP on the MSRs which behave identically on
Intel and AMD. To eliminate the differences between the architectures, the
value which is written to IA32_SYSENTER_ESP and IA32_SYSENTER_EIP is turned to
canonical value before writing instead of injecting a #GP.
Some references from Intel and AMD manuals:
According to Intel SDM description of WRMSR instruction #GP is expected on
WRMSR "If the source register contains a non-canonical address and ECX
specifies one of the following MSRs: IA32_DS_AREA, IA32_FS_BASE, IA32_GS_BASE,
IA32_KERNEL_GS_BASE, IA32_LSTAR, IA32_SYSENTER_EIP, IA32_SYSENTER_ESP."
According to AMD manual instruction manual:
LSTAR/CSTAR (SYSCALL): "The WRMSR instruction loads the target RIP into the
LSTAR and CSTAR registers. If an RIP written by WRMSR is not in canonical
form, a general-protection exception (#GP) occurs."
IA32_GS_BASE and IA32_FS_BASE (WRFSBASE/WRGSBASE): "The address written to the
base field must be in canonical form or a #GP fault will occur."
IA32_KERNEL_GS_BASE (SWAPGS): "The address stored in the KernelGSbase MSR must
be in canonical form."
This patch fixes CVE-2014-3610.
Cc: [email protected]
Signed-off-by: Nadav Amit <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
CWE ID: CWE-264 | static int handle_wrmsr(struct kvm_vcpu *vcpu)
{
struct msr_data msr;
u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
| ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
msr.data = data;
msr.index = ecx;
msr.host_initiated = false;
if (kvm_set_msr(vcpu, &msr) != 0) {
trace_kvm_msr_write_ex(ecx, data);
kvm_inject_gp(vcpu, 0);
return 1;
}
trace_kvm_msr_write(ecx, data);
skip_emulated_instruction(vcpu);
return 1;
}
| 16,666 |
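The fix above routes the write through kvm_set_msr(), which (per the commit message) rejects non-canonical addresses for the affected MSRs. For a 48-bit virtual address width, "canonical" simply means bits 63..47 are copies of bit 47; a self-contained check — a sketch under that assumption, not the kernel's implementation, which derives the width from CPU capabilities — is:
#include <cstdint>
// Canonical for a 48-bit VA: sign-extending from bit 47 must reproduce addr.
bool is_canonical_48(uint64_t addr) {
    return (static_cast<int64_t>(addr << 16) >> 16) == static_cast<int64_t>(addr);
}
// A WRMSR handler would then refuse the value instead of forwarding it:
//     if (!is_canonical_48(data)) { inject_gp(vcpu); return; }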
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static MagickBooleanType ReadPSDChannelPixels(Image *image,
const size_t channels,const size_t row,const ssize_t type,
const unsigned char *pixels,ExceptionInfo *exception)
{
Quantum
pixel;
register const unsigned char
*p;
register Quantum
*q;
register ssize_t
x;
size_t
packet_size;
unsigned short
nibble;
p=pixels;
q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
if (q == (Quantum *) NULL)
return MagickFalse;
packet_size=GetPSDPacketSize(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
if (packet_size == 1)
pixel=ScaleCharToQuantum(*p++);
else
{
p=PushShortPixel(MSBEndian,p,&nibble);
pixel=ScaleShortToQuantum(nibble);
}
switch (type)
{
case -1:
{
SetPixelAlpha(image,pixel,q);
break;
}
case -2:
case 0:
{
SetPixelRed(image,pixel,q);
if (channels == 1 || type == -2)
SetPixelGray(image,pixel,q);
if (image->storage_class == PseudoClass)
{
if (packet_size == 1)
SetPixelIndex(image,ScaleQuantumToChar(pixel),q);
else
SetPixelIndex(image,ScaleQuantumToShort(pixel),q);
SetPixelViaPixelInfo(image,image->colormap+(ssize_t)
ConstrainColormapIndex(image,GetPixelIndex(image,q),exception),q);
if (image->depth == 1)
{
ssize_t
bit,
number_bits;
number_bits=image->columns-x;
if (number_bits > 8)
number_bits=8;
for (bit=0; bit < number_bits; bit++)
{
SetPixelIndex(image,(((unsigned char) pixel) &
(0x01 << (7-bit))) != 0 ? 0 : 255,q);
SetPixelViaPixelInfo(image,image->colormap+(ssize_t)
ConstrainColormapIndex(image,GetPixelIndex(image,q),
exception),q);
q+=GetPixelChannels(image);
x++;
}
x--;
continue;
}
}
break;
}
case 1:
{
if (image->storage_class == PseudoClass)
SetPixelAlpha(image,pixel,q);
else
SetPixelGreen(image,pixel,q);
break;
}
case 2:
{
if (image->storage_class == PseudoClass)
SetPixelAlpha(image,pixel,q);
else
SetPixelBlue(image,pixel,q);
break;
}
case 3:
{
if (image->colorspace == CMYKColorspace)
SetPixelBlack(image,pixel,q);
else
if (image->alpha_trait != UndefinedPixelTrait)
SetPixelAlpha(image,pixel,q);
break;
}
case 4:
{
if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) &&
(channels > 3))
break;
if (image->alpha_trait != UndefinedPixelTrait)
SetPixelAlpha(image,pixel,q);
break;
}
default:
break;
}
q+=GetPixelChannels(image);
}
return(SyncAuthenticPixels(image,exception));
}
Commit Message: Rewrite reading pixel values.
CWE ID: CWE-125 | static MagickBooleanType ReadPSDChannelPixels(Image *image,
const size_t channels,const size_t row,const ssize_t type,
const unsigned char *pixels,ExceptionInfo *exception)
{
Quantum
pixel;
register const unsigned char
*p;
register Quantum
*q;
register ssize_t
x;
size_t
packet_size;
unsigned short
nibble;
p=pixels;
q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
if (q == (Quantum *) NULL)
return MagickFalse;
packet_size=GetPSDPacketSize(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
if (packet_size == 1)
pixel=ScaleCharToQuantum(*p++);
else
{
p=PushShortPixel(MSBEndian,p,&nibble);
pixel=ScaleShortToQuantum(nibble);
}
if (image->depth > 1)
{
SetPSDPixel(image,channels,type,packet_size,pixel,q,exception);
q+=GetPixelChannels(image);
}
else
{
ssize_t
bit,
number_bits;
number_bits=image->columns-x;
if (number_bits > 8)
number_bits=8;
for (bit = 0; bit < number_bits; bit++)
{
SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel)
& (0x01 << (7-bit))) != 0 ? 0 : 255,q,exception);
q+=GetPixelChannels(image);
x++;
}
if (x != image->columns)
x--;
continue;
}
}
return(SyncAuthenticPixels(image,exception));
}
| 11,554 |
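For image->depth == 1 the fixed reader above expands each packed byte into at most image->columns - x pixels and only decrements x when the row is not yet full, which keeps the writes inside the row. The same idea over a plain byte row — a generic sketch, not ImageMagick's pixel-queue code — looks like:
#include <cstddef>
#include <cstdint>
// Expands one packed byte of 1-bit pixels; writes at most columns - x entries
// (bit set -> 0, bit clear -> 255, as in the reader above) and returns the
// new column index.
std::size_t unpack_1bit_byte(uint8_t byte, std::size_t x, std::size_t columns,
                             uint8_t* row) {
    std::size_t bits = columns - x;
    if (bits > 8)
        bits = 8;
    for (std::size_t bit = 0; bit < bits; ++bit)
        row[x + bit] = (byte & (0x01u << (7 - bit))) ? 0 : 255;
    return x + bits;
}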
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: bool IDNSpoofChecker::SafeToDisplayAsUnicode(base::StringPiece16 label,
bool is_tld_ascii) {
UErrorCode status = U_ZERO_ERROR;
int32_t result =
uspoof_check(checker_, label.data(),
base::checked_cast<int32_t>(label.size()), nullptr, &status);
if (U_FAILURE(status) || (result & USPOOF_ALL_CHECKS))
return false;
icu::UnicodeString label_string(FALSE, label.data(),
base::checked_cast<int32_t>(label.size()));
if (deviation_characters_.containsSome(label_string))
return false;
result &= USPOOF_RESTRICTION_LEVEL_MASK;
if (result == USPOOF_ASCII)
return true;
if (result == USPOOF_SINGLE_SCRIPT_RESTRICTIVE &&
kana_letters_exceptions_.containsNone(label_string) &&
combining_diacritics_exceptions_.containsNone(label_string)) {
return !is_tld_ascii || !IsMadeOfLatinAlikeCyrillic(label_string);
}
if (non_ascii_latin_letters_.containsSome(label_string) &&
!lgc_letters_n_ascii_.containsAll(label_string))
return false;
if (!tls_index.initialized())
tls_index.Initialize(&OnThreadTermination);
icu::RegexMatcher* dangerous_pattern =
reinterpret_cast<icu::RegexMatcher*>(tls_index.Get());
if (!dangerous_pattern) {
dangerous_pattern = new icu::RegexMatcher(
icu::UnicodeString(
R"([^\p{scx=kana}\p{scx=hira}\p{scx=hani}])"
R"([\u30ce\u30f3\u30bd\u30be])"
R"([^\p{scx=kana}\p{scx=hira}\p{scx=hani}]|)"
R"([^\p{scx=kana}\p{scx=hira}]\u30fc|^\u30fc|)"
R"([^\p{scx=kana}][\u30fd\u30fe]|^[\u30fd\u30fe]|)"
R"(^[\p{scx=kana}]+[\u3078-\u307a][\p{scx=kana}]+$|)"
R"(^[\p{scx=hira}]+[\u30d8-\u30da][\p{scx=hira}]+$|)"
R"([a-z]\u30fb|\u30fb[a-z]|)"
R"([^\p{scx=latn}\p{scx=grek}\p{scx=cyrl}][\u0300-\u0339]|)"
R"([ijl\u0131]\u0307)",
-1, US_INV),
0, status);
tls_index.Set(dangerous_pattern);
}
dangerous_pattern->reset(label_string);
return !dangerous_pattern->find();
}
Commit Message: Block dotless-i / j + a combining mark
U+0131 (dotless i) and U+0237 (dotless j) are blocked from being
followed by a combining mark in U+0300 block.
Bug: 774842
Test: See the bug
Change-Id: I92aac0e97233184864d060fd0f137a90b042c679
Reviewed-on: https://chromium-review.googlesource.com/767888
Commit-Queue: Jungshik Shin <[email protected]>
Reviewed-by: Peter Kasting <[email protected]>
Cr-Commit-Position: refs/heads/master@{#517605}
CWE ID: CWE-20 | bool IDNSpoofChecker::SafeToDisplayAsUnicode(base::StringPiece16 label,
bool is_tld_ascii) {
UErrorCode status = U_ZERO_ERROR;
int32_t result =
uspoof_check(checker_, label.data(),
base::checked_cast<int32_t>(label.size()), nullptr, &status);
if (U_FAILURE(status) || (result & USPOOF_ALL_CHECKS))
return false;
icu::UnicodeString label_string(FALSE, label.data(),
base::checked_cast<int32_t>(label.size()));
if (deviation_characters_.containsSome(label_string))
return false;
result &= USPOOF_RESTRICTION_LEVEL_MASK;
if (result == USPOOF_ASCII)
return true;
if (result == USPOOF_SINGLE_SCRIPT_RESTRICTIVE &&
kana_letters_exceptions_.containsNone(label_string) &&
combining_diacritics_exceptions_.containsNone(label_string)) {
return !is_tld_ascii || !IsMadeOfLatinAlikeCyrillic(label_string);
}
if (non_ascii_latin_letters_.containsSome(label_string) &&
!lgc_letters_n_ascii_.containsAll(label_string))
return false;
if (!tls_index.initialized())
tls_index.Initialize(&OnThreadTermination);
icu::RegexMatcher* dangerous_pattern =
reinterpret_cast<icu::RegexMatcher*>(tls_index.Get());
if (!dangerous_pattern) {
// - Disallow dotless i (U+0131) followed by a combining mark.
dangerous_pattern = new icu::RegexMatcher(
icu::UnicodeString(
R"([^\p{scx=kana}\p{scx=hira}\p{scx=hani}])"
R"([\u30ce\u30f3\u30bd\u30be])"
R"([^\p{scx=kana}\p{scx=hira}\p{scx=hani}]|)"
R"([^\p{scx=kana}\p{scx=hira}]\u30fc|^\u30fc|)"
R"([^\p{scx=kana}][\u30fd\u30fe]|^[\u30fd\u30fe]|)"
R"(^[\p{scx=kana}]+[\u3078-\u307a][\p{scx=kana}]+$|)"
R"(^[\p{scx=hira}]+[\u30d8-\u30da][\p{scx=hira}]+$|)"
R"([a-z]\u30fb|\u30fb[a-z]|)"
R"([^\p{scx=latn}\p{scx=grek}\p{scx=cyrl}][\u0300-\u0339]|)"
R"(\u0131[\u0300-\u0339]|)"
R"([ijl]\u0307)",
-1, US_INV),
0, status);
tls_index.Set(dangerous_pattern);
}
dangerous_pattern->reset(label_string);
return !dangerous_pattern->find();
}
| 29,275 |
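The new regular expression above adds the alternative \u0131[\u0300-\u0339], i.e. a dotless i directly followed by a combining mark, which would otherwise render like an ordinary "i". Outside of ICU the same rule can be written directly over code points — a sketch only, not the production check:
#include <cstddef>
#include <vector>
// True when U+0131 (dotless i) is immediately followed by a combining mark
// in U+0300..U+0339 anywhere in the label.
bool HasDotlessIPlusCombiningMark(const std::vector<char32_t>& label) {
    for (std::size_t i = 0; i + 1 < label.size(); ++i) {
        if (label[i] == 0x0131 &&
            label[i + 1] >= 0x0300 && label[i + 1] <= 0x0339)
            return true;
    }
    return false;
}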
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: long Segment::ParseNext(const Cluster* pCurr, const Cluster*& pResult,
long long& pos, long& len) {
assert(pCurr);
assert(!pCurr->EOS());
assert(m_clusters);
pResult = 0;
if (pCurr->m_index >= 0) { // loaded (not merely preloaded)
assert(m_clusters[pCurr->m_index] == pCurr);
const long next_idx = pCurr->m_index + 1;
if (next_idx < m_clusterCount) {
pResult = m_clusters[next_idx];
return 0; // success
}
const long result = LoadCluster(pos, len);
if (result < 0) // error or underflow
return result;
if (result > 0) // no more clusters
{
return 1;
}
pResult = GetLast();
return 0; // success
}
assert(m_pos > 0);
long long total, avail;
long status = m_pReader->Length(&total, &avail);
if (status < 0) // error
return status;
assert((total < 0) || (avail <= total));
const long long segment_stop = (m_size < 0) ? -1 : m_start + m_size;
pos = pCurr->m_element_start;
if (pCurr->m_element_size >= 0)
pos += pCurr->m_element_size;
else {
if ((pos + 1) > avail) {
len = 1;
return E_BUFFER_NOT_FULL;
}
long long result = GetUIntLength(m_pReader, pos, len);
if (result < 0) // error
return static_cast<long>(result);
if (result > 0) // weird
return E_BUFFER_NOT_FULL;
if ((segment_stop >= 0) && ((pos + len) > segment_stop))
return E_FILE_FORMAT_INVALID;
if ((pos + len) > avail)
return E_BUFFER_NOT_FULL;
const long long id = ReadUInt(m_pReader, pos, len);
if (id != 0x0F43B675) // weird: not Cluster ID
return -1;
pos += len; // consume ID
if ((pos + 1) > avail) {
len = 1;
return E_BUFFER_NOT_FULL;
}
result = GetUIntLength(m_pReader, pos, len);
if (result < 0) // error
return static_cast<long>(result);
if (result > 0) // weird
return E_BUFFER_NOT_FULL;
if ((segment_stop >= 0) && ((pos + len) > segment_stop))
return E_FILE_FORMAT_INVALID;
if ((pos + len) > avail)
return E_BUFFER_NOT_FULL;
const long long size = ReadUInt(m_pReader, pos, len);
if (size < 0) // error
return static_cast<long>(size);
pos += len; // consume size field
const long long unknown_size = (1LL << (7 * len)) - 1;
if (size == unknown_size) // TODO: should never happen
return E_FILE_FORMAT_INVALID; // TODO: resolve this
if ((segment_stop >= 0) && ((pos + size) > segment_stop))
return E_FILE_FORMAT_INVALID;
pos += size; // consume payload (that is, the current cluster)
assert((segment_stop < 0) || (pos <= segment_stop));
}
for (;;) {
const long status = DoParseNext(pResult, pos, len);
if (status <= 1)
return status;
}
}
Commit Message: external/libvpx/libwebm: Update snapshot
Update libwebm snapshot. This update contains security fixes from upstream.
Upstream git hash: 229f49347d19b0ca0941e072b199a242ef6c5f2b
BUG=23167726
Change-Id: Id3e140e7b31ae11294724b1ecfe2e9c83b4d4207
(cherry picked from commit d0281a15b3c6bd91756e453cc9398c5ef412d99a)
CWE ID: CWE-20 | long Segment::ParseNext(const Cluster* pCurr, const Cluster*& pResult,
long long& pos, long& len) {
assert(pCurr);
assert(!pCurr->EOS());
assert(m_clusters);
pResult = 0;
if (pCurr->m_index >= 0) { // loaded (not merely preloaded)
assert(m_clusters[pCurr->m_index] == pCurr);
const long next_idx = pCurr->m_index + 1;
if (next_idx < m_clusterCount) {
pResult = m_clusters[next_idx];
return 0; // success
}
const long result = LoadCluster(pos, len);
if (result < 0) // error or underflow
return result;
if (result > 0) // no more clusters
{
return 1;
}
pResult = GetLast();
return 0; // success
}
assert(m_pos > 0);
long long total, avail;
long status = m_pReader->Length(&total, &avail);
if (status < 0) // error
return status;
assert((total < 0) || (avail <= total));
const long long segment_stop = (m_size < 0) ? -1 : m_start + m_size;
pos = pCurr->m_element_start;
if (pCurr->m_element_size >= 0)
pos += pCurr->m_element_size;
else {
if ((pos + 1) > avail) {
len = 1;
return E_BUFFER_NOT_FULL;
}
long long result = GetUIntLength(m_pReader, pos, len);
if (result < 0) // error
return static_cast<long>(result);
if (result > 0) // weird
return E_BUFFER_NOT_FULL;
if ((segment_stop >= 0) && ((pos + len) > segment_stop))
return E_FILE_FORMAT_INVALID;
if ((pos + len) > avail)
return E_BUFFER_NOT_FULL;
const long long id = ReadUInt(m_pReader, pos, len);
if (id != 0x0F43B675) // weird: not Cluster ID
return -1;
pos += len; // consume ID
if ((pos + 1) > avail) {
len = 1;
return E_BUFFER_NOT_FULL;
}
result = GetUIntLength(m_pReader, pos, len);
if (result < 0) // error
return static_cast<long>(result);
if (result > 0) // weird
return E_BUFFER_NOT_FULL;
if ((segment_stop >= 0) && ((pos + len) > segment_stop))
return E_FILE_FORMAT_INVALID;
if ((pos + len) > avail)
return E_BUFFER_NOT_FULL;
const long long size = ReadUInt(m_pReader, pos, len);
if (size < 0) // error
return static_cast<long>(size);
pos += len; // consume size field
const long long unknown_size = (1LL << (7 * len)) - 1;
if (size == unknown_size) // TODO: should never happen
return E_FILE_FORMAT_INVALID; // TODO: resolve this
if ((segment_stop >= 0) && ((pos + size) > segment_stop))
return E_FILE_FORMAT_INVALID;
pos += size; // consume payload (that is, the current cluster)
if (segment_stop >= 0 && pos > segment_stop)
return E_FILE_FORMAT_INVALID;
}
for (;;) {
const long status = DoParseNext(pResult, pos, len);
if (status <= 1)
return status;
}
}
| 4,386 |
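The substantive change above is that the post-payload bound that used to be assert()ed is now an explicit test returning E_FILE_FORMAT_INVALID; asserts compile out of release builds and the value is attacker-controlled. The general shape of that conversion, as a standalone sketch:
#include <cstdint>
const long kInvalidFormat = -2;   // stand-in for E_FILE_FORMAT_INVALID
// Validates that an element's end offset stays inside the segment.
long CheckElementEnd(int64_t pos, int64_t segment_stop) {
    if (segment_stop >= 0 && pos > segment_stop)
        return kInvalidFormat;    // reject the file, do not assert
    return 0;
}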
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: sf_open_fd (int fd, int mode, SF_INFO *sfinfo, int close_desc)
{ SF_PRIVATE *psf ;
if ((SF_CONTAINER (sfinfo->format)) == SF_FORMAT_SD2)
{ sf_errno = SFE_SD2_FD_DISALLOWED ;
return NULL ;
} ;
if ((psf = calloc (1, sizeof (SF_PRIVATE))) == NULL)
{ sf_errno = SFE_MALLOC_FAILED ;
return NULL ;
} ;
psf_init_files (psf) ;
copy_filename (psf, "") ;
psf->file.mode = mode ;
psf_set_file (psf, fd) ;
psf->is_pipe = psf_is_pipe (psf) ;
psf->fileoffset = psf_ftell (psf) ;
if (! close_desc)
psf->file.do_not_close_descriptor = SF_TRUE ;
return psf_open_file (psf, sfinfo) ;
} /* sf_open_fd */
Commit Message: src/ : Move to a variable length header buffer
Previously, the `psf->header` buffer was a fixed length specified by
`SF_HEADER_LEN` which was set to `12292`. This was problematic for
two reasons: this value was unnecessarily large for the majority
of files and too small for some others.
Now the size of the header buffer starts at 256 bytes and grows as
necessary up to a maximum of 100k.
CWE ID: CWE-119 | sf_open_fd (int fd, int mode, SF_INFO *sfinfo, int close_desc)
{ SF_PRIVATE *psf ;
if ((SF_CONTAINER (sfinfo->format)) == SF_FORMAT_SD2)
{ sf_errno = SFE_SD2_FD_DISALLOWED ;
return NULL ;
} ;
if ((psf = psf_allocate ()) == NULL)
{ sf_errno = SFE_MALLOC_FAILED ;
return NULL ;
} ;
psf_init_files (psf) ;
copy_filename (psf, "") ;
psf->file.mode = mode ;
psf_set_file (psf, fd) ;
psf->is_pipe = psf_is_pipe (psf) ;
psf->fileoffset = psf_ftell (psf) ;
if (! close_desc)
psf->file.do_not_close_descriptor = SF_TRUE ;
return psf_open_file (psf, sfinfo) ;
} /* sf_open_fd */
| 1,329 |
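The commit text above says the header buffer is now variable length, starting at 256 bytes and growing to a roughly 100k ceiling, and the visible diff swaps the flat calloc() for a psf_allocate() helper. A rough sketch of such a grow-on-demand buffer with a hard cap (invented names, not libsndfile's internals):
#include <cstddef>
#include <cstdlib>
struct HeaderBuf {
    unsigned char* ptr = nullptr;
    std::size_t len = 0;
};
// Grows the buffer geometrically up to kMax; returns false rather than ever
// allowing a header larger than the cap.
bool header_reserve(HeaderBuf* h, std::size_t needed) {
    const std::size_t kStart = 256, kMax = 100 * 1024;
    if (needed > kMax)
        return false;
    std::size_t newlen = h->len ? h->len : kStart;
    while (newlen < needed)
        newlen *= 2;
    if (newlen > kMax)
        newlen = kMax;
    void* p = std::realloc(h->ptr, newlen);
    if (!p)
        return false;
    h->ptr = static_cast<unsigned char*>(p);
    h->len = newlen;
    return true;
}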
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int udf_get_filename(struct super_block *sb, uint8_t *sname, uint8_t *dname,
int flen)
{
struct ustr *filename, *unifilename;
int len = 0;
filename = kmalloc(sizeof(struct ustr), GFP_NOFS);
if (!filename)
return 0;
unifilename = kmalloc(sizeof(struct ustr), GFP_NOFS);
if (!unifilename)
goto out1;
if (udf_build_ustr_exact(unifilename, sname, flen))
goto out2;
if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) {
if (!udf_CS0toUTF8(filename, unifilename)) {
udf_debug("Failed in udf_get_filename: sname = %s\n",
sname);
goto out2;
}
} else if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP)) {
if (!udf_CS0toNLS(UDF_SB(sb)->s_nls_map, filename,
unifilename)) {
udf_debug("Failed in udf_get_filename: sname = %s\n",
sname);
goto out2;
}
} else
goto out2;
len = udf_translate_to_linux(dname, filename->u_name, filename->u_len,
unifilename->u_name, unifilename->u_len);
out2:
kfree(unifilename);
out1:
kfree(filename);
return len;
}
Commit Message: udf: Check path length when reading symlink
Symlink reading code does not check whether the resulting path fits into
the page provided by the generic code. This isn't as easy as just
checking the symlink size because of various encoding conversions we
perform on path. So we have to check whether there is still enough space
in the buffer on the fly.
CC: [email protected]
Reported-by: Carl Henrik Lunde <[email protected]>
Signed-off-by: Jan Kara <[email protected]>
CWE ID: CWE-17 | int udf_get_filename(struct super_block *sb, uint8_t *sname, int slen,
uint8_t *dname, int dlen)
{
struct ustr *filename, *unifilename;
int len = 0;
filename = kmalloc(sizeof(struct ustr), GFP_NOFS);
if (!filename)
return 0;
unifilename = kmalloc(sizeof(struct ustr), GFP_NOFS);
if (!unifilename)
goto out1;
if (udf_build_ustr_exact(unifilename, sname, slen))
goto out2;
if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) {
if (!udf_CS0toUTF8(filename, unifilename)) {
udf_debug("Failed in udf_get_filename: sname = %s\n",
sname);
goto out2;
}
} else if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP)) {
if (!udf_CS0toNLS(UDF_SB(sb)->s_nls_map, filename,
unifilename)) {
udf_debug("Failed in udf_get_filename: sname = %s\n",
sname);
goto out2;
}
} else
goto out2;
len = udf_translate_to_linux(dname, dlen,
filename->u_name, filename->u_len,
unifilename->u_name, unifilename->u_len);
out2:
kfree(unifilename);
out1:
kfree(filename);
return len;
}
| 8,799 |
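The signature change above threads the destination length (dlen) down to udf_translate_to_linux() so the translated name can be refused instead of overrunning the page the VFS hands in. The core pattern — append only what fits, report failure otherwise — in a generic sketch, not the kernel's code:
#include <cstddef>
// Appends src[0..slen) to dst at *used, assuming *used <= dlen on entry;
// returns false instead of writing past dlen.
bool append_component(char* dst, std::size_t dlen, std::size_t* used,
                      const char* src, std::size_t slen) {
    if (slen > dlen - *used)
        return false;             // would overflow the caller's buffer
    for (std::size_t i = 0; i < slen; ++i)
        dst[*used + i] = src[i];
    *used += slen;
    return true;
}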
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static int init_ssl_connection(SSL *con)
{
int i;
const char *str;
X509 *peer;
long verify_error;
MS_STATIC char buf[BUFSIZ];
#ifndef OPENSSL_NO_KRB5
char *client_princ;
#endif
#if !defined(OPENSSL_NO_TLSEXT) && !defined(OPENSSL_NO_NEXTPROTONEG)
const unsigned char *next_proto_neg;
unsigned next_proto_neg_len;
#endif
unsigned char *exportedkeymat;
i = SSL_accept(con);
#ifdef CERT_CB_TEST_RETRY
{
while (i <= 0 && SSL_get_error(con, i) == SSL_ERROR_WANT_X509_LOOKUP
&& SSL_state(con) == SSL3_ST_SR_CLNT_HELLO_C) {
fprintf(stderr,
"LOOKUP from certificate callback during accept\n");
i = SSL_accept(con);
}
}
#endif
#ifndef OPENSSL_NO_SRP
while (i <= 0 && SSL_get_error(con, i) == SSL_ERROR_WANT_X509_LOOKUP) {
BIO_printf(bio_s_out, "LOOKUP during accept %s\n",
srp_callback_parm.login);
srp_callback_parm.user =
SRP_VBASE_get_by_user(srp_callback_parm.vb,
srp_callback_parm.login);
if (srp_callback_parm.user)
BIO_printf(bio_s_out, "LOOKUP done %s\n",
srp_callback_parm.user->info);
return (1);
}
BIO_printf(bio_err, "ERROR\n");
verify_error = SSL_get_verify_result(con);
if (verify_error != X509_V_OK) {
BIO_printf(bio_err, "verify error:%s\n",
X509_verify_cert_error_string(verify_error));
}
/* Always print any error messages */
ERR_print_errors(bio_err);
return (0);
}
if (s_brief)
print_ssl_summary(bio_err, con);
PEM_write_bio_SSL_SESSION(bio_s_out, SSL_get_session(con));
peer = SSL_get_peer_certificate(con);
if (peer != NULL) {
BIO_printf(bio_s_out, "Client certificate\n");
PEM_write_bio_X509(bio_s_out, peer);
X509_NAME_oneline(X509_get_subject_name(peer), buf, sizeof buf);
BIO_printf(bio_s_out, "subject=%s\n", buf);
X509_NAME_oneline(X509_get_issuer_name(peer), buf, sizeof buf);
BIO_printf(bio_s_out, "issuer=%s\n", buf);
X509_free(peer);
}
if (SSL_get_shared_ciphers(con, buf, sizeof buf) != NULL)
BIO_printf(bio_s_out, "Shared ciphers:%s\n", buf);
str = SSL_CIPHER_get_name(SSL_get_current_cipher(con));
ssl_print_sigalgs(bio_s_out, con);
#ifndef OPENSSL_NO_EC
ssl_print_point_formats(bio_s_out, con);
ssl_print_curves(bio_s_out, con, 0);
#endif
BIO_printf(bio_s_out, "CIPHER is %s\n", (str != NULL) ? str : "(NONE)");
#if !defined(OPENSSL_NO_TLSEXT) && !defined(OPENSSL_NO_NEXTPROTONEG)
SSL_get0_next_proto_negotiated(con, &next_proto_neg, &next_proto_neg_len);
if (next_proto_neg) {
BIO_printf(bio_s_out, "NEXTPROTO is ");
BIO_write(bio_s_out, next_proto_neg, next_proto_neg_len);
BIO_printf(bio_s_out, "\n");
}
#endif
#ifndef OPENSSL_NO_SRTP
{
SRTP_PROTECTION_PROFILE *srtp_profile
= SSL_get_selected_srtp_profile(con);
if (srtp_profile)
BIO_printf(bio_s_out, "SRTP Extension negotiated, profile=%s\n",
srtp_profile->name);
}
#endif
if (SSL_cache_hit(con))
BIO_printf(bio_s_out, "Reused session-id\n");
if (SSL_ctrl(con, SSL_CTRL_GET_FLAGS, 0, NULL) &
TLS1_FLAGS_TLS_PADDING_BUG)
BIO_printf(bio_s_out, "Peer has incorrect TLSv1 block padding\n");
#ifndef OPENSSL_NO_KRB5
client_princ = kssl_ctx_get0_client_princ(SSL_get0_kssl_ctx(con));
if (client_princ != NULL) {
BIO_printf(bio_s_out, "Kerberos peer principal is %s\n",
client_princ);
}
#endif /* OPENSSL_NO_KRB5 */
BIO_printf(bio_s_out, "Secure Renegotiation IS%s supported\n",
SSL_get_secure_renegotiation_support(con) ? "" : " NOT");
if (keymatexportlabel != NULL) {
BIO_printf(bio_s_out, "Keying material exporter:\n");
BIO_printf(bio_s_out, " Label: '%s'\n", keymatexportlabel);
BIO_printf(bio_s_out, " Length: %i bytes\n", keymatexportlen);
exportedkeymat = OPENSSL_malloc(keymatexportlen);
if (exportedkeymat != NULL) {
if (!SSL_export_keying_material(con, exportedkeymat,
keymatexportlen,
keymatexportlabel,
strlen(keymatexportlabel),
NULL, 0, 0)) {
BIO_printf(bio_s_out, " Error\n");
} else {
BIO_printf(bio_s_out, " Keying material: ");
for (i = 0; i < keymatexportlen; i++)
BIO_printf(bio_s_out, "%02X", exportedkeymat[i]);
BIO_printf(bio_s_out, "\n");
}
OPENSSL_free(exportedkeymat);
}
}
return (1);
}
Commit Message:
CWE ID: CWE-399 | static int init_ssl_connection(SSL *con)
{
int i;
const char *str;
X509 *peer;
long verify_error;
MS_STATIC char buf[BUFSIZ];
#ifndef OPENSSL_NO_KRB5
char *client_princ;
#endif
#if !defined(OPENSSL_NO_TLSEXT) && !defined(OPENSSL_NO_NEXTPROTONEG)
const unsigned char *next_proto_neg;
unsigned next_proto_neg_len;
#endif
unsigned char *exportedkeymat;
i = SSL_accept(con);
#ifdef CERT_CB_TEST_RETRY
{
while (i <= 0 && SSL_get_error(con, i) == SSL_ERROR_WANT_X509_LOOKUP
&& SSL_state(con) == SSL3_ST_SR_CLNT_HELLO_C) {
fprintf(stderr,
"LOOKUP from certificate callback during accept\n");
i = SSL_accept(con);
}
}
#endif
#ifndef OPENSSL_NO_SRP
while (i <= 0 && SSL_get_error(con, i) == SSL_ERROR_WANT_X509_LOOKUP) {
BIO_printf(bio_s_out, "LOOKUP during accept %s\n",
srp_callback_parm.login);
SRP_user_pwd_free(srp_callback_parm.user);
srp_callback_parm.user =
SRP_VBASE_get1_by_user(srp_callback_parm.vb,
srp_callback_parm.login);
if (srp_callback_parm.user)
BIO_printf(bio_s_out, "LOOKUP done %s\n",
srp_callback_parm.user->info);
return (1);
}
BIO_printf(bio_err, "ERROR\n");
verify_error = SSL_get_verify_result(con);
if (verify_error != X509_V_OK) {
BIO_printf(bio_err, "verify error:%s\n",
X509_verify_cert_error_string(verify_error));
}
/* Always print any error messages */
ERR_print_errors(bio_err);
return (0);
}
if (s_brief)
print_ssl_summary(bio_err, con);
PEM_write_bio_SSL_SESSION(bio_s_out, SSL_get_session(con));
peer = SSL_get_peer_certificate(con);
if (peer != NULL) {
BIO_printf(bio_s_out, "Client certificate\n");
PEM_write_bio_X509(bio_s_out, peer);
X509_NAME_oneline(X509_get_subject_name(peer), buf, sizeof buf);
BIO_printf(bio_s_out, "subject=%s\n", buf);
X509_NAME_oneline(X509_get_issuer_name(peer), buf, sizeof buf);
BIO_printf(bio_s_out, "issuer=%s\n", buf);
X509_free(peer);
}
if (SSL_get_shared_ciphers(con, buf, sizeof buf) != NULL)
BIO_printf(bio_s_out, "Shared ciphers:%s\n", buf);
str = SSL_CIPHER_get_name(SSL_get_current_cipher(con));
ssl_print_sigalgs(bio_s_out, con);
#ifndef OPENSSL_NO_EC
ssl_print_point_formats(bio_s_out, con);
ssl_print_curves(bio_s_out, con, 0);
#endif
BIO_printf(bio_s_out, "CIPHER is %s\n", (str != NULL) ? str : "(NONE)");
#if !defined(OPENSSL_NO_TLSEXT) && !defined(OPENSSL_NO_NEXTPROTONEG)
SSL_get0_next_proto_negotiated(con, &next_proto_neg, &next_proto_neg_len);
if (next_proto_neg) {
BIO_printf(bio_s_out, "NEXTPROTO is ");
BIO_write(bio_s_out, next_proto_neg, next_proto_neg_len);
BIO_printf(bio_s_out, "\n");
}
#endif
#ifndef OPENSSL_NO_SRTP
{
SRTP_PROTECTION_PROFILE *srtp_profile
= SSL_get_selected_srtp_profile(con);
if (srtp_profile)
BIO_printf(bio_s_out, "SRTP Extension negotiated, profile=%s\n",
srtp_profile->name);
}
#endif
if (SSL_cache_hit(con))
BIO_printf(bio_s_out, "Reused session-id\n");
if (SSL_ctrl(con, SSL_CTRL_GET_FLAGS, 0, NULL) &
TLS1_FLAGS_TLS_PADDING_BUG)
BIO_printf(bio_s_out, "Peer has incorrect TLSv1 block padding\n");
#ifndef OPENSSL_NO_KRB5
client_princ = kssl_ctx_get0_client_princ(SSL_get0_kssl_ctx(con));
if (client_princ != NULL) {
BIO_printf(bio_s_out, "Kerberos peer principal is %s\n",
client_princ);
}
#endif /* OPENSSL_NO_KRB5 */
BIO_printf(bio_s_out, "Secure Renegotiation IS%s supported\n",
SSL_get_secure_renegotiation_support(con) ? "" : " NOT");
if (keymatexportlabel != NULL) {
BIO_printf(bio_s_out, "Keying material exporter:\n");
BIO_printf(bio_s_out, " Label: '%s'\n", keymatexportlabel);
BIO_printf(bio_s_out, " Length: %i bytes\n", keymatexportlen);
exportedkeymat = OPENSSL_malloc(keymatexportlen);
if (exportedkeymat != NULL) {
if (!SSL_export_keying_material(con, exportedkeymat,
keymatexportlen,
keymatexportlabel,
strlen(keymatexportlabel),
NULL, 0, 0)) {
BIO_printf(bio_s_out, " Error\n");
} else {
BIO_printf(bio_s_out, " Keying material: ");
for (i = 0; i < keymatexportlen; i++)
BIO_printf(bio_s_out, "%02X", exportedkeymat[i]);
BIO_printf(bio_s_out, "\n");
}
OPENSSL_free(exportedkeymat);
}
}
return (1);
}
| 2,477 |
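The two-line fix above swaps the borrowing SRP_VBASE_get_by_user() for SRP_VBASE_get1_by_user(), which hands back a caller-owned copy, and frees the previous copy first — so the callback no longer holds a pointer into a database that other code may modify or free. The ownership pattern in isolation, with toy types rather than OpenSSL's:
#include <cstdlib>
#include <cstring>
struct UserPwd { char* info; };
UserPwd* lookup_get1(const char* login) {       // "get1": caller owns the result
    UserPwd* u = static_cast<UserPwd*>(std::malloc(sizeof(UserPwd)));
    if (u)
        u->info = strdup(login);                // POSIX strdup, for brevity
    return u;
}
void user_pwd_free(UserPwd* u) {
    if (!u)
        return;
    std::free(u->info);
    std::free(u);
}
struct CallbackParm { UserPwd* user = nullptr; };
void refresh_user(CallbackParm* parm, const char* login) {
    user_pwd_free(parm->user);         // release the old owned copy first
    parm->user = lookup_get1(login);   // never keep a borrowed internal pointer
}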
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: secret_core_crt (gcry_mpi_t M, gcry_mpi_t C,
gcry_mpi_t D, unsigned int Nlimbs,
gcry_mpi_t P, gcry_mpi_t Q, gcry_mpi_t U)
{
gcry_mpi_t m1 = mpi_alloc_secure ( Nlimbs + 1 );
gcry_mpi_t m2 = mpi_alloc_secure ( Nlimbs + 1 );
gcry_mpi_t h = mpi_alloc_secure ( Nlimbs + 1 );
/* m1 = c ^ (d mod (p-1)) mod p */
mpi_sub_ui ( h, P, 1 );
mpi_fdiv_r ( h, D, h );
mpi_powm ( m1, C, h, P );
/* m2 = c ^ (d mod (q-1)) mod q */
mpi_sub_ui ( h, Q, 1 );
mpi_fdiv_r ( h, D, h );
mpi_powm ( m2, C, h, Q );
/* h = u * ( m2 - m1 ) mod q */
mpi_sub ( h, m2, m1 );
/* Remove superfluous leading zeroes from INPUT. */
mpi_normalize (input);
if (!skey->p || !skey->q || !skey->u)
{
secret_core_std (output, input, skey->d, skey->n);
}
else
{
secret_core_crt (output, input, skey->d, mpi_get_nlimbs (skey->n),
skey->p, skey->q, skey->u);
}
}
Commit Message:
CWE ID: CWE-310 | secret_core_crt (gcry_mpi_t M, gcry_mpi_t C,
gcry_mpi_t D, unsigned int Nlimbs,
gcry_mpi_t P, gcry_mpi_t Q, gcry_mpi_t U)
{
gcry_mpi_t m1 = mpi_alloc_secure ( Nlimbs + 1 );
gcry_mpi_t m2 = mpi_alloc_secure ( Nlimbs + 1 );
gcry_mpi_t h = mpi_alloc_secure ( Nlimbs + 1 );
gcry_mpi_t D_blind = mpi_alloc_secure ( Nlimbs + 1 );
gcry_mpi_t r;
unsigned int r_nbits;
r_nbits = mpi_get_nbits (P) / 4;
if (r_nbits < 96)
r_nbits = 96;
r = mpi_alloc_secure ( (r_nbits + BITS_PER_MPI_LIMB-1)/BITS_PER_MPI_LIMB );
/* d_blind = (d mod (p-1)) + (p-1) * r */
/* m1 = c ^ d_blind mod p */
_gcry_mpi_randomize (r, r_nbits, GCRY_WEAK_RANDOM);
mpi_set_highbit (r, r_nbits - 1);
mpi_sub_ui ( h, P, 1 );
mpi_mul ( D_blind, h, r );
mpi_fdiv_r ( h, D, h );
mpi_add ( D_blind, D_blind, h );
mpi_powm ( m1, C, D_blind, P );
/* d_blind = (d mod (q-1)) + (q-1) * r */
/* m2 = c ^ d_blind mod q */
_gcry_mpi_randomize (r, r_nbits, GCRY_WEAK_RANDOM);
mpi_set_highbit (r, r_nbits - 1);
mpi_sub_ui ( h, Q, 1 );
mpi_mul ( D_blind, h, r );
mpi_fdiv_r ( h, D, h );
mpi_add ( D_blind, D_blind, h );
mpi_powm ( m2, C, D_blind, Q );
mpi_free ( r );
mpi_free ( D_blind );
/* h = u * ( m2 - m1 ) mod q */
mpi_sub ( h, m2, m1 );
/* Remove superfluous leading zeroes from INPUT. */
mpi_normalize (input);
if (!skey->p || !skey->q || !skey->u)
{
secret_core_std (output, input, skey->d, skey->n);
}
else
{
secret_core_crt (output, input, skey->d, mpi_get_nlimbs (skey->n),
skey->p, skey->q, skey->u);
}
}
| 16,364 |
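For reference, the identity the added exponent blinding relies on, written out: with p prime and p not dividing c, Fermat's little theorem gives c^(p-1) ≡ 1 (mod p), so for d_blind = (d mod (p-1)) + r*(p-1) with a fresh random r on every call,
    c^d_blind ≡ c^(d mod (p-1)) * (c^(p-1))^r ≡ c^(d mod (p-1)) (mod p).
The computed m1 (and symmetrically m2 mod q) is therefore unchanged, while the exponent actually fed to mpi_powm varies from call to call, which deprives a timing/side-channel attacker of a fixed secret exponent to measure.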
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void RenderFrameObserverNatives::OnDocumentElementCreated(
const v8::FunctionCallbackInfo<v8::Value>& args) {
CHECK(args.Length() == 2);
CHECK(args[0]->IsInt32());
CHECK(args[1]->IsFunction());
int frame_id = args[0]->Int32Value();
content::RenderFrame* frame = content::RenderFrame::FromRoutingID(frame_id);
if (!frame) {
LOG(WARNING) << "No render frame found to register LoadWatcher.";
return;
}
new LoadWatcher(context(), frame, args[1].As<v8::Function>());
args.GetReturnValue().Set(true);
}
Commit Message: Fix re-entrancy and lifetime issue in RenderFrameObserverNatives::OnDocumentElementCreated
BUG=585268,568130
Review URL: https://codereview.chromium.org/1684953002
Cr-Commit-Position: refs/heads/master@{#374758}
CWE ID: | void RenderFrameObserverNatives::OnDocumentElementCreated(
const v8::FunctionCallbackInfo<v8::Value>& args) {
CHECK(args.Length() == 2);
CHECK(args[0]->IsInt32());
CHECK(args[1]->IsFunction());
int frame_id = args[0]->Int32Value();
content::RenderFrame* frame = content::RenderFrame::FromRoutingID(frame_id);
if (!frame) {
LOG(WARNING) << "No render frame found to register LoadWatcher.";
return;
}
v8::Global<v8::Function> v8_callback(context()->isolate(),
args[1].As<v8::Function>());
base::Callback<void(bool)> callback(
base::Bind(&RenderFrameObserverNatives::InvokeCallback,
weak_ptr_factory_.GetWeakPtr(), base::Passed(&v8_callback)));
if (ExtensionFrameHelper::Get(frame)->did_create_current_document_element()) {
// If the document element is already created, then we can call the callback
// immediately (though use PostTask to ensure that the callback is called
// asynchronously).
base::MessageLoop::current()->PostTask(FROM_HERE,
base::Bind(callback, true));
} else {
new LoadWatcher(frame, callback);
}
args.GetReturnValue().Set(true);
}
| 11,386 |
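Two things changed above: the already-created case now just posts the callback, and the callback is bound through weak_ptr_factory_, so a destroyed observer cannot be re-entered. The same lifetime guard using only the standard library — a sketch, not Chromium's base::WeakPtr machinery — looks like:
#include <functional>
#include <memory>
// Assumes the observer is owned by a std::shared_ptr somewhere; the callback
// captures only a weak reference and silently does nothing once the owner is
// gone, instead of touching freed memory.
class Observer : public std::enable_shared_from_this<Observer> {
 public:
  std::function<void(bool)> MakeCallback() {
    std::weak_ptr<Observer> weak = weak_from_this();
    return [weak](bool succeeded) {
      if (auto self = weak.lock())
        self->InvokeCallback(succeeded);
    };
  }
 private:
  void InvokeCallback(bool succeeded) { (void)succeeded; /* run the stored V8 function */ }
};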
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void RenderProcessHostImpl::OnCompositorSurfaceBuffersSwappedNoHost(
int32 surface_id,
uint64 surface_handle,
int32 route_id,
const gfx::Size& size,
int32 gpu_process_host_id) {
TRACE_EVENT0("renderer_host",
"RenderWidgetHostImpl::OnCompositorSurfaceBuffersSwappedNoHost");
RenderWidgetHostImpl::AcknowledgeBufferPresent(route_id,
gpu_process_host_id,
false,
0);
}
Commit Message: Implement TextureImageTransportSurface using texture mailbox
This has a couple of advantages:
- allow tearing down and recreating the UI parent context without
losing the renderer contexts
- do not require a context to be able to generate textures when
creating the GLSurfaceHandle
- clearer ownership semantics that potentially allows for more
robust and easier lost context handling/thumbnailing/etc., since a texture is at
any given time owned by either: UI parent, mailbox, or
TextureImageTransportSurface
- simplify frontbuffer protection logic;
the frontbuffer textures are now owned by RWHV where they are refcounted
The TextureImageTransportSurface informs RenderWidgetHostView of the
mailbox names for the front- and backbuffer textures by
associating them with a surface_handle (1 or 2) in the AcceleratedSurfaceNew message.
During SwapBuffers() or PostSubBuffer() cycles, it then uses
produceTextureCHROMIUM() and consumeTextureCHROMIUM()
to transfer ownership between renderer and browser compositor.
RWHV sends back the surface_handle of the buffer being returned with the Swap ACK
(or 0 if no buffer is being returned in which case TextureImageTransportSurface will
allocate a new texture - note that this could be used to
simply keep textures for thumbnailing).
BUG=154815,139616
[email protected]
Review URL: https://chromiumcodereview.appspot.com/11194042
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@171569 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: | void RenderProcessHostImpl::OnCompositorSurfaceBuffersSwappedNoHost(
int32 surface_id,
uint64 surface_handle,
int32 route_id,
const gfx::Size& size,
int32 gpu_process_host_id) {
TRACE_EVENT0("renderer_host",
"RenderWidgetHostImpl::OnCompositorSurfaceBuffersSwappedNoHost");
RenderWidgetHostImpl::AcknowledgeBufferPresent(route_id,
gpu_process_host_id,
surface_handle,
0);
}
| 12,752 |
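
The net effect of this change is that the buffer-present acknowledgement carries the handle of the buffer being returned instead of a hard-coded false/0, so the producer can recycle that texture. A minimal hedged sketch of the idea follows; BufferPresentAck and MakeAck are hypothetical stand-ins, not the real IPC message.

#include <cstdint>
#include <iostream>

// Hypothetical acknowledgement sent back to the producer of a swapped buffer.
struct BufferPresentAck {
  int32_t route_id;
  int32_t gpu_process_host_id;
  uint64_t surface_handle;  // which buffer is being returned (0 = none)
  uint32_t sync_point;
};

// Forward the handle of the buffer we are done with; returning 0 would force
// the producer to allocate a fresh texture every frame.
BufferPresentAck MakeAck(int32_t route_id, int32_t host_id,
                         uint64_t surface_handle) {
  return BufferPresentAck{route_id, host_id, surface_handle, /*sync_point=*/0};
}

int main() {
  BufferPresentAck ack = MakeAck(/*route_id=*/7, /*host_id=*/3,
                                 /*surface_handle=*/2);
  std::cout << "returning buffer " << ack.surface_handle << "\n";
}
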
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: long long Cluster::GetPosition() const
{
const long long pos = m_element_start - m_pSegment->m_start;
assert(pos >= 0);
return pos;
}
Commit Message: libwebm: Pull from upstream
Rolling mkvparser from upstream. Primarily for fixing a bug on parsing
failures with certain Opus WebM files.
Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae
The diff is so huge because there were some style clean ups upstream.
But it was ensured that there were no breaking changes when the style
clean ups was done upstream.
Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039
CWE ID: CWE-119 | long long Cluster::GetPosition() const
long Cluster::GetIndex() const { return m_index; }
long long Cluster::GetPosition() const {
const long long pos = m_element_start - m_pSegment->m_start;
assert(pos >= 0);
return pos;
}
| 4,839 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: bool ID3::removeUnsynchronizationV2_4(bool iTunesHack) {
size_t oldSize = mSize;
size_t offset = 0;
while (mSize >= 10 && offset <= mSize - 10) {
if (!memcmp(&mData[offset], "\0\0\0\0", 4)) {
break;
}
size_t dataSize;
if (iTunesHack) {
dataSize = U32_AT(&mData[offset + 4]);
} else if (!ParseSyncsafeInteger(&mData[offset + 4], &dataSize)) {
return false;
}
if (dataSize > mSize - 10 - offset) {
return false;
}
uint16_t flags = U16_AT(&mData[offset + 8]);
uint16_t prevFlags = flags;
if (flags & 1) {
if (mSize < 14 || mSize - 14 < offset || dataSize < 4) {
return false;
}
memmove(&mData[offset + 10], &mData[offset + 14], mSize - offset - 14);
mSize -= 4;
dataSize -= 4;
flags &= ~1;
}
if (flags & 2) {
size_t readOffset = offset + 11;
size_t writeOffset = offset + 11;
for (size_t i = 0; i + 1 < dataSize; ++i) {
if (mData[readOffset - 1] == 0xff
&& mData[readOffset] == 0x00) {
++readOffset;
--mSize;
--dataSize;
}
mData[writeOffset++] = mData[readOffset++];
}
memmove(&mData[writeOffset], &mData[readOffset], oldSize - readOffset);
flags &= ~2;
}
if (flags != prevFlags || iTunesHack) {
WriteSyncsafeInteger(&mData[offset + 4], dataSize);
mData[offset + 8] = flags >> 8;
mData[offset + 9] = flags & 0xff;
}
offset += 10 + dataSize;
}
memset(&mData[mSize], 0, oldSize - mSize);
return true;
}
Commit Message: Fix out of bounds access
Bug: 34618607
Change-Id: I84f0ef948414d0b2d54e8948b6c30b8ae4da2b36
(cherry picked from commit d1c19c57f66d91ea8033c8fa6510a8760a6e663b)
CWE ID: CWE-119 | bool ID3::removeUnsynchronizationV2_4(bool iTunesHack) {
size_t oldSize = mSize;
size_t offset = 0;
while (mSize >= 10 && offset <= mSize - 10) {
if (!memcmp(&mData[offset], "\0\0\0\0", 4)) {
break;
}
size_t dataSize;
if (iTunesHack) {
dataSize = U32_AT(&mData[offset + 4]);
} else if (!ParseSyncsafeInteger(&mData[offset + 4], &dataSize)) {
return false;
}
if (dataSize > mSize - 10 - offset) {
return false;
}
uint16_t flags = U16_AT(&mData[offset + 8]);
uint16_t prevFlags = flags;
if (flags & 1) {
if (mSize < 14 || mSize - 14 < offset || dataSize < 4) {
return false;
}
memmove(&mData[offset + 10], &mData[offset + 14], mSize - offset - 14);
mSize -= 4;
dataSize -= 4;
flags &= ~1;
}
if ((flags & 2) && (dataSize >= 2)) {
size_t readOffset = offset + 11;
size_t writeOffset = offset + 11;
for (size_t i = 0; i + 1 < dataSize; ++i) {
if (mData[readOffset - 1] == 0xff
&& mData[readOffset] == 0x00) {
++readOffset;
--mSize;
--dataSize;
}
mData[writeOffset++] = mData[readOffset++];
}
if (readOffset <= oldSize) {
memmove(&mData[writeOffset], &mData[readOffset], oldSize - readOffset);
} else {
ALOGE("b/34618607 (%zu %zu %zu %zu)", readOffset, writeOffset, oldSize, mSize);
android_errorWriteLog(0x534e4554, "34618607");
}
}
flags &= ~2;
if (flags != prevFlags || iTunesHack) {
WriteSyncsafeInteger(&mData[offset + 4], dataSize);
mData[offset + 8] = flags >> 8;
mData[offset + 9] = flags & 0xff;
}
offset += 10 + dataSize;
}
memset(&mData[mSize], 0, oldSize - mSize);
return true;
}
| 8,954 |
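
The readOffset <= oldSize guard added above is an instance of a general rule: prove that a computed source offset still lies inside the buffer before handing it to memmove. An illustrative standalone version, with a std::vector buffer and an invented MoveTail helper (this is not the stagefright code):

#include <cstdio>
#include <cstring>
#include <vector>

// Move the tail of |buf| starting at |read_offset| down to |write_offset|,
// but only after checking both offsets; log and refuse instead of reading
// or writing out of bounds.
bool MoveTail(std::vector<unsigned char>& buf,
              size_t write_offset, size_t read_offset) {
  if (read_offset > buf.size() || write_offset > read_offset) {
    std::fprintf(stderr, "refusing out-of-bounds move (%zu %zu %zu)\n",
                 read_offset, write_offset, buf.size());
    return false;
  }
  std::memmove(buf.data() + write_offset, buf.data() + read_offset,
               buf.size() - read_offset);
  return true;
}

int main() {
  std::vector<unsigned char> buf(32, 0xff);
  std::printf("ok move: %d\n", MoveTail(buf, 4, 8));    // 1
  std::printf("bad move: %d\n", MoveTail(buf, 4, 64));  // 0, rejected
}
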
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: __imlib_MergeUpdate(ImlibUpdate * u, int w, int h, int hgapmax)
{
ImlibUpdate *nu = NULL, *uu;
struct _tile *t;
int tw, th, x, y, i;
int *gaps = NULL;
/* if theres no rects to process.. return NULL */
if (!u)
return NULL;
tw = w >> TB;
if (w & TM)
tw++;
th = h >> TB;
if (h & TM)
th++;
t = malloc(tw * th * sizeof(struct _tile));
/* fill in tiles to be all not used */
for (i = 0, y = 0; y < th; y++)
{
for (x = 0; x < tw; x++)
t[i++].used = T_UNUSED;
}
/* fill in all tiles */
for (uu = u; uu; uu = uu->next)
{
CLIP(uu->x, uu->y, uu->w, uu->h, 0, 0, w, h);
for (y = uu->y >> TB; y <= ((uu->y + uu->h - 1) >> TB); y++)
{
for (x = uu->x >> TB; x <= ((uu->x + uu->w - 1) >> TB); x++)
T(x, y).used = T_USED;
}
}
/* scan each line - if > hgapmax gaps between tiles, then fill smallest */
gaps = malloc(tw * sizeof(int));
for (y = 0; y < th; y++)
{
int hgaps = 0, start = -1, min;
char have = 1, gap = 0;
for (x = 0; x < tw; x++)
gaps[x] = 0;
for (x = 0; x < tw; x++)
{
if ((have) && (T(x, y).used == T_UNUSED))
{
start = x;
gap = 1;
have = 0;
}
else if ((!have) && (gap) && (T(x, y).used & T_USED))
{
gap = 0;
hgaps++;
have = 1;
gaps[start] = x - start;
}
else if (T(x, y).used & T_USED)
have = 1;
}
while (hgaps > hgapmax)
{
start = -1;
min = tw;
for (x = 0; x < tw; x++)
{
if ((gaps[x] > 0) && (gaps[x] < min))
{
start = x;
min = gaps[x];
}
}
if (start >= 0)
{
gaps[start] = 0;
for (x = start;
T(x, y).used == T_UNUSED; T(x++, y).used = T_USED);
hgaps--;
}
}
}
free(gaps);
/* coalesce tiles into larger blocks and make new rect list */
for (y = 0; y < th; y++)
{
for (x = 0; x < tw; x++)
{
if (T(x, y).used & T_USED)
{
int xx, yy, ww, hh, ok, xww;
for (xx = x + 1, ww = 1;
(T(xx, y).used & T_USED) && (xx < tw); xx++, ww++);
xww = x + ww;
for (yy = y + 1, hh = 1, ok = 1;
(yy < th) && (ok); yy++, hh++)
{
for (xx = x; xx < xww; xx++)
{
if (!(T(xx, yy).used & T_USED))
{
ok = 0;
hh--;
break;
}
}
}
for (yy = y; yy < (y + hh); yy++)
{
for (xx = x; xx < xww; xx++)
T(xx, yy).used = T_UNUSED;
}
nu = __imlib_AddUpdate(nu, (x << TB), (y << TB),
(ww << TB), (hh << TB));
}
}
}
free(t);
__imlib_FreeUpdates(u);
return nu;
}
Commit Message:
CWE ID: CWE-119 | __imlib_MergeUpdate(ImlibUpdate * u, int w, int h, int hgapmax)
{
ImlibUpdate *nu = NULL, *uu;
struct _tile *t;
int tw, th, x, y, i;
int *gaps = NULL;
/* if theres no rects to process.. return NULL */
if (!u)
return NULL;
tw = w >> TB;
if (w & TM)
tw++;
th = h >> TB;
if (h & TM)
th++;
t = malloc(tw * th * sizeof(struct _tile));
/* fill in tiles to be all not used */
for (i = 0, y = 0; y < th; y++)
{
for (x = 0; x < tw; x++)
t[i++].used = T_UNUSED;
}
/* fill in all tiles */
for (uu = u; uu; uu = uu->next)
{
CLIP(uu->x, uu->y, uu->w, uu->h, 0, 0, w, h);
for (y = uu->y >> TB; y <= ((uu->y + uu->h - 1) >> TB); y++)
{
for (x = uu->x >> TB; x <= ((uu->x + uu->w - 1) >> TB); x++)
T(x, y).used = T_USED;
}
}
/* scan each line - if > hgapmax gaps between tiles, then fill smallest */
gaps = malloc(tw * sizeof(int));
for (y = 0; y < th; y++)
{
int hgaps = 0, start = -1, min;
char have = 1, gap = 0;
for (x = 0; x < tw; x++)
gaps[x] = 0;
for (x = 0; x < tw; x++)
{
if ((have) && (T(x, y).used == T_UNUSED))
{
start = x;
gap = 1;
have = 0;
}
else if ((!have) && (gap) && (T(x, y).used & T_USED))
{
gap = 0;
hgaps++;
have = 1;
gaps[start] = x - start;
}
else if (T(x, y).used & T_USED)
have = 1;
}
while (hgaps > hgapmax)
{
start = -1;
min = tw;
for (x = 0; x < tw; x++)
{
if ((gaps[x] > 0) && (gaps[x] < min))
{
start = x;
min = gaps[x];
}
}
if (start >= 0)
{
gaps[start] = 0;
for (x = start;
T(x, y).used == T_UNUSED; T(x++, y).used = T_USED);
hgaps--;
}
}
}
free(gaps);
/* coalesce tiles into larger blocks and make new rect list */
for (y = 0; y < th; y++)
{
for (x = 0; x < tw; x++)
{
if (T(x, y).used & T_USED)
{
int xx, yy, ww, hh, ok, xww;
for (xx = x + 1, ww = 1;
(xx < tw) && (T(xx, y).used & T_USED); xx++, ww++);
xww = x + ww;
for (yy = y + 1, hh = 1, ok = 1;
(yy < th) && (ok); yy++, hh++)
{
for (xx = x; xx < xww; xx++)
{
if (!(T(xx, yy).used & T_USED))
{
ok = 0;
hh--;
break;
}
}
}
for (yy = y; yy < (y + hh); yy++)
{
for (xx = x; xx < xww; xx++)
T(xx, yy).used = T_UNUSED;
}
nu = __imlib_AddUpdate(nu, (x << TB), (y << TB),
(ww << TB), (hh << TB));
}
}
}
free(t);
__imlib_FreeUpdates(u);
return nu;
}
| 6,335 |
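
The one-line fix above relies on && short-circuit evaluation: the index bound has to be tested before the array element it guards is read. The same idiom in a tiny self-contained form (RunLength is an invented helper, not the imlib2 tile code):

#include <cstddef>
#include <iostream>
#include <vector>

// Count how many consecutive set entries follow |x| in |row|.
// The bounds test must come first: && short-circuits, so row[xx] is only
// evaluated once xx is known to be a valid index.
size_t RunLength(const std::vector<bool>& row, size_t x) {
  size_t ww = 0;
  for (size_t xx = x; xx < row.size() && row[xx]; ++xx)  // bounds check first
    ++ww;
  return ww;
}

int main() {
  std::vector<bool> row = {true, true, true, false, true};
  std::cout << RunLength(row, 0) << "\n";  // 3
  std::cout << RunLength(row, 4) << "\n";  // 1, stops at row.size()
}
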
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void BrowserViewRenderer::SetTotalRootLayerScrollOffset(
gfx::Vector2dF scroll_offset_dip) {
if (scroll_offset_dip_ == scroll_offset_dip)
return;
scroll_offset_dip_ = scroll_offset_dip;
gfx::Vector2d max_offset = max_scroll_offset();
gfx::Vector2d scroll_offset;
if (max_scroll_offset_dip_.x()) {
scroll_offset.set_x((scroll_offset_dip.x() * max_offset.x()) /
max_scroll_offset_dip_.x());
}
if (max_scroll_offset_dip_.y()) {
scroll_offset.set_y((scroll_offset_dip.y() * max_offset.y()) /
max_scroll_offset_dip_.y());
}
DCHECK_LE(0, scroll_offset.x());
DCHECK_LE(0, scroll_offset.y());
DCHECK_LE(scroll_offset.x(), max_offset.x());
DCHECK_LE(scroll_offset.y(), max_offset.y());
client_->ScrollContainerViewTo(scroll_offset);
}
Commit Message: sync compositor: pass simple gfx types by const ref
See bug for reasoning
BUG=159273
Review URL: https://codereview.chromium.org/1417893006
Cr-Commit-Position: refs/heads/master@{#356653}
CWE ID: CWE-399 | void BrowserViewRenderer::SetTotalRootLayerScrollOffset(
const gfx::Vector2dF& scroll_offset_dip) {
if (scroll_offset_dip_ == scroll_offset_dip)
return;
scroll_offset_dip_ = scroll_offset_dip;
gfx::Vector2d max_offset = max_scroll_offset();
gfx::Vector2d scroll_offset;
if (max_scroll_offset_dip_.x()) {
scroll_offset.set_x((scroll_offset_dip.x() * max_offset.x()) /
max_scroll_offset_dip_.x());
}
if (max_scroll_offset_dip_.y()) {
scroll_offset.set_y((scroll_offset_dip.y() * max_offset.y()) /
max_scroll_offset_dip_.y());
}
DCHECK_LE(0, scroll_offset.x());
DCHECK_LE(0, scroll_offset.y());
DCHECK_LE(scroll_offset.x(), max_offset.x());
DCHECK_LE(scroll_offset.y(), max_offset.y());
client_->ScrollContainerViewTo(scroll_offset);
}
| 15,376 |
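
This record is purely a calling-convention fix: taking the small geometry struct by const reference removes a copy at every call while the callee still cannot modify the caller's value. A minimal sketch with a stand-in Vector2dF (not the real gfx type):

#include <iostream>

struct Vector2dF {
  float x = 0;
  float y = 0;
};

// Const reference: no per-call copy of the struct, and no way for the
// callee to mutate the caller's offset.
void SetScrollOffset(const Vector2dF& offset) {
  std::cout << "scroll to " << offset.x << "," << offset.y << "\n";
}

int main() {
  Vector2dF offset{3.5f, 7.25f};
  SetScrollOffset(offset);
}
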
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: BuildTestPacket(uint16_t id, uint16_t off, int mf, const char content,
int content_len)
{
Packet *p = NULL;
int hlen = 20;
int ttl = 64;
uint8_t *pcontent;
IPV4Hdr ip4h;
p = SCCalloc(1, sizeof(*p) + default_packet_size);
if (unlikely(p == NULL))
return NULL;
PACKET_INITIALIZE(p);
gettimeofday(&p->ts, NULL);
ip4h.ip_verhl = 4 << 4;
ip4h.ip_verhl |= hlen >> 2;
ip4h.ip_len = htons(hlen + content_len);
ip4h.ip_id = htons(id);
ip4h.ip_off = htons(off);
if (mf)
ip4h.ip_off = htons(IP_MF | off);
else
ip4h.ip_off = htons(off);
ip4h.ip_ttl = ttl;
ip4h.ip_proto = IPPROTO_ICMP;
ip4h.s_ip_src.s_addr = 0x01010101; /* 1.1.1.1 */
ip4h.s_ip_dst.s_addr = 0x02020202; /* 2.2.2.2 */
/* copy content_len crap, we need full length */
PacketCopyData(p, (uint8_t *)&ip4h, sizeof(ip4h));
p->ip4h = (IPV4Hdr *)GET_PKT_DATA(p);
SET_IPV4_SRC_ADDR(p, &p->src);
SET_IPV4_DST_ADDR(p, &p->dst);
pcontent = SCCalloc(1, content_len);
if (unlikely(pcontent == NULL))
return NULL;
memset(pcontent, content, content_len);
PacketCopyDataOffset(p, hlen, pcontent, content_len);
SET_PKT_LEN(p, hlen + content_len);
SCFree(pcontent);
p->ip4h->ip_csum = IPV4CalculateChecksum((uint16_t *)GET_PKT_DATA(p), hlen);
/* Self test. */
if (IPV4_GET_VER(p) != 4)
goto error;
if (IPV4_GET_HLEN(p) != hlen)
goto error;
if (IPV4_GET_IPLEN(p) != hlen + content_len)
goto error;
if (IPV4_GET_IPID(p) != id)
goto error;
if (IPV4_GET_IPOFFSET(p) != off)
goto error;
if (IPV4_GET_MF(p) != mf)
goto error;
if (IPV4_GET_IPTTL(p) != ttl)
goto error;
if (IPV4_GET_IPPROTO(p) != IPPROTO_ICMP)
goto error;
return p;
error:
if (p != NULL)
SCFree(p);
return NULL;
}
Commit Message: defrag - take protocol into account during re-assembly
The IP protocol was not being used to match fragments with
their packets allowing a carefully constructed packet
with a different protocol to be matched, allowing re-assembly
to complete, creating a packet that would not be re-assembled
by the destination host.
CWE ID: CWE-358 | BuildTestPacket(uint16_t id, uint16_t off, int mf, const char content,
BuildTestPacket(uint8_t proto, uint16_t id, uint16_t off, int mf,
const char content, int content_len)
{
Packet *p = NULL;
int hlen = 20;
int ttl = 64;
uint8_t *pcontent;
IPV4Hdr ip4h;
p = SCCalloc(1, sizeof(*p) + default_packet_size);
if (unlikely(p == NULL))
return NULL;
PACKET_INITIALIZE(p);
gettimeofday(&p->ts, NULL);
ip4h.ip_verhl = 4 << 4;
ip4h.ip_verhl |= hlen >> 2;
ip4h.ip_len = htons(hlen + content_len);
ip4h.ip_id = htons(id);
ip4h.ip_off = htons(off);
if (mf)
ip4h.ip_off = htons(IP_MF | off);
else
ip4h.ip_off = htons(off);
ip4h.ip_ttl = ttl;
ip4h.ip_proto = proto;
ip4h.s_ip_src.s_addr = 0x01010101; /* 1.1.1.1 */
ip4h.s_ip_dst.s_addr = 0x02020202; /* 2.2.2.2 */
/* copy content_len crap, we need full length */
PacketCopyData(p, (uint8_t *)&ip4h, sizeof(ip4h));
p->ip4h = (IPV4Hdr *)GET_PKT_DATA(p);
SET_IPV4_SRC_ADDR(p, &p->src);
SET_IPV4_DST_ADDR(p, &p->dst);
pcontent = SCCalloc(1, content_len);
if (unlikely(pcontent == NULL))
return NULL;
memset(pcontent, content, content_len);
PacketCopyDataOffset(p, hlen, pcontent, content_len);
SET_PKT_LEN(p, hlen + content_len);
SCFree(pcontent);
p->ip4h->ip_csum = IPV4CalculateChecksum((uint16_t *)GET_PKT_DATA(p), hlen);
/* Self test. */
if (IPV4_GET_VER(p) != 4)
goto error;
if (IPV4_GET_HLEN(p) != hlen)
goto error;
if (IPV4_GET_IPLEN(p) != hlen + content_len)
goto error;
if (IPV4_GET_IPID(p) != id)
goto error;
if (IPV4_GET_IPOFFSET(p) != off)
goto error;
if (IPV4_GET_MF(p) != mf)
goto error;
if (IPV4_GET_IPTTL(p) != ttl)
goto error;
if (IPV4_GET_IPPROTO(p) != proto)
goto error;
return p;
error:
if (p != NULL)
SCFree(p);
return NULL;
}
| 18,330 |
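
The defrag fix works because the IP protocol becomes part of the key that ties a fragment to its reassembly state, so a crafted fragment carrying a different protocol can no longer complete someone else's packet. A hedged sketch of such a key; FragKey is invented for illustration and is not Suricata's actual tracker structure.

#include <cstdint>
#include <iostream>
#include <tuple>

// Key used to associate an IPv4 fragment with a reassembly tracker.
// Leaving |proto| out of the comparison is exactly the bug fixed above.
struct FragKey {
  uint32_t src;
  uint32_t dst;
  uint16_t ip_id;
  uint8_t proto;  // must participate in the match

  bool operator==(const FragKey& o) const {
    return std::tie(src, dst, ip_id, proto) ==
           std::tie(o.src, o.dst, o.ip_id, o.proto);
  }
};

int main() {
  FragKey icmp{0x01010101, 0x02020202, 42, /*IPPROTO_ICMP=*/1};
  FragKey udp {0x01010101, 0x02020202, 42, /*IPPROTO_UDP=*/17};
  std::cout << "same tracker? " << (icmp == udp) << "\n";  // 0: kept apart
}
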
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void SkiaOutputSurfaceImpl::Reshape(const gfx::Size& size,
float device_scale_factor,
const gfx::ColorSpace& color_space,
bool has_alpha,
bool use_stencil) {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
if (initialize_waitable_event_) {
initialize_waitable_event_->Wait();
initialize_waitable_event_ = nullptr;
}
SkSurfaceCharacterization* characterization = nullptr;
if (characterization_.isValid()) {
characterization_ =
characterization_.createResized(size.width(), size.height());
RecreateRootRecorder();
} else {
characterization = &characterization_;
initialize_waitable_event_ = std::make_unique<base::WaitableEvent>(
base::WaitableEvent::ResetPolicy::MANUAL,
base::WaitableEvent::InitialState::NOT_SIGNALED);
}
auto callback = base::BindOnce(
&SkiaOutputSurfaceImplOnGpu::Reshape,
base::Unretained(impl_on_gpu_.get()), size, device_scale_factor,
std::move(color_space), has_alpha, use_stencil, pre_transform_,
characterization, initialize_waitable_event_.get());
ScheduleGpuTask(std::move(callback), std::vector<gpu::SyncToken>());
}
Commit Message: SkiaRenderer: Support changing color space
SkiaOutputSurfaceImpl did not handle the color space changing after it
was created previously. The SkSurfaceCharacterization color space was
only set during the first time Reshape() ran when the charactization is
returned from the GPU thread. If the color space was changed later the
SkSurface and SkDDL color spaces no longer matched and draw failed.
Bug: 1009452
Change-Id: Ib6d2083efc7e7eb6f94782342e92a809b69d6fdc
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1841811
Reviewed-by: Peng Huang <[email protected]>
Commit-Queue: kylechar <[email protected]>
Cr-Commit-Position: refs/heads/master@{#702946}
CWE ID: CWE-704 | void SkiaOutputSurfaceImpl::Reshape(const gfx::Size& size,
float device_scale_factor,
const gfx::ColorSpace& color_space,
bool has_alpha,
bool use_stencil) {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
if (initialize_waitable_event_) {
initialize_waitable_event_->Wait();
initialize_waitable_event_.reset();
}
SkSurfaceCharacterization* characterization = nullptr;
if (characterization_.isValid()) {
sk_sp<SkColorSpace> sk_color_space = color_space.ToSkColorSpace();
if (!SkColorSpace::Equals(characterization_.refColorSpace().get(),
sk_color_space.get())) {
characterization_ = characterization_.createColorSpace(sk_color_space);
}
if (size.width() != characterization_.width() ||
size.height() != characterization_.height()) {
characterization_ =
characterization_.createResized(size.width(), size.height());
}
// TODO(kylechar): Update |characterization_| if |use_alpha| changes.
RecreateRootRecorder();
} else {
characterization = &characterization_;
initialize_waitable_event_ = std::make_unique<base::WaitableEvent>(
base::WaitableEvent::ResetPolicy::MANUAL,
base::WaitableEvent::InitialState::NOT_SIGNALED);
}
auto task = base::BindOnce(&SkiaOutputSurfaceImplOnGpu::Reshape,
base::Unretained(impl_on_gpu_.get()), size,
device_scale_factor, color_space, has_alpha,
use_stencil, pre_transform_, characterization,
initialize_waitable_event_.get());
ScheduleGpuTask(std::move(task), {});
}
| 29,547 |
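
The rule behind this fix is that a cached surface description has to be revalidated against every property of the new request — resizing alone leaves a stale color space behind. A standalone sketch with a hypothetical SurfaceConfig; plain strings stand in for real color-space objects, and this is not the Skia API.

#include <iostream>
#include <string>

// Hypothetical cached description of the output surface.
struct SurfaceConfig {
  int width = 0;
  int height = 0;
  std::string color_space;  // stand-in for a real color space object
};

// Compare every field of the cached config against the new request and
// rebuild whatever changed, not just the dimensions.
void Reshape(SurfaceConfig& cached, int w, int h,
             const std::string& color_space) {
  if (cached.color_space != color_space) {
    cached.color_space = color_space;
    std::cout << "color space changed -> recreate surface\n";
  }
  if (cached.width != w || cached.height != h) {
    cached.width = w;
    cached.height = h;
    std::cout << "size changed -> resize surface\n";
  }
}

int main() {
  SurfaceConfig cfg{800, 600, "sRGB"};
  Reshape(cfg, 800, 600, "Display-P3");   // color space only
  Reshape(cfg, 1024, 768, "Display-P3");  // size only
}
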
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int tls1_process_ticket(SSL *s, unsigned char *session_id, int len,
const unsigned char *limit, SSL_SESSION **ret)
{
/* Point after session ID in client hello */
const unsigned char *p = session_id + len;
unsigned short i;
*ret = NULL;
s->tlsext_ticket_expected = 0;
/*
* If tickets disabled behave as if no ticket present to permit stateful
* resumption.
*/
if (SSL_get_options(s) & SSL_OP_NO_TICKET)
return 0;
if ((s->version <= SSL3_VERSION) || !limit)
return 0;
if (p >= limit)
return -1;
/* Skip past DTLS cookie */
if (SSL_IS_DTLS(s)) {
i = *(p++);
p += i;
if (p >= limit)
return -1;
}
/* Skip past cipher list */
n2s(p, i);
p += i;
if (p >= limit)
return -1;
/* Skip past compression algorithm list */
i = *(p++);
p += i;
if (p > limit)
return -1;
/* Now at start of extensions */
if ((p + 2) >= limit)
return 0;
n2s(p, i);
while ((p + 4) <= limit) {
unsigned short type, size;
n2s(p, type);
n2s(p, size);
if (p + size > limit)
return 0;
        if (type == TLSEXT_TYPE_session_ticket) {
            int r;
            if (size == 0) {
                /*
                 * The client will accept a ticket but doesn't currently
                 * have one.
                 */
                s->tlsext_ticket_expected = 1;
                return 1;
            }
if (s->tls_session_secret_cb) {
/*
* Indicate that the ticket couldn't be decrypted rather than
* generating the session from ticket now, trigger
* abbreviated handshake based on external mechanism to
* calculate the master secret later.
*/
return 2;
}
r = tls_decrypt_ticket(s, p, size, session_id, len, ret);
switch (r) {
case 2: /* ticket couldn't be decrypted */
s->tlsext_ticket_expected = 1;
return 2;
case 3: /* ticket was decrypted */
return r;
case 4: /* ticket decrypted but need to renew */
s->tlsext_ticket_expected = 1;
return 3;
default: /* fatal error */
return -1;
}
}
p += size;
}
Commit Message:
CWE ID: CWE-190 | int tls1_process_ticket(SSL *s, unsigned char *session_id, int len,
const unsigned char *limit, SSL_SESSION **ret)
{
/* Point after session ID in client hello */
const unsigned char *p = session_id + len;
unsigned short i;
*ret = NULL;
s->tlsext_ticket_expected = 0;
/*
* If tickets disabled behave as if no ticket present to permit stateful
* resumption.
*/
if (SSL_get_options(s) & SSL_OP_NO_TICKET)
return 0;
if ((s->version <= SSL3_VERSION) || !limit)
return 0;
if (p >= limit)
return -1;
/* Skip past DTLS cookie */
if (SSL_IS_DTLS(s)) {
i = *(p++);
if (limit - p <= i)
return -1;
p += i;
}
/* Skip past cipher list */
n2s(p, i);
if (limit - p <= i)
return -1;
p += i;
/* Skip past compression algorithm list */
i = *(p++);
if (limit - p < i)
return -1;
p += i;
/* Now at start of extensions */
if (limit - p <= 2)
return 0;
n2s(p, i);
while (limit - p >= 4) {
unsigned short type, size;
n2s(p, type);
n2s(p, size);
if (limit - p < size)
return 0;
        if (type == TLSEXT_TYPE_session_ticket) {
            int r;
            if (size == 0) {
                /*
                 * The client will accept a ticket but doesn't currently
                 * have one.
                 */
                s->tlsext_ticket_expected = 1;
                return 1;
            }
if (s->tls_session_secret_cb) {
/*
* Indicate that the ticket couldn't be decrypted rather than
* generating the session from ticket now, trigger
* abbreviated handshake based on external mechanism to
* calculate the master secret later.
*/
return 2;
}
r = tls_decrypt_ticket(s, p, size, session_id, len, ret);
switch (r) {
case 2: /* ticket couldn't be decrypted */
s->tlsext_ticket_expected = 1;
return 2;
case 3: /* ticket was decrypted */
return r;
case 4: /* ticket decrypted but need to renew */
s->tlsext_ticket_expected = 1;
return 3;
default: /* fatal error */
return -1;
}
}
p += size;
}
| 16,216 |
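
The CWE-190 pattern corrected throughout this record is worth isolating: ask whether enough bytes remain by computing limit - p and comparing it to the needed size, instead of forming p + size, which can overflow the pointer and wrap past the comparison. A small self-contained demonstration; HasRoom is an invented helper, not OpenSSL code.

#include <cstddef>
#include <cstdint>
#include <iostream>

// "Are there at least |need| bytes between p and limit?"
// Subtracting two pointers into the same buffer cannot overflow, whereas
// p + need can wrap and make the naive (p + need > limit) check pass.
bool HasRoom(const uint8_t* p, const uint8_t* limit, size_t need) {
  return p <= limit && static_cast<size_t>(limit - p) >= need;
}

int main() {
  uint8_t buf[64] = {0};
  const uint8_t* p = buf + 60;
  const uint8_t* limit = buf + sizeof(buf);
  std::cout << HasRoom(p, limit, 4) << "\n";    // 1: exactly enough
  std::cout << HasRoom(p, limit, 400) << "\n";  // 0: rejected, no overflow
}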