winedbg: Cache GDB qXfer command result for chunked fetching.

GDB does not retrieve the result of a qXfer command at once; instead, it
issues a series of requests to obtain the result one "chunk" at a time,
and concatenates those chunks internally.  Each request contains offset
and length variables that specify which portion of the result shall be
retrieved.

Today, Winedbg handles this by generating the entire result data each
time a request is received and slicing out the requested range for the
response.  This is not only inefficient due to repeated computation,
but also prone to race conditions since the result may change between
successive chunk requests due to the dynamic nature of some commands
such as "libraries" and "threads."

Fix this by caching the result into a buffer at the first request, and
using the buffer to serve successive chunk requests.  The cache is
invalidated when the remote requests a different object, or the debugger
reaches the end of the result cache buffer.

Signed-off-by: Jinoh Kang <jinoh.kang.kr@gmail.com>
Signed-off-by: Rémi Bernon <rbernon@codeweavers.com>
Signed-off-by: Alexandre Julliard <julliard@winehq.org>
This commit is contained in:
Jinoh Kang 2021-11-24 00:11:08 +09:00 committed by Alexandre Julliard
parent 595bfdee71
commit f18c0db314
1 changed files with 32 additions and 10 deletions

View File

@ -931,15 +931,17 @@ static inline void packet_reply_register_hex_to(struct gdb_context* gdbctx, dbg_
packet_reply_hex_to(gdbctx, cpu_register_ptr(gdbctx, ctx, idx), cpu_register_map[idx].length);
}
static void packet_reply_xfer(struct gdb_context* gdbctx, size_t off, size_t len)
static void packet_reply_xfer(struct gdb_context* gdbctx, size_t off, size_t len, BOOL* more_p)
{
BOOL more;
size_t data_len, trunc_len;
packet_reply_open(gdbctx);
data_len = gdbctx->qxfer_buffer.len;
/* check if off + len would overflow */
if (off < data_len && off + len < data_len)
more = off < data_len && off + len < data_len;
if (more)
packet_reply_add(gdbctx, "m");
else
packet_reply_add(gdbctx, "l");
@ -951,6 +953,8 @@ static void packet_reply_xfer(struct gdb_context* gdbctx, size_t off, size_t len
}
packet_reply_close(gdbctx);
*more_p = more;
}
/* =============================================== *
@ -2071,6 +2075,7 @@ static enum packet_return packet_query(struct gdb_context* gdbctx)
{
enum packet_return result;
int i;
BOOL more;
for (i = 0; i < ARRAY_SIZE(qxfer_handlers); i++)
{
@ -2086,21 +2091,38 @@ static enum packet_return packet_query(struct gdb_context* gdbctx)
TRACE("qXfer %s read %s %u,%u\n", debugstr_a(object_name), debugstr_a(annex), off, len);
gdbctx->qxfer_object_idx = i;
strcpy(gdbctx->qxfer_object_annex, annex);
if (off > 0 &&
gdbctx->qxfer_buffer.len > 0 &&
gdbctx->qxfer_object_idx == i &&
strcmp(gdbctx->qxfer_object_annex, annex) == 0)
{
result = packet_send_buffer;
TRACE("qXfer read result = %d (cached)\n", result);
}
else
{
reply_buffer_clear(&gdbctx->qxfer_buffer);
result = (*qxfer_handlers[i].handler)(gdbctx);
TRACE("qXfer read result = %d\n", result);
gdbctx->qxfer_object_idx = i;
strcpy(gdbctx->qxfer_object_annex, annex);
result = (*qxfer_handlers[i].handler)(gdbctx);
TRACE("qXfer read result = %d\n", result);
}
more = FALSE;
if ((result & ~packet_last_f) == packet_send_buffer)
{
packet_reply_xfer(gdbctx, off, len);
packet_reply_xfer(gdbctx, off, len, &more);
result = (result & packet_last_f) | packet_done;
}
gdbctx->qxfer_object_idx = -1;
gdbctx->qxfer_object_annex[0] = '\0';
reply_buffer_clear(&gdbctx->qxfer_buffer);
if (!more)
{
gdbctx->qxfer_object_idx = -1;
gdbctx->qxfer_object_annex[0] = '\0';
reply_buffer_clear(&gdbctx->qxfer_buffer);
}
return result;
}