From 406e44895d577dfc0240117a08a2fd942470bc06 Mon Sep 17 00:00:00 2001
From: Evgeniy Naydanov
Date: Wed, 24 Jan 2024 10:35:00 +0300
Subject: [PATCH] target/riscv: reset delays during batch scans

This commit is related to testing how OpenOCD responds to `dmi.busy`.

Consider testing on Spike (e.g. the `riscv-tests/debug` testsuite).
Spike returns `dmi.busy` if there were fewer than a given number of RTI
cycles (`required_rti_cycles`) between DR_UPDATE and DR_CAPTURE:
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/jtag_dtm.cc#L145
https://github.com/riscv-software-src/riscv-isa-sim/blob/master/riscv/jtag_dtm.cc#L202
`required_rti_cycles` gets its value from the `--dmi-rti` CLI argument
and is constant throughout the run.

OpenOCD learns this required number of RTI cycles by starting with zero
and increasing it whenever `dmi.busy` is encountered. So the required
number of RTI cycles is learned during the first DMI access in
`examine()`.

To induce `dmi.busy` on demand, the `riscv reset_delays <wait>` command
is provided. This command initializes the
`riscv_info::reset_delays_wait` counter to the provided `<wait>` value.
The counter is decremented before each DMI access, and when it reaches
zero the learned number of required RTI cycles is reset, so that DMI
access results in `dmi.busy`.

Now consider running a batch of accesses. Before this change, all the
accesses in the batch had the same number of RTI cycles between them.
So either:
* The number of accesses in the batch was less than the value of the
  `riscv_info::reset_delays_wait` counter, and there was no `dmi.busy`
  throughout the batch.
* The number of accesses in the batch was greater than or equal to the
  value of the counter, and the first access of the batch resulted in
  `dmi.busy`.
Therefore it was impossible to encounter `dmi.busy` on any scan of the
batch except the first one. A sketch of the new per-range idle-count
scheme follows below.
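The following standalone sketch (not part of the patch itself; the
names `struct idle_range` and `idle_for_scan` are illustrative only)
shows the idea the patch implements: instead of one idle count for the
whole batch, keep a sorted list of (idle_count, until_scan) ranges, so
the idle count can drop to zero at an arbitrary scan index and
`dmi.busy` can be induced mid-batch:

	/* Standalone illustration; compiles with any C99 compiler. */
	#include <assert.h>
	#include <stddef.h>
	#include <stdio.h>

	struct idle_range {
		size_t idle_count; /* RTI cycles used for scans in this range */
		size_t until_scan; /* range ends before this scan index */
	};

	/* Mirrors the lookup in find_idle_counts_entry(): the first range
	 * whose `until_scan` lies past `scan_idx` defines its idle count. */
	static size_t idle_for_scan(const struct idle_range *ranges, size_t n,
			size_t scan_idx)
	{
		for (size_t i = 0; i < n; ++i)
			if (ranges[i].until_scan > scan_idx)
				return ranges[i].idle_count;
		assert(0 && "scan index beyond the last range");
		return 0;
	}

	int main(void)
	{
		/* A batch of 8 scans where reset_delays_wait hit zero at scan 3:
		 * scans 0..2 keep the learned idle count (say, 5), scans 3..7 get
		 * zero idle cycles, so scan 3 is the first that can see dmi.busy. */
		const struct idle_range ranges[] = {
			{ .idle_count = 5, .until_scan = 3 },
			{ .idle_count = 0, .until_scan = 8 },
		};
		for (size_t scan = 0; scan < 8; ++scan)
			printf("scan %zu: %zu idle cycles\n", scan,
					idle_for_scan(ranges, 2, scan));
		return 0;
	}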
Change-Id: Ib0714ecaf7d2e11878140d16d9aa6152ff20f1e9
Signed-off-by: Evgeniy Naydanov
---
 src/target/riscv/batch.c     | 108 ++++++++++++++++++++++++++++++-----
 src/target/riscv/batch.h     |   8 ++-
 src/target/riscv/riscv-013.c |  32 ++++++++---
 src/target/riscv/riscv.c     |   7 +--
 src/target/riscv/riscv.h     |   2 +-
 5 files changed, 127 insertions(+), 30 deletions(-)

diff --git a/src/target/riscv/batch.c b/src/target/riscv/batch.c
index faa92568b6..a49da397c8 100644
--- a/src/target/riscv/batch.c
+++ b/src/target/riscv/batch.c
@@ -18,7 +18,13 @@
 
 static void dump_field(int idle, const struct scan_field *field);
 
-struct riscv_batch *riscv_batch_alloc(struct target *target, size_t scans, size_t idle)
+typedef struct {
+	struct list_head list;
+	size_t idle_count;
+	size_t until_scan;
+} idle_count_info_t;
+
+struct riscv_batch *riscv_batch_alloc(struct target *target, size_t scans, size_t idle_count)
 {
 	scans += BATCH_RESERVED_SCANS;
 	struct riscv_batch *out = calloc(1, sizeof(*out));
@@ -29,7 +35,7 @@ struct riscv_batch *riscv_batch_alloc(struct target *target, size_t scans, size_
 
 	out->target = target;
 	out->allocated_scans = scans;
-	out->idle_count = idle;
+	INIT_LIST_HEAD(&out->idle_counts);
 	out->last_scan = RISCV_SCAN_TYPE_INVALID;
 	out->data_out = NULL;
@@ -38,6 +44,15 @@ struct riscv_batch *riscv_batch_alloc(struct target *target, size_t scans, size_
 	out->bscan_ctxt = NULL;
 	out->read_keys = NULL;
 
+	idle_count_info_t * const new_entry = malloc(sizeof(*new_entry));
+	if (!new_entry) {
+		LOG_ERROR("Out of memory!");
+		goto alloc_error;
+	}
+	new_entry->until_scan = scans;
+	new_entry->idle_count = idle_count;
+	list_add(&new_entry->list, &out->idle_counts);
+
 	/* FIXME: There is potential for memory usage reduction. We could allocate
 	 * smaller buffers than DMI_SCAN_BUF_SIZE (that is, buffers that correspond to
 	 * the real DR scan length on the given target) */
@@ -78,6 +93,10 @@ struct riscv_batch *riscv_batch_alloc(struct target *target, size_t scans, size_
 
 void riscv_batch_free(struct riscv_batch *batch)
 {
+	idle_count_info_t *entry, *tmp;
+	list_for_each_entry_safe(entry, tmp, &batch->idle_counts, list)
+		free(entry);
+
 	free(batch->data_in);
 	free(batch->data_out);
 	free(batch->fields);
@@ -91,6 +110,43 @@ bool riscv_batch_full(struct riscv_batch *batch)
 	return riscv_batch_available_scans(batch) == 0;
 }
 
+static void fill_jtag_queue(const struct riscv_batch *batch)
+{
+	size_t i = 0;
+	const idle_count_info_t *idle_counts_entry;
+	list_for_each_entry(idle_counts_entry, &batch->idle_counts, list) {
+		const size_t idle_count = idle_counts_entry->idle_count;
+		const size_t until = MIN(batch->used_scans,
+				idle_counts_entry->until_scan);
+		for (; i < until; ++i) {
+			const struct scan_field * const field = batch->fields + i;
+			if (bscan_tunnel_ir_width != 0)
+				riscv_add_bscan_tunneled_scan(batch->target, field,
+						batch->bscan_ctxt + i);
+			else
+				jtag_add_dr_scan(batch->target->tap, 1, field, TAP_IDLE);
+
+			if (idle_count > 0)
+				jtag_add_runtest(idle_count, TAP_IDLE);
+		}
+	}
+	assert(i == batch->used_scans);
+}
+
+static void batch_dump_fields(const struct riscv_batch *batch)
+{
+	size_t i = 0;
+	const idle_count_info_t *idle_counts_entry;
+	list_for_each_entry(idle_counts_entry, &batch->idle_counts, list) {
+		const size_t idle_count = idle_counts_entry->idle_count;
+		const size_t until = MIN(batch->used_scans,
+				idle_counts_entry->until_scan);
+		for (; i < until; ++i)
+			dump_field(idle_count, batch->fields + i);
+	}
+	assert(i == batch->used_scans);
+}
+
 int riscv_batch_run(struct riscv_batch *batch)
 {
 	if (batch->used_scans == 0) {
@@ -100,15 +156,7 @@ int riscv_batch_run(struct riscv_batch *batch)
 
 	riscv_batch_add_nop(batch);
 
-	for (size_t i = 0; i < batch->used_scans; ++i) {
-		if (bscan_tunnel_ir_width != 0)
-			riscv_add_bscan_tunneled_scan(batch->target, batch->fields + i, batch->bscan_ctxt + i);
-		else
-			jtag_add_dr_scan(batch->target->tap, 1, batch->fields + i, TAP_IDLE);
-
-		if (batch->idle_count > 0)
-			jtag_add_runtest(batch->idle_count, TAP_IDLE);
-	}
+	fill_jtag_queue(batch);
 
 	keep_alive();
@@ -127,9 +175,7 @@ int riscv_batch_run(struct riscv_batch *batch)
 		}
 	}
 
-	for (size_t i = 0; i < batch->used_scans; ++i)
-		dump_field(batch->idle_count, batch->fields + i);
-
+	batch_dump_fields(batch);
 	return ERROR_OK;
 }
@@ -249,3 +295,37 @@ bool riscv_batch_dmi_busy_encountered(const struct riscv_batch *batch)
 	const uint64_t in = buf_get_u64(field->in_value, 0, field->num_bits);
 	return get_field(in, DTM_DMI_OP) == DTM_DMI_OP_BUSY;
 }
+
+/* Returns the entry in `batch->idle_counts` that defines idle_count for the scan. */
+static idle_count_info_t *find_idle_counts_entry(struct riscv_batch *batch, size_t scan_idx)
+{
+	assert(!list_empty(&batch->idle_counts));
+
+	idle_count_info_t *entry;
+	list_for_each_entry(entry, &batch->idle_counts, list)
+		if (entry->until_scan > scan_idx)
+			break;
+	assert(!list_entry_is_head(entry, &batch->idle_counts, list));
+	return entry;
+}
+
+int riscv_batch_change_idle_used_from_scan(struct riscv_batch *batch, size_t new_idle, size_t scan_idx)
+{
+	idle_count_info_t * const new_entry = malloc(sizeof(*new_entry));
+	if (!new_entry) {
+		LOG_ERROR("Out of memory!");
+		return ERROR_FAIL;
+	}
+	new_entry->until_scan = scan_idx;
+	idle_count_info_t *old_entry = find_idle_counts_entry(batch, scan_idx);
+	/* Add new entry before the old one. */
+	list_add_tail(&new_entry->list, &old_entry->list);
+	assert(new_entry->until_scan < old_entry->until_scan);
+	/* new entry now defines the range until the scan (non-inclusive) */
+	new_entry->idle_count = old_entry->idle_count;
+	/* old entry now defines the range from the scan (inclusive) */
+	old_entry->idle_count = new_idle;
+	LOG_DEBUG("Will use idle == %zu from scan %zu until scan %zu.", old_entry->idle_count,
+			new_entry->until_scan, old_entry->until_scan);
+	return ERROR_OK;
+}
diff --git a/src/target/riscv/batch.h b/src/target/riscv/batch.h
index 839e13e827..d1626c652a 100644
--- a/src/target/riscv/batch.h
+++ b/src/target/riscv/batch.h
@@ -24,7 +24,7 @@ struct riscv_batch {
 	size_t allocated_scans;
 	size_t used_scans;
 
-	size_t idle_count;
+	struct list_head idle_counts;
 
 	uint8_t *data_out;
 	uint8_t *data_in;
@@ -49,7 +49,7 @@ struct riscv_batch {
 /* Allocates (or frees) a new scan set. "scans" is the maximum number of JTAG
  * scans that can be issued to this object, and idle is the number of JTAG idle
  * cycles between every real scan. */
-struct riscv_batch *riscv_batch_alloc(struct target *target, size_t scans, size_t idle);
+struct riscv_batch *riscv_batch_alloc(struct target *target, size_t scans, size_t idle_count);
 void riscv_batch_free(struct riscv_batch *batch);
 
 /* Checks to see if this batch is full. */
@@ -78,4 +78,8 @@ size_t riscv_batch_available_scans(struct riscv_batch *batch);
 /* Return true iff the last scan in the batch returned DMI_OP_BUSY. */
 bool riscv_batch_dmi_busy_encountered(const struct riscv_batch *batch);
 
+/* Change the number of idle cycles used starting from the given scan. */
+int riscv_batch_change_idle_used_from_scan(struct riscv_batch *batch,
+		size_t new_idle, size_t scan_idx);
+
 #endif
diff --git a/src/target/riscv/riscv-013.c b/src/target/riscv/riscv-013.c
index 0134548b1c..edb03ace47 100644
--- a/src/target/riscv/riscv-013.c
+++ b/src/target/riscv/riscv-013.c
@@ -2514,18 +2514,32 @@ static int sb_write_address(struct target *target, target_addr_t address,
 			(uint32_t)address, false, ensure_success);
 }
 
-static int batch_run(const struct target *target, struct riscv_batch *batch)
+static int maybe_add_delays_reset(const struct target *target, struct riscv_batch *batch)
 {
-	RISCV013_INFO(info);
 	RISCV_INFO(r);
-	if (r->reset_delays_wait >= 0) {
-		r->reset_delays_wait -= batch->used_scans;
-		if (r->reset_delays_wait <= 0) {
-			batch->idle_count = 0;
-			info->dmi_busy_delay = 0;
-			info->ac_busy_delay = 0;
-		}
+	if (r->reset_delays_wait < 0)
+		return ERROR_OK;
+	/* TODO: riscv_batch_run() adds a nop to the end of the batch. This
+	 * behavior is a bit peculiar and should probably be changed. */
+	const size_t scans_to_run = batch->used_scans + 1;
+	if ((size_t)r->reset_delays_wait >= scans_to_run) {
+		r->reset_delays_wait -= scans_to_run;
+		return ERROR_OK;
 	}
+	const int result = riscv_batch_change_idle_used_from_scan(batch, 0, r->reset_delays_wait);
+	r->reset_delays_wait = -1;
+	LOG_TARGET_DEBUG(target, "reset_delays_wait done");
+	RISCV013_INFO(info);
+	info->dmi_busy_delay = 0;
+	info->ac_busy_delay = 0;
+	return result;
+}
+
+static int batch_run(const struct target *target, struct riscv_batch *batch)
+{
+	int result = maybe_add_delays_reset(target, batch);
+	if (result != ERROR_OK)
+		return result;
 	return riscv_batch_run(batch);
 }
diff --git a/src/target/riscv/riscv.c b/src/target/riscv/riscv.c
index 60c26baeb5..f27723a2dd 100644
--- a/src/target/riscv/riscv.c
+++ b/src/target/riscv/riscv.c
@@ -450,6 +450,7 @@ static int riscv_init_target(struct command_context *cmd_ctx,
 	LOG_TARGET_DEBUG(target, "riscv_init_target()");
 	RISCV_INFO(info);
 	info->cmd_ctx = cmd_ctx;
+	info->reset_delays_wait = -1;
 
 	select_dtmcontrol.num_bits = target->tap->ir_length;
 	select_dbus.num_bits = target->tap->ir_length;
@@ -3850,10 +3851,8 @@ COMMAND_HANDLER(riscv_reset_delays)
 {
 	int wait = 0;
 
-	if (CMD_ARGC > 1) {
-		LOG_ERROR("Command takes at most one argument");
+	if (CMD_ARGC > 1)
 		return ERROR_COMMAND_SYNTAX_ERROR;
-	}
 
 	if (CMD_ARGC == 1)
 		COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], wait);
@@ -6454,7 +6453,7 @@ int riscv_init_registers(struct target *target)
 	return ERROR_OK;
 }
 
-void riscv_add_bscan_tunneled_scan(struct target *target, struct scan_field *field,
+void riscv_add_bscan_tunneled_scan(struct target *target, const struct scan_field *field,
 				   riscv_bscan_tunneled_scan_context_t *ctxt)
 {
 	jtag_add_ir_scan(target->tap, &select_user4, TAP_IDLE);
diff --git a/src/target/riscv/riscv.h b/src/target/riscv/riscv.h
index 389860c210..2f2e4b24cd 100644
--- a/src/target/riscv/riscv.h
+++ b/src/target/riscv/riscv.h
@@ -452,7 +452,7 @@ void riscv_semihosting_init(struct target *target);
 
 enum semihosting_result riscv_semihosting(struct target *target, int *retval);
 
-void riscv_add_bscan_tunneled_scan(struct target *target, struct scan_field *field,
+void riscv_add_bscan_tunneled_scan(struct target *target, const struct scan_field *field,
 		riscv_bscan_tunneled_scan_context_t *ctxt);
 int riscv_read_by_any_size(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer);