From 84467c1e8f3c755a7af5dce97a1e77fdccc49151 Mon Sep 17 00:00:00 2001 From: Antonio Martinez Zambrana Date: Mon, 9 Dec 2024 19:06:13 +0000 Subject: [PATCH 1/5] [dv, aon_timer] Wait for writes to cross the CDC boundary In order to simplify the TB model the sequences wait until the changes make it to the AON-side Signed-off-by: Antonio Martinez Zambrana --- .../dv/env/seq_lib/aon_timer_base_vseq.sv | 67 ++++++++++++++++--- .../dv/env/seq_lib/aon_timer_jump_vseq.sv | 4 +- .../env/seq_lib/aon_timer_prescaler_vseq.sv | 6 ++ 3 files changed, 64 insertions(+), 13 deletions(-) diff --git a/hw/ip/aon_timer/dv/env/seq_lib/aon_timer_base_vseq.sv b/hw/ip/aon_timer/dv/env/seq_lib/aon_timer_base_vseq.sv index f7d3fdc36832d..fd5fe257f6109 100644 --- a/hw/ip/aon_timer/dv/env/seq_lib/aon_timer_base_vseq.sv +++ b/hw/ip/aon_timer/dv/env/seq_lib/aon_timer_base_vseq.sv @@ -59,6 +59,7 @@ class aon_timer_base_vseq extends cip_base_vseq #( extern virtual task apply_reset(string kind = "HARD"); extern virtual task apply_resets_concurrently(int reset_duration_ps = 0); extern task wait_for_interrupt(bit intr_state_read = 1); + extern task write_wkup_reg(input uvm_object ptr, input uvm_reg_data_t value); endclass : aon_timer_base_vseq @@ -98,13 +99,16 @@ task aon_timer_base_vseq::aon_timer_shutdown(); `uvm_info(`gfn, "Shutting down AON Timer...", UVM_LOW) `uvm_info(`gfn, "Writing 0 to WKUP_CTRL and WDOG_CTRL to disable AON timer", UVM_HIGH) - csr_utils_pkg::csr_wr(ral.wkup_ctrl.enable, 1'b0); + write_wkup_reg(ral.wkup_ctrl.enable, 1'b0); + `uvm_info(`gfn, "write_reg wdog_ctr.enable", UVM_DEBUG); csr_utils_pkg::csr_wr(ral.wdog_ctrl.enable, 1'b0); `uvm_info(`gfn, "Clearing interrupts, count registers and wakeup request.", UVM_HIGH) // Clear wake-up request if we have any csr_utils_pkg::csr_wr(ral.wkup_cause, 1'b0); + // We need to ensure the prediction has kicked in before we read the intr_state + wait (ral.intr_state.is_busy() == 0); // Clear the interrupts csr_utils_pkg::csr_wr(ral.intr_state, 2'b11); @@ -113,9 +117,9 @@ task aon_timer_base_vseq::aon_timer_shutdown(); "to WDOG_COUNT."}, wkup_count, wdog_count), UVM_LOW) // Register Write - csr_utils_pkg::csr_wr(ral.wkup_count_lo, wkup_count[31:0]); - csr_utils_pkg::csr_wr(ral.wkup_count_hi, wkup_count[63:32]); - csr_utils_pkg::csr_wr(ral.wdog_count, wdog_count); + write_wkup_reg(ral.wkup_count_lo, wkup_count[31:0]); + write_wkup_reg(ral.wkup_count_hi, wkup_count[63:32]); + write_wkup_reg(ral.wdog_count, wdog_count); // Wait to settle registers on AON timer domain cfg.aon_clk_rst_vif.wait_clks(5); @@ -123,27 +127,27 @@ endtask : aon_timer_shutdown // setup basic aon_timer features task aon_timer_base_vseq::aon_timer_init(); - + bit wkup_thold_lo_we, wkup_thold_hi_we; // Clear the interrupts csr_utils_pkg::csr_wr(ral.intr_state, 2'b11); `uvm_info(`gfn, "Initializating AON Timer. 
Writing 0 to WKUP_COUNT and WDOG_COUNT", UVM_LOW) // Register Write - csr_utils_pkg::csr_wr(ral.wkup_count_lo, 32'h0000_0000); - csr_utils_pkg::csr_wr(ral.wkup_count_hi, 32'h0000_0000); + write_wkup_reg(ral.wkup_count_lo, 32'h0000_0000); + write_wkup_reg(ral.wkup_count_hi, 32'h0000_0000); csr_utils_pkg::csr_wr(ral.wdog_count, 32'h0000_0000); `uvm_info(`gfn, "Randomizing AON Timer thresholds", UVM_HIGH) `uvm_info(`gfn, $sformatf("Writing 0x%0h to wkup_thold", wkup_thold), UVM_HIGH) - csr_utils_pkg::csr_wr(ral.wkup_thold_lo, wkup_thold[31:0]); - csr_utils_pkg::csr_wr(ral.wkup_thold_hi, wkup_thold[63:32]); + write_wkup_reg(ral.wkup_thold_lo, wkup_thold[31:0]); + write_wkup_reg(ral.wkup_thold_hi, wkup_thold[63:32]); `uvm_info(`gfn, $sformatf("Writing 0x%0h to wdog_bark_thold", wdog_bark_thold), UVM_HIGH) - csr_utils_pkg::csr_wr(ral.wdog_bark_thold, wdog_bark_thold); + write_wkup_reg(ral.wdog_bark_thold, wdog_bark_thold); `uvm_info(`gfn, $sformatf("Writing 0x%0h to wdog_bite_thold", wdog_bite_thold), UVM_HIGH) - csr_utils_pkg::csr_wr(ral.wdog_bite_thold, wdog_bite_thold); + write_wkup_reg(ral.wdog_bite_thold, wdog_bite_thold); cfg.lc_escalate_en_vif.drive(0); @@ -188,6 +192,9 @@ task aon_timer_base_vseq::wait_for_interrupt(bit intr_state_read = 1); if (intr_state_read) begin // Wait 2 clocks to ensure interrupt is visible on intr_state read cfg.aon_clk_rst_vif.wait_clks(2); + + // We need to ensure the prediction has kicked in before we read the intr_state + wait (ral.intr_state.is_busy()==0); csr_utils_pkg::csr_rd(ral.intr_state, intr_state_value); end @@ -197,3 +204,41 @@ task aon_timer_base_vseq::wait_for_interrupt(bit intr_state_read = 1); end end endtask : wait_for_interrupt + +// Use only to write wkup regs, since some wdog regs won't see the WE signal go high until REGWEN +// is sent +task aon_timer_base_vseq::write_wkup_reg(input uvm_object ptr, input uvm_reg_data_t value); + bit we; + string path_to_we; + csr_field_t csr_or_fld = decode_csr_or_field(ptr); + + if (csr_or_fld.csr == null) + `uvm_fatal(`gfn, "Couldn't decode argument into CSR reg") + path_to_we = {"tb.dut.u_reg.aon_", csr_or_fld.csr.get_name(), "_we"}; + // Fork killable by reset + fork + begin : iso_fork + fork + wait(cfg.under_reset); + begin + csr_utils_pkg::csr_wr(ptr, value); + // After write we wait for WE to go high and then low + do begin + if (! uvm_hdl_read(path_to_we, we)) + `uvm_error (`gfn, $sformatf("HDL Read from %s failed", path_to_we)) + if (we === 0) + cfg.aon_clk_rst_vif.wait_clks(1); // enabled is synchronised to the aon domain + end while (!we); + do begin + if (! 
uvm_hdl_read(path_to_we, we)) + `uvm_error (`gfn, $sformatf("HDL Read from %s failed", path_to_we)) + if (we === 1) + cfg.aon_clk_rst_vif.wait_clks(1); // enabled is synchronised to the aon domain + end while (we); + end + join_any + disable fork; + end : iso_fork + join + +endtask : write_wkup_reg diff --git a/hw/ip/aon_timer/dv/env/seq_lib/aon_timer_jump_vseq.sv b/hw/ip/aon_timer/dv/env/seq_lib/aon_timer_jump_vseq.sv index e32e57709e217..0a3fcd8cef31a 100644 --- a/hw/ip/aon_timer/dv/env/seq_lib/aon_timer_jump_vseq.sv +++ b/hw/ip/aon_timer/dv/env/seq_lib/aon_timer_jump_vseq.sv @@ -41,8 +41,8 @@ endtask : body task aon_timer_jump_vseq::jump_configure(); // Write random value to the COUNT registers - csr_utils_pkg::csr_wr(ral.wkup_count_lo, wkup_count[31:0]); - csr_utils_pkg::csr_wr(ral.wkup_count_hi, wkup_count[63:32]); + write_wkup_reg(ral.wkup_count_lo, wkup_count[31:0]); + write_wkup_reg(ral.wkup_count_hi, wkup_count[63:32]); `uvm_info(`gfn, $sformatf("\n\t Writing random COUNT value of %d to WKUP", wkup_count), UVM_HIGH) diff --git a/hw/ip/aon_timer/dv/env/seq_lib/aon_timer_prescaler_vseq.sv b/hw/ip/aon_timer/dv/env/seq_lib/aon_timer_prescaler_vseq.sv index 22149bdef6a48..29c3acbc61786 100644 --- a/hw/ip/aon_timer/dv/env/seq_lib/aon_timer_prescaler_vseq.sv +++ b/hw/ip/aon_timer/dv/env/seq_lib/aon_timer_prescaler_vseq.sv @@ -18,6 +18,7 @@ class aon_timer_prescaler_vseq extends aon_timer_base_vseq; endclass : aon_timer_prescaler_vseq + constraint aon_timer_prescaler_vseq::thold_count_c { wkup_thold inside {[1:2]}; wdog_bark_thold inside {[1:2]}; @@ -31,10 +32,12 @@ function aon_timer_prescaler_vseq::new (string name=""); endfunction : new task aon_timer_prescaler_vseq::body(); + aon_timer_init(); prescaler_configure(); wait_for_interrupt(); aon_timer_shutdown(); + endtask : body task aon_timer_prescaler_vseq::prescaler_configure(); @@ -45,6 +48,9 @@ task aon_timer_prescaler_vseq::prescaler_configure(); $sformatf("\n\t Writing random prescaler value of %d to WKUP CTRL", prescaler), UVM_HIGH) + csr_utils_pkg::csr_spinwait(.ptr(ral.wkup_ctrl.prescaler), .exp_data(prescaler), .backdoor(1)); + `uvm_info(`gfn, "Written values (wkup_prescaler) has propagated through the CDC", UVM_DEBUG) + `uvm_info(`gfn, "Enabling AON Timer (WKUP ONLY). Writing 1 to WKUP_CTRL", UVM_HIGH) csr_utils_pkg::csr_wr(ral.wkup_ctrl.enable, 1'b1); From 90f648b9b8695a2d607b4462cb44cadc96707b8c Mon Sep 17 00:00:00 2001 From: Antonio Martinez Zambrana Date: Mon, 9 Dec 2024 18:49:56 +0000 Subject: [PATCH 2/5] [dv, dv_base_reg] Adding flag to register base The flag is intended to be set/unset before/after a register is predicted. 
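The intended producer-side usage is roughly the following (a sketch only; 'csr' stands for any
dv_base_reg handle and 'pred_val' is illustrative):

  csr.predicting_value = 1;
  void'(csr.predict(.value(pred_val), .kind(UVM_PREDICT_DIRECT)));
  csr.predicting_value = 0;

so that other components can tell that a prediction is in flight.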
In addition, there is also a UVM event to notify when the prediction finishes Signed-off-by: Antonio Martinez Zambrana --- hw/dv/sv/dv_base_reg/dv_base_reg.sv | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/hw/dv/sv/dv_base_reg/dv_base_reg.sv b/hw/dv/sv/dv_base_reg/dv_base_reg.sv index d7a9c053550f0..5bb6993730bb7 100644 --- a/hw/dv/sv/dv_base_reg/dv_base_reg.sv +++ b/hw/dv/sv/dv_base_reg/dv_base_reg.sv @@ -35,6 +35,10 @@ class dv_base_reg extends uvm_reg; // through the 1st/2nd (or both) writes semaphore atomic_en_shadow_wr; + // Can be set before predicting a value for the case the register 'is_busy==1' + bit predicting_value; + uvm_event value_predicted_ev = new(); // Can be triggered to know when a prediction's finalised + function new(string name = "", int unsigned n_bits, int has_coverage); @@ -479,4 +483,14 @@ class dv_base_reg extends uvm_reg; return retval; endfunction + // Blocks until a value is predicted assuming the flag 'predicting_value' was set prior to + // calling '.predict()' method + task wait_for_prediction(); + if (predicting_value ) begin + value_predicted_ev.wait_ptrigger(); + `uvm_info(`gfn, $sformatf("Reg_name: %0s, value=0x%0x; value_predicted_ev event triggered", + get_name(), get_mirrored_value()), UVM_DEBUG) + end + endtask + endclass From e18eca8a00023d13ca9353e2a7315f5aba22793a Mon Sep 17 00:00:00 2001 From: Antonio Martinez Zambrana Date: Mon, 9 Dec 2024 18:51:12 +0000 Subject: [PATCH 3/5] [dv, cip_lib] In intr_test wait until mirrored value is predicted If the prediction is ongoing, the comparison will fail. Hence we pause until the prediction finishes Signed-off-by: Antonio Martinez Zambrana --- hw/dv/sv/cip_lib/seq_lib/cip_base_vseq.sv | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/hw/dv/sv/cip_lib/seq_lib/cip_base_vseq.sv b/hw/dv/sv/cip_lib/seq_lib/cip_base_vseq.sv index 7b2dc006f1db3..f5a6d75333237 100644 --- a/hw/dv/sv/cip_lib/seq_lib/cip_base_vseq.sv +++ b/hw/dv/sv/cip_lib/seq_lib/cip_base_vseq.sv @@ -420,6 +420,7 @@ class cip_base_vseq #( import dv_utils_pkg::interrupt_t; dv_base_reg intr_csrs[$]; dv_base_reg intr_test_csrs[$]; + dv_base_reg intr_state[$]; // convenience handle to intr_state regs foreach (all_csrs[i]) begin string csr_name = all_csrs[i].get_name(); @@ -431,6 +432,8 @@ class cip_base_vseq #( if (!uvm_re_match("intr_test*", csr_name)) begin intr_test_csrs.push_back(get_interrupt_csr(csr_name)); end + if (!uvm_re_match("intr_state*", csr_name)) + intr_state.push_back(get_interrupt_csr(csr_name)); end num_times = num_times * intr_csrs.size(); @@ -444,13 +447,15 @@ class cip_base_vseq #( foreach (intr_csrs[i]) begin uvm_reg_data_t data = $urandom(); `uvm_info(`gfn, $sformatf("Write %s: 0x%0h", intr_csrs[i].`gfn, data), UVM_MEDIUM) + foreach(intr_state[i]) + intr_state[i].wait_for_prediction(); csr_wr(.ptr(intr_csrs[i]), .value(data)); end // Read all intr related csr and check interrupt pins intr_csrs.shuffle(); foreach (intr_csrs[i]) begin - uvm_reg_data_t exp_val = `gmv(intr_csrs[i]); + uvm_reg_data_t exp_val; uvm_reg_data_t act_val; interrupt_t irq_ro_mask = '0; @@ -461,7 +466,16 @@ class cip_base_vseq #( irq_ro_mask = intr_csrs[i].get_ro_mask(); end - exp_val &= ~irq_ro_mask; + // There may be a current intr_state access which impedes the TB from updating intr_state + // mirrored value. TB blocks here to ensure the predictions kick in time. 
+ // We need to ensure the prediction has kicked in before we read the intr_state + // Users may set 'reg_name.predicting_value' before calling 'reg.predict' method, and + // unsetting it after 'reg.predict' returns to track whether a given reg is being predicted + foreach(intr_state[i]) + intr_state[i].wait_for_prediction(); + // we may need to update the exp_value here, since there may be a delay from the + // update above to ' exp_val'until here before we call the csr_rd + exp_val = `gmv(intr_csrs[i]) & ~irq_ro_mask; csr_rd(.ptr(intr_csrs[i]), .value(act_val)); act_val &= ~irq_ro_mask; @@ -478,7 +492,7 @@ class cip_base_vseq #( `DV_CHECK_CASE_EQ(exp_intr_pin, act_intr_pin) end // if (!uvm_re_match end // foreach (intr_csrs[i]) - end // for (int trans = 1; ... + end // Write 0 to intr_test to clean up status interrupts, otherwise, status interrupts may remain // active. And writing any value to a status interrupt CSR (intr_state) can't clear its value. foreach (intr_test_csrs[i]) begin From 878c858282f0d56b5f215f2a9bf51f7799990237 Mon Sep 17 00:00:00 2001 From: Antonio Martinez Zambrana Date: Mon, 9 Dec 2024 18:52:45 +0000 Subject: [PATCH 4/5] [dv, aon_timer] Adding CDC-aware intr_state register In order to improve the model, the TB needs to keep track of the backdoor value of intr_state fields, otherwise there are mismatches. The CDC-timed registers from USBDEV have been re-used for this pupose. Signed-off-by: Antonio Martinez Zambrana --- hw/ip/aon_timer/dv/env/aon_timer_env.core | 2 + hw/ip/aon_timer/dv/env/aon_timer_env_pkg.sv | 3 +- .../dv/env/aon_timer_intr_timed_regs.sv | 96 +++++++++++++++++++ 3 files changed, 100 insertions(+), 1 deletion(-) create mode 100644 hw/ip/aon_timer/dv/env/aon_timer_intr_timed_regs.sv diff --git a/hw/ip/aon_timer/dv/env/aon_timer_env.core b/hw/ip/aon_timer/dv/env/aon_timer_env.core index 84b2fe4219cef..02a6f3091d56e 100644 --- a/hw/ip/aon_timer/dv/env/aon_timer_env.core +++ b/hw/ip/aon_timer/dv/env/aon_timer_env.core @@ -9,11 +9,13 @@ filesets: depend: - lowrisc:dv:ralgen - lowrisc:dv:cip_lib + - lowrisc:dv:usbdev_env files: - aon_timer_env_pkg.sv - aon_timer_env_cfg.sv: {is_include_file: true} - aon_timer_env_cov.sv: {is_include_file: true} - aon_timer_virtual_sequencer.sv: {is_include_file: true} + - aon_timer_intr_timed_regs.sv: {is_include_file: true} - aon_timer_scoreboard.sv: {is_include_file: true} - aon_timer_env.sv: {is_include_file: true} - seq_lib/aon_timer_vseq_list.sv: {is_include_file: true} diff --git a/hw/ip/aon_timer/dv/env/aon_timer_env_pkg.sv b/hw/ip/aon_timer/dv/env/aon_timer_env_pkg.sv index dd7f14f70cf9b..d0f3f25411789 100644 --- a/hw/ip/aon_timer/dv/env/aon_timer_env_pkg.sv +++ b/hw/ip/aon_timer/dv/env/aon_timer_env_pkg.sv @@ -13,7 +13,7 @@ package aon_timer_env_pkg; import dv_base_reg_pkg::*; import csr_utils_pkg::*; import aon_timer_ral_pkg::*; - + import usbdev_env_pkg::*; // macro includes `include "uvm_macros.svh" `include "dv_macros.svh" @@ -31,6 +31,7 @@ package aon_timer_env_pkg; `include "aon_timer_env_cfg.sv" `include "aon_timer_env_cov.sv" `include "aon_timer_virtual_sequencer.sv" + `include "aon_timer_intr_timed_regs.sv" `include "aon_timer_scoreboard.sv" `include "aon_timer_env.sv" `include "aon_timer_vseq_list.sv" diff --git a/hw/ip/aon_timer/dv/env/aon_timer_intr_timed_regs.sv b/hw/ip/aon_timer/dv/env/aon_timer_intr_timed_regs.sv new file mode 100644 index 0000000000000..10507e6449825 --- /dev/null +++ b/hw/ip/aon_timer/dv/env/aon_timer_intr_timed_regs.sv @@ -0,0 +1,96 @@ +// Copyright lowRISC contributors 
(OpenTitan project). +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +// `Timed registers` cannot be guaranteed to change at an exact time, so a certain amount of +// variability must be expected in the timing. +class aon_timer_intr_timed_regs extends uvm_object; + `uvm_object_utils(aon_timer_intr_timed_regs) + + extern function new (string name=""); + + typedef enum { + TimedIntrStateWkupExpired = 0, + TimedIntrStateWdogBark + } timed_reg_e; + + // Access to DUT clock. + virtual clk_rst_if clk_rst_vif; + // Access to DUT registers. + aon_timer_reg_block ral; + + timed_reg timed[timed_reg_e]; + + // Monotonic, wrapping cycle count; used to detect when expectations have not been met. + // Incremented in 'check_predictions' task + int time_now = 0; + + // Perform a read of the actual DUT register state, for checking against expectations. + // Note: we perform a backdoor read to avoid impacting the timing of the DUT and DV. + extern task read_act_data(timed_reg_e r, output uvm_reg_data_t act_data); + // Add a timed, predicted state change to the list of expectations for the given register. + extern function void predict(timed_reg_e r, uvm_reg_data_t prev_data, uvm_reg_data_t new_data); + // Check a DUT read from the specified register against any timed expectations. + extern function uvm_reg_data_t read(timed_reg_e r, uvm_reg_data_t act_data); + // This process runs forever checking every prediction that is made, using backdoor csr_rd in zero + // time to avoid interfering with actual CSR reads and the timing of the simulation. + // It also resets the registers if a reset is seen. + extern task check_predictions(ref bit under_reset); + +endclass : aon_timer_intr_timed_regs + +function aon_timer_intr_timed_regs::new (string name=""); + super.new(name); +endfunction : new + +task aon_timer_intr_timed_regs::read_act_data(timed_reg_e r, output uvm_reg_data_t act_data); + case (r) + TimedIntrStateWkupExpired : begin + csr_rd(.ptr(ral.intr_state.wkup_timer_expired), .value(act_data), .backdoor(1)); + end + TimedIntrStateWdogBark : begin + csr_rd(.ptr(ral.intr_state.wdog_timer_bark), .value(act_data), .backdoor(1)); + end + default: `uvm_fatal(`gfn, "Invalid/unrecognized register") + endcase + `uvm_info(`gfn, $sformatf("Backdoor read of reg %p yielded 0x%0x", r, act_data), UVM_HIGH) +endtask : read_act_data + +function void aon_timer_intr_timed_regs::predict(timed_reg_e r, uvm_reg_data_t prev_data, + uvm_reg_data_t new_data); + `uvm_info(`gfn, $sformatf("Expecting reg %p <= 0x%0x, from 0x%0x (mask 0x%0x), time_now %0d", + r, new_data, prev_data, prev_data ^ new_data, time_now), UVM_MEDIUM) + timed[r].predict(time_now, new_data, prev_data); +endfunction : predict + +function uvm_reg_data_t aon_timer_intr_timed_regs::read(timed_reg_e r, uvm_reg_data_t act_data); + `uvm_info(`gfn, $sformatf("Producing prediction for %p, act_data 0x%0x", r, act_data), + UVM_MEDIUM) + return timed[r].read(time_now, act_data); +endfunction : read + +task aon_timer_intr_timed_regs::check_predictions(ref bit under_reset); + // Collect the initial values post-reset. + wait (clk_rst_vif.rst_n === 1'b1); + forever begin + timed_reg_e r; + // Check on every negedge to avoid races with CSR changes + @(negedge clk_rst_vif.clk); + time_now++; + // Check each of the timed registers for expired expectations. 
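+    // (timed_reg_e is walked with first()/next(); a field with a pending prediction is compared
+    //  against a fresh backdoor read of the corresponding intr_state field.)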
+ r = r.first(); + for (int i=0; i < r.num(); i++) begin + if (under_reset) begin + `uvm_info(`gfn, $sformatf("Resetting timed register predictions at 0x%0x", time_now), + UVM_MEDIUM) + timed[r].reset(); + end else if (timed[r].preds_pending()) begin + uvm_reg_data_t act_data; + // Something is expected to happen to this register field. + read_act_data(r, act_data); + timed[r].check_pred(time_now, $sformatf("%p", r), act_data); + end + r = r.next(); + end // for (int i=0; i < r.num(); i++) + end +endtask : check_predictions From 313c9607a36657de1db6751c949452be1f4112dc Mon Sep 17 00:00:00 2001 From: Antonio Martinez Zambrana Date: Mon, 9 Dec 2024 19:11:31 +0000 Subject: [PATCH 5/5] [dv, aon_timer] scoreboard model and checks refactor The scoreboard now checks when different configuration values have kicked-in and then computes the number of cycles it takes for any of the interrupts to raise. In addition, the TB now aims to predict the mirrored value of interrupt state register in order to improve the passing rate (specially those related to the buil-in intr_tests in the cip_base_vseq) Signed-off-by: Antonio Martinez Zambrana --- .../aon_timer/dv/env/aon_timer_scoreboard.sv | 1175 ++++++++++++++--- 1 file changed, 1026 insertions(+), 149 deletions(-) diff --git a/hw/ip/aon_timer/dv/env/aon_timer_scoreboard.sv b/hw/ip/aon_timer/dv/env/aon_timer_scoreboard.sv index 9e51ad415fa12..ed9539cebbc76 100644 --- a/hw/ip/aon_timer/dv/env/aon_timer_scoreboard.sv +++ b/hw/ip/aon_timer/dv/env/aon_timer_scoreboard.sv @@ -20,6 +20,7 @@ class aon_timer_scoreboard extends cip_base_scoreboard #( local bit wdog_regwen; local bit wdog_pause_in_sleep; local bit wdog_num_update_due; + local uint wdog_count; local uint bark_thold; local uint bite_thold; @@ -30,7 +31,7 @@ class aon_timer_scoreboard extends cip_base_scoreboard #( local bit [31:0] wdog_bite_num; // expected values - local bit intr_status_exp [2]; + local bit [1:0] intr_status_exp; local bit wdog_rst_req_exp = 0; typedef enum logic { @@ -38,38 +39,295 @@ class aon_timer_scoreboard extends cip_base_scoreboard #( WDOG = 1'b1 } timers_e; - // TLM agent fifos + // Prediction and checking of the loosely-timed registers. + aon_timer_intr_timed_regs timed_regs; + + // BFM state information for the loosely-timed registers. + typedef struct { + uvm_reg_data_t r[aon_timer_intr_timed_regs::timed_reg_e]; + } bfm_timed_regs_t; + + // Previous state + bfm_timed_regs_t prev_timed_regs; + + // UVM events to trigger the count for WDOG/WKUP timer to avoid any potential race condition + uvm_event wkup_count_ev = new(); + uvm_event wdog_count_ev = new(); - // local queues to hold incoming packets pending comparison + bit predicted_wkup_intr_q[$]; + bit predicted_wdog_intr_q[$]; + bit ongoing_intr_state_read; + string path_to_rtl = "tb.dut"; + + // A write to wkup_cause(0x0) can be taken with some delay due to CDC crossing + // it can happen the write is absorved at the same time the TB predicts a WDOG_intr. + // Since it's difficult to predict what will happen in this case the TB expects + // wkup_req_o will be 0 or the latest prediction + int unsigned aon_clk_cycle = 0; + int unsigned last_wkup_cause_write_aon_clk_cycle = 0; extern function new (string name="", uvm_component parent=null); + extern function void build_phase(uvm_phase phase); extern task run_phase(uvm_phase phase); - extern task monitor_interrupts(); + // Convenience task which keeps count of a given AON clock cycle. 
Currently only used to + // distinguish the case when a write to wkup_cause occurs at the same time as a interrupt being + // high + extern task track_aon_clk_cycle(); + // Collects interrupt coverage from RTL interrupts by directly sampling the RTL output + extern task collect_fcov_from_rtl_interrupts(); extern virtual task process_tl_access(tl_seq_item item, tl_channels_e channel, string ral_name); + // Model the timers and interrupts for WKUP/WDOG and compare them against the actual values + // the task doesn't return extern virtual task check_interrupt(); + // This tasks runs forever and it calculates the number of clock cycles until an interrupt is + // raised. The number of clock cycles is stored in variables wkup_num and wdog_bark/bite_num. + // The WKUP timer takes the prescaler into consideration when it comes to calculating clock cycles extern virtual task compute_num_clks(); + // Blocks when the 'pause_in_sleep' signal is set as well as the internal RTL aon_sleep_mode + // signal. Otherwise, the predictions may be late/early with regards to the RTL. extern virtual task wait_for_sleep(); + // The run_wkup_timer and run_wdog_bark/bite_timer are independent tasks which run forever and + // can safely be killed. + // Run wkup/wdog timers predicts the interrupt of each kind (or subkind for WDOG timer with bite + // and bark) and check against the actual value when an interrupt is due. + // The threads also contribute towards coverage and get killed whenever there is a reset or the + // enable bit for their respective counter is not set extern virtual task run_wkup_timer(); extern virtual task run_wdog_bark_timer(); extern virtual task run_wdog_bite_timer(); + // The three collect_*_coverage below tasks run forever and can be safely killed at any time. + // + // collect_wkup_timer_coverage: Whenever the sample_coverage event fires, sample the 64-bit + // wkup_count counter, threshold value and interrupt for wake_up_timer_thold_hit_cg covergroup. + extern task collect_wkup_timer_coverage(ref event sample_coverage); + // collect_wdog_bark_timer_coverage: Whenever the sample_coverage event fires, sample the 32-bit + // wdog_count counter, threshold value and interrupt for watchdog_timer_bark_thold_hit_cg + // covergroup. + extern task collect_wdog_bark_timer_coverage(ref event sample_coverage); + // collect_wdog_bite_timer_coverage: Whenever the sample_coverage event fires, sample the 32-bit + // wdog_count counter, threshold value and interrupt for watchdog_timer_bite_thold_hit_cg + // covergroup. + extern task collect_wdog_bite_timer_coverage(ref event sample_coverage); + + // Sets both predicted timed register (WKUP/WDOG) values, captured from intr_state_exp. 
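+  // (i.e. it fills a bfm_timed_regs_t snapshot from the model's expected interrupt bits)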
+ extern function void capture_timed_regs(output bfm_timed_regs_t state); + // The argument passed by reference `state` stores the value predicted for intr_state for each + // type of timer, `r` and tmr refer to the type of timer to predict for the timed and the expected + // interrupts respectively + extern function void capture_timed_regs_independently(ref bfm_timed_regs_t state, + aon_timer_intr_timed_regs::timed_reg_e r, + timers_e tmr); + // Initialise the timed regs (called in the build_phase) + extern function void init_timed_regs(); + // need separate wdog/wkup update functions in case they are called at the same time + extern function void update_timed_regs_independently(aon_timer_intr_timed_regs:: + timed_reg_e r, timers_e tmr); + // Update the model side of the WKUP/WDOG(or both) timed registers + extern function void update_timed_regs(); + extern function void update_timed_regs_wdog(); + extern function void update_timed_regs_wkup(); + + // Convenience wrapper function to avoid calling explictly uvm_hdl_read multiple times + extern function bit hdl_read_bit(string path); + // Waits for AON signal to become 'value' + extern task wait_for_aon_signal(string path, bit value); + // Waits for the WE to rise and fall after a TL-UL write access + // Does HDL reads to be in sync with when the values kick-in the RTL + // It needs to be called the moment the TL-access occurs, otherwise the thread may hang if the WE + // has risen already + extern task wait_for_we_pulse(input string path); + // Predicts register wkup_cause whenever the busy flag is 0 and blocks other parts of the TB to + // predict it's value until an ongoing prediction finishes + // In addition, the prediction can be delayed by setting 'wait_for_we' if the TB wants the + // prediction to proceed after the wkup_cause write crosses the CDC boundary to be in sync with + // the RTL. + // The advice is to wrap a fork...join_none around this task to avoid blocking + extern task predict_wkup_cause(bit wkup_cause, bit wait_for_we); + // Predicts register intr_state whenever the busy flag is 0. It checks the prediction went through + // and blocks other parts of the TB to predict its value until an ongoing prediction finishes + // The advice is to wrap a fork...join_none around this task to avoid blocking + extern task predict_intr_state(bit [1:0] pred_intr_state); + + // Update functions wait for the value to cross CDC boundaries by checking the write enable + // before the TB starts counting + extern task update_wdog_or_wkup_reg_timely(string path, timers_e timer_type); + extern task update_wdog_count_timely(); + extern task update_wdog_bite_thold_timely(); + extern task update_wdog_bark_thold_timely(); + extern task update_wkup_count_lo_timely(); + extern task update_wkup_count_hi_timely(); + extern task update_wkup_thold_lo_timely(); + extern task update_wkup_thold_hi_timely(); + + // Pushes predicted values for wdog/wkup_interrupt and monitors the value being updated in the + // RTL advice is to wrap a fork... 
join_none around the task + extern task wkup_intr_predicted_values(bit exp_wkup_intr); + extern task wdog_intr_predicted_values(bit exp_wdog_intr); + // Checks the predicted interrupt value is the current RTL value by backdoor reads + extern task check_intr_value_propagated(timers_e timer_type); + + // Converts from timers_e to 'timed_reg_e' + extern function aon_timer_intr_timed_regs::timed_reg_e timers_e2time_reg_e(timers_e timer_type); + // Check the value of `intr_wdog_timer_bark/wkup_timer_expired_o' matched the one in the + // intr_state register via using the timed registers. + // intr_status_exp[WDOG] can't be used to compare since its value may have been wiped out by the + // time the comparison is carried out + extern function void check_aon_domain_interrupt(timers_e timer_type); + // Returns either `predicted_wdog_intr_q` or `predicted_wkup_intr_q` by reference depending on the + // timer type passed + extern function void return_pred_intr_q(timers_e timer_type, ref bit pred_q[$]); + // Checks the expected mirrored value against the value returned in the TL-UL read for intr_state + extern function void check_intr_state_bit(timers_e timer_type, bit actual_value); + + // Convenience function to check if the wkup/wdog_ctrl.enable bits are set or not depending on + // whether the enable argument is 0 or 1. + extern task wait_for_wdog_enable_matching(bit enable); + extern task wait_for_wkup_enable_matching(bit enable); + + extern virtual function void reset(string kind = "HARD"); + endclass : aon_timer_scoreboard function aon_timer_scoreboard::new (string name="", uvm_component parent=null); super.new(name, parent); endfunction : new +function void aon_timer_scoreboard::build_phase(uvm_phase phase); + super.build_phase(phase); + + // Set-up timed registers (intr_state): + timed_regs = aon_timer_intr_timed_regs::type_id::create("timed_regs"); + timed_regs.clk_rst_vif = cfg.clk_rst_vif; + timed_regs.ral = ral; + init_timed_regs(); +endfunction : build_phase + task aon_timer_scoreboard::run_phase(uvm_phase phase); super.run_phase(phase); fork + track_aon_clk_cycle(); compute_num_clks(); check_interrupt(); - monitor_interrupts(); + collect_fcov_from_rtl_interrupts(); + timed_regs.check_predictions(cfg.under_reset); join_none endtask : run_phase -task aon_timer_scoreboard::monitor_interrupts(); +task aon_timer_scoreboard::track_aon_clk_cycle(); + forever begin + cfg.aon_clk_rst_vif.wait_clks(1); // enabled is synchronised to the aon domain + aon_clk_cycle++; + end +endtask + +function void aon_timer_scoreboard::capture_timed_regs(output bfm_timed_regs_t state); + aon_timer_intr_timed_regs::timed_reg_e r = aon_timer_intr_timed_regs::TimedIntrStateWdogBark; + timers_e tmr_type = WDOG; + capture_timed_regs_independently(.state(state), .r(r), .tmr(tmr_type)); + r = aon_timer_intr_timed_regs::TimedIntrStateWkupExpired; + tmr_type = WKUP; + capture_timed_regs_independently(.state(state), .r(r), .tmr(tmr_type)); +endfunction : capture_timed_regs + +function void aon_timer_scoreboard::capture_timed_regs_independently(ref bfm_timed_regs_t state, + aon_timer_intr_timed_regs:: + timed_reg_e r, timers_e tmr); + state.r[r] = intr_status_exp[tmr]; +endfunction : capture_timed_regs_independently + +function void aon_timer_scoreboard::init_timed_regs(); + bfm_timed_regs_t init_regs; + aon_timer_intr_timed_regs::timed_reg_e r; + // Maximum delay (in DUT clock cycles) for a prediction to be met; most delays should take + // only a few cycles for internal changes to propagate, but some are substantially 
longer + // oweing to the immediate operation of the functional model. + int unsigned max_delay = 5; + // Capture the initial state of the loosely-timed registers. + capture_timed_regs(init_regs); + + // Remember the register state. + prev_timed_regs = init_regs; + // Create the register descriptions. + r = r.first(); + for (int i = 0; i < r.num(); i++) begin + timed_reg tr = timed_reg::type_id::create("tr"); + uvm_reg_data_t init_val = 0; + dv_base_reg_field fields[$]; + // Collect the field descriptions for this register. + if (!(r inside {aon_timer_intr_timed_regs::TimedIntrStateWdogBark, + aon_timer_intr_timed_regs::TimedIntrStateWkupExpired})) + `uvm_fatal(`gfn, "Invalid/unknown register") + ral.intr_state.get_dv_base_reg_fields(fields); + + // Report the initial value of this register as predicted by the BFM. + `uvm_info(`gfn, $sformatf("Reg %p : initial value 0x%0x", r, init_regs.r[r]), UVM_MEDIUM) + // Collect the initial values of the register fields, dropping any that we cannot predict. + foreach(fields[f]) begin + string field_name = fields[f].get_name(); + // Extract the initial value of this register field from the modeled register state. + uvm_reg_data_t mask = (1 << fields[f].get_n_bits()) - 1; + init_val = (init_regs.r[r] >> fields[f].get_lsb_pos()) & mask; + tr.add_field(fields[f], init_val, max_delay); + `uvm_info(`gfn, $sformatf("Register %p field %s : initially 0x%0x", r, + field_name, init_val), UVM_MEDIUM) + end + timed_regs.timed[r] = tr; + r = r.next(); + end +endfunction : init_timed_regs + +// Update the expectations for the timed registers; this should be called after any operation on +// the BFM that could affect the state of one or more of the timed registers. +function void aon_timer_scoreboard::update_timed_regs(); + aon_timer_intr_timed_regs::timed_reg_e r = r.first(); + bfm_timed_regs_t new_regs; + capture_timed_regs(new_regs); + `uvm_info(`gfn, "After capturing timed regs", UVM_DEBUG) + r = r.first(); + for (int i = 0; i < r.num(); i++) begin + // Has there been a change in the bits that we can predict? + uvm_reg_data_t unpred_mask = timed_regs.timed[r].unpred_mask; + if ((new_regs.r[r] & ~unpred_mask) != (prev_timed_regs.r[r] & ~unpred_mask)) begin + timed_regs.predict(r, prev_timed_regs.r[r], new_regs.r[r]); + end + r = r.next(); + end + // Remember the register state. + prev_timed_regs = new_regs; +endfunction : update_timed_regs + +// Updates timed register independently +function void aon_timer_scoreboard::update_timed_regs_independently(aon_timer_intr_timed_regs:: + timed_reg_e r, timers_e tmr); + bfm_timed_regs_t new_regs; + uvm_reg_data_t unpred_mask = timed_regs.timed[r].unpred_mask; + + capture_timed_regs_independently(.state(new_regs), .r(r), .tmr(tmr)); + `uvm_info(`gfn, $sformatf("Updating Timed regs #intr_state - %0s",tmr.name()), UVM_DEBUG) + + // Has there been a change in the bits that we can predict? + if ((new_regs.r[r] & ~unpred_mask) != (prev_timed_regs.r[r] & ~unpred_mask)) begin + timed_regs.predict(r, prev_timed_regs.r[r], new_regs.r[r]); + end + // Remember the register state. 
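+  // (the next capture is diffed against this snapshot to decide whether a new prediction is due)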
+ prev_timed_regs = new_regs; +endfunction : update_timed_regs_independently + +// need separate wdog/wkup update functions in case they are called at the same time +function void aon_timer_scoreboard::update_timed_regs_wdog(); + update_timed_regs_independently(.r(aon_timer_intr_timed_regs::TimedIntrStateWdogBark), + .tmr(timers_e'(WDOG))); +endfunction : update_timed_regs_wdog + +function void aon_timer_scoreboard::update_timed_regs_wkup(); + update_timed_regs_independently(.r(aon_timer_intr_timed_regs::TimedIntrStateWkupExpired), + .tmr(timers_e'(WKUP))); + +endfunction : update_timed_regs_wkup + +task aon_timer_scoreboard::collect_fcov_from_rtl_interrupts(); forever begin @(cfg.aon_intr_vif.pins); // Sample interrupt pin coverage for interrupt pins @@ -79,7 +337,244 @@ task aon_timer_scoreboard::monitor_interrupts(); end end end -endtask : monitor_interrupts +endtask : collect_fcov_from_rtl_interrupts + +task aon_timer_scoreboard::predict_wkup_cause(bit wkup_cause, bit wait_for_we); + fork + begin : iso_fork + fork + begin : wkup_cause_timely + // Blocks only if 'predicting_value' = 1 + ral.wkup_cause.wait_for_prediction(); + if (wait_for_we) begin + wait_for_aon_signal(".u_reg.aon_wkup_cause_we", 1); + ral.wkup_cause.predicting_value = 1; + wait_for_aon_signal(".u_reg.aon_wkup_cause_we", 0); + end // if (wait_for_we) + else + ral.wkup_cause.predicting_value = 1; + + // The predict method may fail if the register has just been accessed: + // block here to avoid error + while (ral.wkup_cause.is_busy() == 1) + cfg.clk_rst_vif.wait_n_clks(1); + + if (!ral.wkup_cause.predict(.value(wkup_cause), .kind(UVM_PREDICT_DIRECT))) + `uvm_fatal(`gfn, $sformatf("%s prediction failed", `gmv(ral.wkup_cause))) + `uvm_info(`gfn, $sformatf("Updated predicted WKUP-CAUSE to 0x%0x", wkup_cause), UVM_DEBUG) + if (wait_for_we) + last_wkup_cause_write_aon_clk_cycle = aon_clk_cycle; + `uvm_info(`gfn, "Triggering UVM event 'ral.wkup_cause.value_predicted_ev'", UVM_DEBUG) + ral.wkup_cause.value_predicted_ev.trigger(); + ral.wkup_cause.predicting_value = 0; + end : wkup_cause_timely + begin : reset_kill + wait (under_reset); + end : reset_kill + join_any + disable fork; + end : iso_fork + join +endtask : predict_wkup_cause + +// Convenience function to avoid repeating same boilerplate code +// It takes a path relative to the RTL and appends the RTL instance to it +function bit aon_timer_scoreboard::hdl_read_bit(string path); + bit hdl_bit; + if (! uvm_hdl_read({path_to_rtl,path}, hdl_bit)) + `uvm_error (`gfn, $sformatf("HDL Read from %s failed", path)) + return hdl_bit; +endfunction : hdl_read_bit + +// Does a uvm_hdl_read until the read value becomes 0/1 in the AON domain +task aon_timer_scoreboard::wait_for_aon_signal(string path, bit value); + bit actual_value; + do begin + actual_value = hdl_read_bit(path); + if (actual_value !== value) + cfg.aon_clk_rst_vif.wait_clks(1); + end while (actual_value !== value); +endtask + +task aon_timer_scoreboard::wait_for_we_pulse(input string path); + wait_for_aon_signal(path, 1); + wait_for_aon_signal(path, 0); +endtask : wait_for_we_pulse + +// Update functions waits for the value to cross CDC boundaries before the TB starts counting +task aon_timer_scoreboard::update_wdog_or_wkup_reg_timely(string path, timers_e timer_type); + fork + begin: iso_fork + fork + begin : hdl_read_wdog_wkup_reg_we + wait_for_we_pulse(path); + // Update once the changes have kicked in. 
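+        // (compute_num_clks blocks on the *_num_update_due flags and, once they are set,
+        //  recomputes the expected number of AON cycles to the next interrupt)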
+ case(timer_type) + WKUP: wkup_num_update_due = 1; + WDOG: wdog_num_update_due = 1; + default: `uvm_fatal(`gfn, $sformatf("Incorrect timer type:%0s", timer_type.name())) + endcase + end : hdl_read_wdog_wkup_reg_we + begin : reset_kill + wait (under_reset); + end : reset_kill + join_any + disable fork; + end : iso_fork + join +endtask : update_wdog_or_wkup_reg_timely + +// Update functions waits for the value to cross CDC boundaries before the TB starts counting +task aon_timer_scoreboard::update_wdog_count_timely(); + update_wdog_or_wkup_reg_timely(.path(".u_reg.aon_wdog_count_we"), .timer_type(WDOG)); +endtask : update_wdog_count_timely + +task aon_timer_scoreboard::update_wdog_bite_thold_timely(); + update_wdog_or_wkup_reg_timely(.path(".u_reg.aon_wdog_bite_thold_gated_we"), .timer_type(WDOG)); +endtask : update_wdog_bite_thold_timely + +task aon_timer_scoreboard::update_wdog_bark_thold_timely(); + update_wdog_or_wkup_reg_timely(.path(".u_reg.aon_wdog_bark_thold_gated_we"), .timer_type(WDOG)); +endtask : update_wdog_bark_thold_timely + +task aon_timer_scoreboard::update_wkup_count_lo_timely(); + update_wdog_or_wkup_reg_timely(.path(".u_reg.aon_wkup_count_lo_we"), .timer_type(WKUP)); +endtask : update_wkup_count_lo_timely + +task aon_timer_scoreboard::update_wkup_count_hi_timely(); + update_wdog_or_wkup_reg_timely(.path(".u_reg.aon_wkup_count_hi_we"), .timer_type(WKUP)); +endtask : update_wkup_count_hi_timely + +task aon_timer_scoreboard::update_wkup_thold_lo_timely(); + update_wdog_or_wkup_reg_timely(.path(".u_reg.aon_wkup_thold_lo_we"), .timer_type(WKUP)); +endtask : update_wkup_thold_lo_timely + +task aon_timer_scoreboard::update_wkup_thold_hi_timely(); + update_wdog_or_wkup_reg_timely(.path(".u_reg.aon_wkup_thold_hi_we"), .timer_type(WKUP)); +endtask : update_wkup_thold_hi_timely + +task aon_timer_scoreboard::predict_intr_state(bit [1:0] pred_intr_state); + `uvm_info(`gfn, $sformatf("%m - pred_intr_state = 0x%0x",pred_intr_state), UVM_DEBUG) + fork + begin: iso_fork + fork + begin : intr_state_timely + ral.intr_state.predicting_value = 1; + while (ral.intr_state.is_busy() == 1) + cfg.clk_rst_vif.wait_n_clks(1); + if (!ral.intr_state.predict(.value(pred_intr_state), .kind(UVM_PREDICT_DIRECT))) + `uvm_fatal(`gfn, $sformatf("%s prediction failed", ral.intr_state.get_name())) + + `uvm_info(`gfn, "Triggering UVM event 'ral.intr_state.value_predicted_ev'", UVM_DEBUG) + ral.intr_state.value_predicted_ev.trigger(); + ral.intr_state.predicting_value = 0; + end : intr_state_timely + begin : reset_kill + wait (under_reset); + ral.intr_state.predicting_value = 0; + end : reset_kill + join_any + disable fork; + end : iso_fork + join +endtask : predict_intr_state + +task aon_timer_scoreboard::wkup_intr_predicted_values(bit exp_wkup_intr); + static int unsigned last_cycle_count = 0; + + if (last_cycle_count != timed_regs.time_now) begin + `uvm_info(`gfn, $sformatf("%m - Predicted wkup_intr = 0x%0x", exp_wkup_intr), UVM_DEBUG) + predicted_wkup_intr_q.push_back(exp_wkup_intr); + last_cycle_count = timed_regs.time_now; + fork + begin: iso_fork + fork + begin : wait_values_to_propagate + // do backdoor read and delete values no longer valid. 
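+            // (blocks until the latest predicted value is observed in the RTL intr_state via
+            //  backdoor reads, then drops the stale entries from the prediction queue)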
+ check_intr_value_propagated(WKUP); + end : wait_values_to_propagate + begin : reset_kill + wait (under_reset); + end : reset_kill + join_any + disable fork; + end : iso_fork + join + end +endtask : wkup_intr_predicted_values + +task aon_timer_scoreboard::wdog_intr_predicted_values(bit exp_wdog_intr); + static int unsigned last_cycle_count = 0; + + if (last_cycle_count != timed_regs.time_now) begin + `uvm_info(`gfn, $sformatf("%m - Predicted wdog_intr = 0x%0x", exp_wdog_intr), UVM_DEBUG) + predicted_wdog_intr_q.push_back(exp_wdog_intr); + last_cycle_count = timed_regs.time_now; + fork + begin: iso_fork + fork + begin : wait_values_to_propagate + // do backdoor read and delete values no longer valid. + check_intr_value_propagated(WDOG); + end : wait_values_to_propagate + begin : reset_kill + wait (under_reset); + end : reset_kill + join_any + disable fork; + end : iso_fork + join + end +endtask : wdog_intr_predicted_values + +task aon_timer_scoreboard::check_intr_value_propagated(timers_e timer_type); + int unsigned idx; // Idx to the latest prediction + bit exp_value; + bit act_data; + bit value_matched; + + case(timer_type) + WKUP: begin + if (predicted_wkup_intr_q.size == 0) + `uvm_fatal(`gfn, "'predicted_wkup_intr_q' Queue is empty") + idx = predicted_wkup_intr_q.size -1; + // Synchronise with the next neg-edge + cfg.clk_rst_vif.wait_n_clks(1); + do begin + csr_rd(.ptr(ral.intr_state.wkup_timer_expired), .value(act_data), .backdoor(1)); + if (predicted_wkup_intr_q[idx] == act_data && ongoing_intr_state_read == 0) begin + value_matched = 1; + // Remove all the other predictions no longer valid: + for (int i = 0; i < idx; i++) + void'(predicted_wkup_intr_q.pop_front()); + end + else //negedge to avoid a race condition + cfg.clk_rst_vif.wait_n_clks(1); + end + while (value_matched == 0); + end + WDOG: begin + if (predicted_wdog_intr_q.size == 0) + `uvm_fatal(`gfn, "'predicted_wdog_intr_q' Queue is empty") + idx = predicted_wdog_intr_q.size -1; + // Synchronise with the next neg-edge + cfg.clk_rst_vif.wait_n_clks(1); + do begin + csr_rd(.ptr(ral.intr_state.wdog_timer_bark), .value(act_data), .backdoor(1)); + if (predicted_wdog_intr_q[idx] == act_data && ongoing_intr_state_read == 0) begin + value_matched = 1; + // Remove all the other predictions no longer valid: + for (int i = 0; i < idx; i++) + void'(predicted_wdog_intr_q.pop_front()); + end + else + cfg.clk_rst_vif.wait_n_clks(1); + end + while (value_matched == 0); + + end + endcase +endtask : check_intr_value_propagated task aon_timer_scoreboard::process_tl_access(tl_seq_item item, tl_channels_e channel, string ral_name); @@ -88,10 +583,10 @@ task aon_timer_scoreboard::process_tl_access(tl_seq_item item, tl_channels_e cha bit write = item.is_write(); uvm_reg_addr_t csr_addr = cfg.ral_models[ral_name].get_word_aligned_addr(item.a_addr); - bit addr_phase_read = (!write && channel == AddrChannel); - bit addr_phase_write = (write && channel == AddrChannel); - bit data_phase_read = (!write && channel == DataChannel); - bit data_phase_write = (write && channel == DataChannel); + bit addr_phase_read = (!write && channel == AddrChannel); + bit addr_phase_write = (write && channel == AddrChannel); + bit data_phase_read = (!write && channel == DataChannel); + bit data_phase_write = (write && channel == DataChannel); // if access was to a valid csr, get the csr handle if (csr_addr inside {cfg.ral_models[ral_name].csr_addrs}) begin @@ -104,7 +599,14 @@ task aon_timer_scoreboard::process_tl_access(tl_seq_item item, tl_channels_e cha // if incoming 
access is a write to a valid csr, then make updates right away if (addr_phase_write) begin - void'(csr.predict(.value(item.a_data), .kind(UVM_PREDICT_WRITE), .be(item.a_mask))); + // intr_state / wkup_cause are predicted in their own process + if(csr.get_name() == "intr_state" || csr.get_name() == "wkup_cause") + `uvm_info(`gfn, $sformatf("Write to %s", csr.get_name()), UVM_DEBUG) + else begin + // TODO: check the predict function return all OK for any other register to guarantee correct + // mirrored value prediction + void'(csr.predict(.value(item.a_data), .kind(UVM_PREDICT_WRITE), .be(item.a_mask))); + end if (cfg.en_cov) begin //Sample configuration coverage cov.timer_cfg_cg.sample(prescaler, bark_thold, bite_thold, @@ -112,17 +614,55 @@ task aon_timer_scoreboard::process_tl_access(tl_seq_item item, tl_channels_e cha end end + // Flag to let the model know if a current read is being carried out. Otherwise, the TB may + // think the actual RTL intr_state value is different from the value being reported back if the + // change happens right around the same time as the access + if (csr.get_name() == "intr_state" && !write) + ongoing_intr_state_read = 1; + else + ongoing_intr_state_read = 0; + // process the csr req // for write, update local variable and fifo at address phase // for read, update predication at address phase and compare at data phase case (csr.get_name()) // add individual case item for each csr "intr_state": begin + // Interrupt state reads are checked here do_read_check = 1'b0; - if (data_phase_write) begin - uint intr_state_val = item.a_data; - if (intr_state_val[WKUP]) intr_status_exp[WKUP] = 1'b0; - if (intr_state_val[WDOG]) intr_status_exp[WDOG] = 1'b0; + + if (addr_phase_write) begin + uint intr_state_val = item.a_mask[0] ? item.a_data : 0; + bit update_prediction; + if (intr_state_val[WKUP]) begin + intr_status_exp[WKUP] = 1'b0; + update_prediction = 1; + fork wkup_intr_predicted_values(intr_status_exp[WKUP]); join_none + end + if (intr_state_val[WDOG]) begin + intr_status_exp[WDOG] = 1'b0; + update_prediction = 1; + fork wdog_intr_predicted_values(intr_status_exp[WDOG]); join_none + end + + if (update_prediction) begin + uvm_reg_data_t predicted_intr_status; + predicted_intr_status[WDOG] = intr_status_exp[WDOG]; + predicted_intr_status[WKUP] = intr_status_exp[WKUP]; + `uvm_info(`gfn, $sformatf("Updated intr_status_exp = 0x%0x",intr_status_exp), + UVM_DEBUG) + fork predict_intr_state(predicted_intr_status); join_none + + // The timed registers predictions need to be independent of each other + if (intr_state_val[WDOG]) + update_timed_regs_wdog(); + if (intr_state_val[WKUP]) + update_timed_regs_wkup(); + end + end + else if (data_phase_read) begin + check_intr_state_bit(.timer_type(WKUP), .actual_value(item.d_data[WKUP])); + check_intr_state_bit(.timer_type(WDOG), .actual_value(item.d_data[WDOG])); end // INTR_EN register does not exists in AON timer because the interrupts are // enabled as long as timers are enabled. 
@@ -132,64 +672,106 @@ task aon_timer_scoreboard::process_tl_access(tl_seq_item item, tl_channels_e cha end end "wkup_ctrl": begin + `uvm_info(`gfn, "ACCESSING Wkup_ctrl", UVM_DEBUG) prescaler = get_reg_fld_mirror_value(ral, csr.get_name(), "prescaler"); wkup_en = get_reg_fld_mirror_value(ral, csr.get_name(), "enable"); if (data_phase_write) wkup_num_update_due = 1; end "wkup_cause": begin - wkup_cause = csr.get_mirrored_value(); - intr_status_exp[WKUP] = item.a_data; + if (data_phase_read) begin + // Check mirrored value matched + `DV_CHECK_EQ(`gmv(ral.wkup_cause), item.d_data[0], + $sformatf("reg name: %0s", csr.get_full_name())) + end else if (data_phase_write && item.a_mask[0] && item.a_data[0] == 0) begin + // Predict new wkup_cause after the write + fork + predict_wkup_cause(.wkup_cause(0), .wait_for_we(1)); + begin + ral.wkup_cause.wait_for_prediction(); + wkup_cause = 0; + end + join_none + end end "wkup_count_lo": begin wkup_count[31:0] = csr.get_mirrored_value(); - if (data_phase_write) wkup_num_update_due = 1; + if (data_phase_write) + fork update_wkup_count_lo_timely(); join_none end "wkup_count_hi": begin wkup_count[63:32] = csr.get_mirrored_value(); - if (data_phase_write) wkup_num_update_due = 1; + if (data_phase_write) + fork update_wkup_count_hi_timely(); join_none end "wkup_thold_lo": begin wkup_thold[31:0] = csr.get_mirrored_value(); - if (data_phase_write) wkup_num_update_due = 1; + `uvm_info(`gfn, $sformatf("Written wkup_thold[31:0]=0x%0x",wkup_thold[31:0]), UVM_DEBUG) + if (data_phase_write) + fork update_wkup_thold_lo_timely(); join_none end "wkup_thold_hi": begin wkup_thold[63:32] = csr.get_mirrored_value(); - if (data_phase_write) wkup_num_update_due = 1; + `uvm_info(`gfn, $sformatf("Written wkup_thold[63:22]=0x%0x",wkup_thold[63:22]), UVM_DEBUG) + if (data_phase_write) + fork update_wkup_thold_hi_timely(); join_none end "wdog_ctrl": begin + `uvm_info(`gfn, "Accessing Wdog_ctrl", UVM_DEBUG) wdog_en = get_reg_fld_mirror_value(ral, csr.get_name(), "enable"); wdog_pause_in_sleep = get_reg_fld_mirror_value(ral, csr.get_name(), "pause_in_sleep"); end "wdog_count": begin wdog_count = csr.get_mirrored_value(); - if (data_phase_write) wdog_num_update_due = 1; + if (data_phase_write) + fork update_wdog_count_timely(); join_none end "wdog_regwen": begin wdog_regwen = csr.get_mirrored_value(); end "wdog_bark_thold": begin bark_thold = csr.get_mirrored_value(); - if (data_phase_write) wdog_num_update_due = 1; + if (data_phase_write) + fork update_wdog_bark_thold_timely(); join_none end "wdog_bite_thold": begin bite_thold = csr.get_mirrored_value(); - if (data_phase_write) wdog_num_update_due = 1; + if (data_phase_write) + fork update_wdog_bite_thold_timely(); join_none end "intr_test": begin uint intr_test_val = item.a_data; - if (intr_test_val[WKUP]) intr_status_exp[WKUP] = 1'b1; - if (intr_test_val[WDOG]) intr_status_exp[WDOG] = 1'b1; - if (cfg.en_cov) begin - cov.intr_test_cg.sample(WKUP, intr_test_val[WKUP], - wkup_en, intr_status_exp[WKUP]); - cov.intr_test_cg.sample(WDOG, intr_test_val[WDOG], - wdog_en, intr_status_exp[WDOG]); - end - end + if (addr_phase_write) begin + // The timed registers predictions need to be independent of each other + if (intr_test_val[WKUP]) begin + intr_status_exp[WKUP] = 1'b1; + `uvm_info(`gfn, "Setting intr_status_exp[WKUP]", UVM_DEBUG) + update_timed_regs_wkup(); + fork wkup_intr_predicted_values(intr_status_exp[WKUP]); join_none + end + if (intr_test_val[WDOG]) begin + intr_status_exp[WDOG] = 1'b1; + `uvm_info(`gfn, "Setting 
intr_status_exp[WDOG]", UVM_DEBUG) + update_timed_regs_wdog(); + fork wdog_intr_predicted_values(intr_status_exp[WDOG]); join_none + end + + if (intr_test_val[WDOG] | intr_test_val[WKUP]) begin + `uvm_info(`gfn, "Updating intr_state due to intr_test write", UVM_DEBUG) + // The call to intr_state.busy() may block and hence we update when possible + fork predict_intr_state(intr_status_exp); join_none + end + if (cfg.en_cov) begin + cov.intr_test_cg.sample(WKUP, intr_test_val[WKUP], + wkup_en, intr_status_exp[WKUP]); + cov.intr_test_cg.sample(WDOG, intr_test_val[WDOG], + wdog_en, intr_status_exp[WDOG]); + end + end // if (addr_phase_write) + end // case: "intr_test" default: begin // No other special behaviour for writes end - endcase + endcase // case (csr.get_name()) // On reads, if do_read_check, is set, then check mirrored_value against item.d_data if (data_phase_read) begin @@ -201,8 +783,6 @@ task aon_timer_scoreboard::process_tl_access(tl_seq_item item, tl_channels_e cha end endtask : process_tl_access -// Task : check_interrupt -// wait for expected # of clocks and check for interrupt state reg and pin task aon_timer_scoreboard::check_interrupt(); forever begin wait (!under_reset); @@ -224,166 +804,420 @@ task aon_timer_scoreboard::check_interrupt(); endtask : check_interrupt task aon_timer_scoreboard::compute_num_clks(); - forever begin : compute_num_clks - // calculate number of clocks required to have interrupt from wkup - @(wkup_num_update_due or wdog_num_update_due); - wait(!under_reset); - if (wkup_num_update_due) begin - if (wkup_count <= wkup_thold) begin - wkup_num = ((wkup_thold - wkup_count + 1) * (prescaler + 1)); - end - else begin - wkup_num = 0; - end - `uvm_info(`gfn, $sformatf("Calculated WKUP_NUM: %d", wkup_num), UVM_HIGH) - end - if (wdog_num_update_due) begin - // calculate wdog bark - if (wdog_count < bark_thold) begin - wdog_bark_num = bark_thold - wdog_count; - end - else begin - wdog_bark_num = 0; - end - `uvm_info(`gfn, $sformatf("Calculated wdog_bark_num: %d", wdog_bark_num), UVM_HIGH) - if (wdog_count < bite_thold) begin - wdog_bite_num = bite_thold - wdog_count; - end - else begin - wdog_bite_num = 0; - end - `uvm_info(`gfn, $sformatf("Calculated wdog_bite_num: %d", wdog_bite_num), UVM_HIGH) + fork + begin : WKUP_thread + bit enable; + forever begin + @(wkup_num_update_due); + wait(!under_reset); + if (wkup_num_update_due) begin + wait_for_wkup_enable_matching(.enable(1)); + + if (wkup_count <= wkup_thold) begin + wkup_num = ((wkup_thold - wkup_count) * (prescaler + 1)); + end + else begin + wkup_num = 0; + end + `uvm_info(`gfn, $sformatf("Calculated WKUP_NUM: %d (wkup_count: %0d, wkup_thold = %0d", + wkup_num, wkup_count, wkup_thold), UVM_HIGH) + end + wkup_num_update_due = 0; + + `uvm_info(`gfn, "Triggering UVM event 'wkup_count_ev'", UVM_DEBUG) + wkup_count_ev.trigger(); + end // forever begin end - wkup_num_update_due = 0; - wdog_num_update_due = 0; - end // compute_num_clks + begin : WDOG_thread + // WDOG: + forever begin + @(wdog_num_update_due); + if (wdog_num_update_due) begin + // calculate wdog bark/bite + wait_for_wdog_enable_matching(.enable(1)); + + if (wdog_count < bark_thold) begin + wdog_bark_num = bark_thold - wdog_count; + end + else begin + wdog_bark_num = 0; + end + `uvm_info(`gfn, $sformatf("Calculated wdog_bark_num: %d", wdog_bark_num), UVM_HIGH) + + if (wdog_count < bite_thold) begin + wdog_bite_num = bite_thold - wdog_count; + end + else begin + wdog_bite_num = 0; + end + `uvm_info(`gfn, $sformatf("Calculated wdog_bite_num: %d", 
+                                      wdog_bite_num), UVM_HIGH)
+          end
+          wdog_num_update_due = 0;
+          `uvm_info(`gfn, "Triggering UVM event 'wdog_count_ev'", UVM_DEBUG)
+          wdog_count_ev.trigger();
+        end // forever begin
+      end // block: WDOG_thread
+    join
 endtask : compute_num_clks
 
 task aon_timer_scoreboard::wait_for_sleep();
-  wait ( !(wdog_pause_in_sleep & cfg.sleep_vif.sample_pin()));
+  // Use the RTL's internal synchronised signal to ensure we make the predictions in
+  // sync with the RTL
+  bit synchronised_sleep_mode_i;
+  do begin
+    synchronised_sleep_mode_i = hdl_read_bit(.path(".aon_sleep_mode"));
+    if (wdog_pause_in_sleep & synchronised_sleep_mode_i)
+      cfg.aon_clk_rst_vif.wait_clks(1);
+  end while (wdog_pause_in_sleep & synchronised_sleep_mode_i);
 endtask : wait_for_sleep
 
+task aon_timer_scoreboard::collect_wkup_timer_coverage(ref event sample_coverage);
+  forever begin
+    @ (sample_coverage);
+    if (cfg.en_cov) begin
+      bit [63:0] rtl_count;
+      // Read the RTL value since the TB doesn't keep track of the count
+      csr_rd(.ptr(ral.wkup_count_lo), .value(rtl_count[31:0]), .backdoor(1));
+      csr_rd(.ptr(ral.wkup_count_hi), .value(rtl_count[63:32]), .backdoor(1));
+      cov.wake_up_timer_thold_hit_cg.sample(intr_status_exp[WKUP], wkup_thold, rtl_count);
+    end
+  end
+endtask : collect_wkup_timer_coverage
+
 task aon_timer_scoreboard::run_wkup_timer();
   event sample_coverage;
+  bit wkup_enabled;
   forever begin
-    wait (wkup_en);
+    wait_for_wkup_enable_matching(.enable(1));
+    `uvm_info(`gfn, "WKUP ctrl.enable signal is set", UVM_DEBUG)
+
     fork
       begin
-        forever begin
-          @(sample_coverage.triggered);
-          if (cfg.en_cov) begin
-            bit [63:0] rtl_count;
-            //reading RTL value since TB doesn't keep track of count
-            csr_rd(.ptr(ral.wkup_count_lo), .value(rtl_count[31:0]), .backdoor(1));
-            csr_rd(.ptr(ral.wkup_count_lo), .value(rtl_count[63:32]), .backdoor(1));
-            cov.wake_up_timer_thold_hit_cg.sample(intr_status_exp[WKUP],
-                                                  wkup_thold,
-                                                  rtl_count);
-          end
-        end
+        collect_wkup_timer_coverage(sample_coverage);
      end
      begin
        // trying to count how many cycles we need to count
-        uint count = 0;
-        // It takes 4 aon clks from the write enabling the watchdog to take effect due to the CDC
-        // logic.
-        cfg.aon_clk_rst_vif.wait_clks(4);
+        uint count = 0;
+        bit local_interrupt = 0;
+        bit [1:0] local_intr_exp = 0;
+        bit cdc_reg_compared = 0;
+        bit [63:0] count_backdoor_value = 0;
+        bit [11:0] preloaded_prescaler_value;
+
+        wkup_count_ev.wait_ptrigger();
+        `uvm_info(`gfn, "Start WKUP timer: UVM event 'wkup_count_ev' received", UVM_DEBUG)
+
+        `uvm_info(`gfn, $sformatf("WKUP: Start the count (count=%0d < wkup_num=%0d)",
+                                  count, wkup_num), UVM_DEBUG)
        while (count < wkup_num) begin
          cfg.aon_clk_rst_vif.wait_clks(1);
          // reset the cycle counter when we update the cycle count needed
          count = wkup_num_update_due ? 0 : (count + 1);
-          `uvm_info(`gfn, $sformatf("WKUP Timer count: %d", count), UVM_HIGH)
+          `uvm_info(`gfn, $sformatf("WKUP Timer count: %d (wkup_num=%0d)",
+                                    count, wkup_num), UVM_HIGH)
          -> sample_coverage;
        end
+        wait_for_wkup_enable_matching(.enable(1));
        `uvm_info(`gfn, $sformatf("WKUP Timer expired check for interrupts"), UVM_HIGH)
-        intr_status_exp[WKUP] = 1'b1;
+
+        // Use a local interrupt flag in case 'intr_status_exp[WKUP]' changes
+        // due to TL-UL accesses
+        local_interrupt = 1;
+        intr_status_exp[WKUP] = local_interrupt;
+
+        wkup_cause = `gmv(ral.wkup_cause);
+        wkup_cause |= intr_status_exp[WKUP];
+        `uvm_info(`gfn, $sformatf("Predicting wkup_cause = 0x%0x", wkup_cause), UVM_DEBUG)
+        fork predict_wkup_cause(.wkup_cause(wkup_cause), .wait_for_we(0)); join_none
+
+        -> sample_coverage;
+        // TODO: fix after the RTL fix (the RTL currently doesn't reset prescale_count_q when
+        // wkup_ctrl is written)
+        if (! uvm_hdl_read({path_to_rtl,".u_core.prescale_count_q"}, preloaded_prescaler_value))
+          `uvm_error (`gfn, "HDL Read from tb.dut.u_core.prescale_count_q failed")
+
        // Interrupt should happen N+1 clock ticks after count == wkup_num.
-        cfg.aon_clk_rst_vif.wait_clks(prescaler+1);
-        // Wait for 2 extra cycles in AON clock domain to account for CDC randomization delay
-        // since wakeup interrupt is synchronized to AON clock domain
-        if (cfg.en_dv_cdc) begin
-          cfg.aon_clk_rst_vif.wait_clks(2);
+        // Original delay:
+        // cfg.aon_clk_rst_vif.wait_clks(prescaler+1);
+        if (prescaler == 0) begin
+          cfg.aon_clk_rst_vif.wait_clks(1);
        end
+        // TODO: applies to the else-if/else below; fix after the RTL fix (the RTL currently
+        // doesn't reset prescale_count_q when wkup_ctrl is written)
+        else if ((prescaler + 1) > preloaded_prescaler_value) begin
+          cfg.aon_clk_rst_vif.wait_clks((prescaler + 1 - preloaded_prescaler_value));
+        end
+        else begin
+          bit wkup_incr;
+          bit [63:0] wkup_count, wkup_thold;
+          // Note: this is a bug/feature in the RTL. If the RTL was counting with
+          // wkup_ctrl.enable=1 and prescaler=X, and the internal prescaler counter
+          // `prescale_count_q` holds a value Y by the time SW writes wkup_ctrl.enable=0, then
+          // after re-enabling with wkup_ctrl.enable=1 a new wkup_ctrl.prescaler value lower
+          // than the value already in `prescale_count_q` only becomes effective once the
+          // prescaler has overflowed.
+
+          // The RTL's `prescale_count_q` may have a "pre-counted" value in it, and unless
+          // there's a reset, the interrupt won't propagate to the output until
+          // prescale_count_q == reg2hw_i.wkup_ctrl.prescaler.q (in other words, until
+          // wkup_incr is true).
+
+          // Read the threshold and count, and see whether we've incremented
+          do begin
+            csr_rd(.ptr(ral.wkup_count_lo), .value(wkup_count[31:0]), .backdoor(1));
+            csr_rd(.ptr(ral.wkup_count_hi), .value(wkup_count[63:32]), .backdoor(1));
+
+            csr_rd(.ptr(ral.wkup_thold_lo), .value(wkup_thold[31:0]), .backdoor(1));
+            csr_rd(.ptr(ral.wkup_thold_hi), .value(wkup_thold[63:32]), .backdoor(1));
+
+            if (wkup_count < wkup_thold)
+              cfg.aon_clk_rst_vif.wait_clks(1);
+          end while (wkup_count < wkup_thold);
+
+          do begin
+            if (!
uvm_hdl_read({path_to_rtl,".u_core.wkup_incr"},wkup_incr)) + `uvm_error (`gfn, "HDL Read from tb.dut.u_core.wkup_incr failed") + if (!wkup_incr) + cfg.aon_clk_rst_vif.wait_clks(1); + + end while(!wkup_incr); + // Extra delay to allow the interrupt to propagate + cfg.aon_clk_rst_vif.wait_clks(1); + end // else: !if((prescaler + 1) > preloaded_prescaler_value) + + // WKUP interrupt ('intr_status_exp[WKUP]') may have been cleared by the time we reach + // the AON delay, so we overwrite it and revert it to its original value after + // updating timed regs + local_interrupt = intr_status_exp[WKUP]; + intr_status_exp[WKUP] = 1; + + // Push the local_interrupt + fork wkup_intr_predicted_values(intr_status_exp[WKUP]); join_none + + local_intr_exp = intr_status_exp; + fork predict_intr_state(local_intr_exp); join_none + + update_timed_regs_wkup(); + // Restoring the value for 'intr_status_exp[WKUP]' + intr_status_exp[WKUP] = local_interrupt; + // Wait a further 5 clocks for the interrupt to propagate through logic in the clk domain // to become visible on the top-level pins. cfg.clk_rst_vif.wait_clks(5); - // Check interrupt pin - `DV_CHECK_CASE_EQ(intr_status_exp[WKUP], - cfg.intr_vif.sample_pin(.idx(WKUP))) + check_aon_domain_interrupt(.timer_type(WKUP)); + // Check wakeup pin - `DV_CHECK_CASE_EQ(1, + `DV_CHECK_CASE_EQ(ral.wkup_cause.predicting_value ? + intr_status_exp[WKUP] : `gmv(ral.wkup_cause), cfg.aon_intr_vif.sample_pin(.idx(1))) - `uvm_info(`gfn, $sformatf("WKUP Timer check passed."), UVM_HIGH) + + `uvm_info(`gfn, $sformatf("WKUP Timer checks passed."), UVM_HIGH) end begin wait (!wkup_en || !cfg.aon_clk_rst_vif.rst_n); - `uvm_info(`gfn, $sformatf("WKUP Timer disabled, quit scoring"), UVM_HIGH) wkup_en = 0; + wait_for_wkup_enable_matching(.enable(0)); + `uvm_info(`gfn, $sformatf("WKUP Timer disabled, quit scoring"), UVM_HIGH) end join_any disable fork; end endtask : run_wkup_timer +function aon_timer_intr_timed_regs::timed_reg_e aon_timer_scoreboard::timers_e2time_reg_e(timers_e + timer_type + ); + aon_timer_intr_timed_regs::timed_reg_e return_value; + + if (timer_type == WDOG) + return_value = aon_timer_intr_timed_regs::TimedIntrStateWdogBark; + else if (timer_type == WKUP) + return_value = aon_timer_intr_timed_regs::TimedIntrStateWkupExpired; + else + `uvm_fatal(`gfn, $sformatf("Wrong timer index passed (%s)",timer_type.name)) + + return return_value; +endfunction + +function void aon_timer_scoreboard::check_aon_domain_interrupt(timers_e timer_type); + bit cdc_reg_compared = 0; + aon_timer_intr_timed_regs::timed_reg_e reg_idx = timers_e2time_reg_e(timer_type); + + for(int i=0; i < timed_regs.timed[reg_idx].fields.size(); i++) begin + if (timed_regs.timed[reg_idx].fields[i].pred_valid) begin + `DV_CHECK_CASE_EQ(timed_regs.timed[reg_idx].fields[i].pred_latest.val_new, + cfg.intr_vif.sample_pin(.idx(timer_type))) + cdc_reg_compared = 1; + end + end + + if (!cdc_reg_compared) begin + `uvm_fatal(`gfn, $sformatf({"TB failed to compare sys-clock %s interrupt pin - ", + "likely due to the CDC timed register not having a prediction"}, + timer_type.name)) + end + `uvm_info(`gfn, $sformatf("'intr_%0s=0x%0x' comparison matched", + timer_type == WKUP ? 
"wkup_timer_expired_o" : "wdog_timer_bark_o", + cfg.intr_vif.sample_pin(.idx(timer_type))), UVM_DEBUG) +endfunction : check_aon_domain_interrupt + +function void aon_timer_scoreboard::return_pred_intr_q(timers_e timer_type, ref bit pred_q[$]); + if (timer_type == WDOG) + pred_q = predicted_wdog_intr_q; + else if (timer_type == WKUP) + pred_q = predicted_wkup_intr_q; + else + `uvm_fatal(`gfn, $sformatf("Wrong timer index passed (%s)",timer_type.name)) + + if (pred_q.size == 0) + `uvm_fatal(`gfn, {"'predicted_", timer_type == WKUP ? "wkup" : "wdog", "_intr_q.size = 0'"}) +endfunction + +function void aon_timer_scoreboard::check_intr_state_bit(timers_e timer_type, bit actual_value); + bit cdc_reg_compared = 0; + bit pred_q[$]; + + return_pred_intr_q(timer_type, pred_q); + `DV_CHECK_CASE_EQ(pred_q[0], // Comparing against the oldes predicted value + actual_value) + + `uvm_info(`gfn, $sformatf("'intr_state.%0s=0x%0x' comparison matched", + timer_type.name, actual_value), UVM_DEBUG) +endfunction : check_intr_state_bit + +task aon_timer_scoreboard::collect_wdog_bark_timer_coverage(ref event sample_coverage); + forever begin + @ (sample_coverage); + if (cfg.en_cov) begin + bit [31:0] rtl_count; + //reading RTL value since TB doesn't keep track of count + csr_rd(.ptr(ral.wdog_count), .value(rtl_count), .backdoor(1)); + cov.watchdog_timer_bark_thold_hit_cg.sample(intr_status_exp[WDOG], bark_thold, rtl_count); + end + end +endtask : collect_wdog_bark_timer_coverage task aon_timer_scoreboard::run_wdog_bark_timer(); event sample_coverage; + // Used as a flag when enable=0 at the same time the interrupt propagates + bit predicting_interrupt; forever begin - wait (wdog_en); + wait_for_wdog_enable_matching(.enable(1)); + `uvm_info(`gfn, "wdog_ctrol.WDOG_EN = 1, allow watchdog to count", UVM_DEBUG) fork begin - forever begin - @(sample_coverage.triggered); - if (cfg.en_cov) begin - bit [31:0] rtl_count; - //reading RTL value since TB doesn't keep track of count - csr_rd(.ptr(ral.wdog_count), .value(rtl_count), .backdoor(1)); - cov.watchdog_timer_bark_thold_hit_cg.sample(intr_status_exp[WDOG], - bark_thold, - rtl_count); - end - end + collect_wdog_bark_timer_coverage(sample_coverage); end begin // trying to count how many cycles we need to count uint count = 0; - // It takes 4 aon clks from the write enabling the watchdog to take effect due to the CDC - // logic. - cfg.aon_clk_rst_vif.wait_clks(4); + bit is_enabled = 0; + bit local_interrupt = 0; + bit cdc_reg_compared = 0; + bit [31:0] count_backdoor_value; + bit wkup_cause_updated; + // Used to ensure the correct value is passed to predict intr_state + bit [1:0] local_intr_exp; + bit [1:0] backdoor_intr_state; + + wdog_count_ev.wait_ptrigger(); + `uvm_info(`gfn, "Start WDOG - bark timer UVM event 'wdog_count_ev' Triggered", UVM_DEBUG) + + `uvm_info(`gfn, $sformatf("WDOG: Start the count (count=%0d < wdog_num=%0d)", + count, wdog_bark_num),UVM_DEBUG) + while (count < wdog_bark_num) begin wait_for_sleep(); cfg.aon_clk_rst_vif.wait_clks(1); // reset the cycle counter when we update the cycle count needed count = wdog_num_update_due ? 
0 : (count + 1);
-          `uvm_info(`gfn, $sformatf("WDOG Bark Timer count: %d", count), UVM_HIGH)
+          `uvm_info(`gfn, $sformatf("WDOG Bark Timer count: %d (wdog_bark_num=%0d)",
+                                    count, wdog_bark_num), UVM_HIGH)
          -> sample_coverage;
        end
+
+        wait_for_wdog_enable_matching(.enable(1));
        `uvm_info(`gfn, $sformatf("WDOG Bark Timer expired check for interrupts"), UVM_HIGH)
+        `uvm_info(`gfn, "Setting 'intr_status_exp[WDOG]'", UVM_DEBUG)
        intr_status_exp[WDOG] = 1'b1;
+
+        wkup_cause = `gmv(ral.wkup_cause);
+        wkup_cause |= intr_status_exp[WDOG];
+
+        // Avoid blocking in case "is_busy==1" so the rest of the timely checks don't stall
+        fork predict_wkup_cause(.wkup_cause(wkup_cause), .wait_for_we(0)); join_none
        -> sample_coverage;
        // Propagation delay of one cycle from aon_core to interrupt pins.
        cfg.aon_clk_rst_vif.wait_clks(1);
+        // If the enable becomes unset at this time the prediction needs to finish
+        predicting_interrupt = 1;
+
+        // Use a flag to predict the interrupt
+        local_interrupt = intr_status_exp[WDOG];
+        intr_status_exp[WDOG] = 1;
+        // Without the aux variable, 'intr_status_exp' may update prior to the call to
+        // predict_intr_state and end up predicting the wrong value
+        local_intr_exp = intr_status_exp;
+        fork wdog_intr_predicted_values(local_intr_exp[WDOG]); join_none
+
+        fork predict_intr_state(local_intr_exp); join_none
+
+        update_timed_regs_wdog();
+        intr_status_exp[WDOG] = local_interrupt;
+
        // Wait a further 5 clocks for the interrupt to propagate through logic in the clk domain
        // to become visible on the top-level pins.
        cfg.clk_rst_vif.wait_clks(5);
-        // Check interrupt and reset_req pins
-        `DV_CHECK_CASE_EQ(intr_status_exp[WDOG],
-                          cfg.intr_vif.sample_pin(.idx(WDOG)))
+        // Check `intr_wdog_timer_bark_o`
+        check_aon_domain_interrupt(.timer_type(WDOG));
 
-        // Check wakeup pin
-        `DV_CHECK_CASE_EQ(intr_status_exp[WDOG],
-                          cfg.aon_intr_vif.sample_pin(.idx(1)))
-        `uvm_info(`gfn,
-                  $sformatf("WDOG INTR Bark: %d",
-                            intr_status_exp[WDOG]),
-                  UVM_HIGH)
+        // intr_state may have been written between the time intr_status_exp[WDOG] was set and
+        // the time the output 'intr_wdog_timer_bark_o' was compared; if that's the case we set
+        // the variable back to the value it should have
+        csr_rd(.ptr(ral.intr_state), .value(backdoor_intr_state), .backdoor(1));
+
+        if (backdoor_intr_state[WDOG] == 1 && intr_status_exp[WDOG] == 0) begin
+          `uvm_info(`gfn, {"Tweaking 'intr_status_exp[WDOG]=1' due to a write, ",
+                           "since the value was predicted"}, UVM_DEBUG)
+          intr_status_exp[WDOG] = 1; // Set again to ensure the TB is in sync
+        end
+        // Read the actual enable to see if the output will be set
+        csr_rd(.ptr(ral.wdog_ctrl.enable), .value(is_enabled), .backdoor(1));
+        // Check reset_req pins:
+        if (is_enabled) begin
+          bit predicted_wkup_req = (ral.wkup_cause.predicting_value == 0) ?
+                                   `gmv(ral.wkup_cause) : wkup_cause;
+          // If the write to wkup_cause wasn't absorbed in this same cycle, we compare against
+          // the prediction
+          if (last_wkup_cause_write_aon_clk_cycle != aon_clk_cycle) begin
+            `DV_CHECK_CASE_EQ(predicted_wkup_req, cfg.aon_intr_vif.sample_pin(.idx(1)))
+          end
+          else if (!(cfg.aon_intr_vif.sample_pin(.idx(1)) inside {0, predicted_wkup_req})) begin
+            // Otherwise, check whether the write to wkup_cause(0x0) unset the interrupt or
+            // whether it matches the predicted value
+            `uvm_fatal(`gfn, $sformatf("aon_wkup_req_o comparison failed (not matching 0/%0d)",
+                                       predicted_wkup_req))
+          end
+        end // if (is_enabled)
+        else begin
+          // If disabled, wkup_output will be 0/1 depending on the value the flop ended up
+          // latching. Hence we don't compare
+        end
+        `uvm_info(`gfn, $sformatf("WDOG INTR Bark: %d", intr_status_exp[WDOG]), UVM_HIGH)
+        predicting_interrupt = 0;
      end
      begin
-        wait (!wdog_en || !cfg.aon_clk_rst_vif.rst_n);
+        wait (cfg.aon_clk_rst_vif.rst_n);
+        wait_for_wdog_enable_matching(.enable(0));
        `uvm_info(`gfn, $sformatf("WDOG Timer disabled, quit scoring"), UVM_HIGH)
+        // Wait a sys clock to see whether the interrupt propagates after the module
+        // has just been disabled
+        cfg.clk_rst_vif.wait_clks(1);
+        wait (predicting_interrupt == 0 || !cfg.aon_clk_rst_vif.rst_n);
        wdog_en = 0;
      end
    join_any
@@ -391,36 +1225,66 @@ task aon_timer_scoreboard::run_wdog_bark_timer();
  end
 endtask : run_wdog_bark_timer
 
+task aon_timer_scoreboard::wait_for_wdog_enable_matching(bit enable);
+  bit local_enable;
+  do begin
+    csr_rd(.ptr(ral.wdog_ctrl.enable), .value(local_enable), .backdoor(1));
+    `uvm_info(`gfn, $sformatf("[backdoor read] : WDOG_CTRL.enable = 0x%0x", local_enable),
+              UVM_DEBUG)
+    if (local_enable != enable)
+      cfg.aon_clk_rst_vif.wait_clks(1);
+  end
+  while (local_enable != enable);
+endtask : wait_for_wdog_enable_matching
+
+task aon_timer_scoreboard::wait_for_wkup_enable_matching(bit enable);
+  bit local_enable;
+  do begin
+    csr_rd(.ptr(ral.wkup_ctrl.enable), .value(local_enable), .backdoor(1));
+    `uvm_info(`gfn, $sformatf("[backdoor read] : WKUP_CTRL.enable = 0x%0x", local_enable),
+              UVM_DEBUG)
+    if (local_enable != enable)
+      cfg.aon_clk_rst_vif.wait_clks(1);
+  end
+  while (local_enable != enable);
+endtask : wait_for_wkup_enable_matching
+
+task aon_timer_scoreboard::collect_wdog_bite_timer_coverage(ref event sample_coverage);
+  forever begin
+    @ (sample_coverage);
+    if (cfg.en_cov) begin
+      bit [31:0] rtl_count;
+      // Read the RTL value since the TB doesn't keep track of the count
+      csr_rd(.ptr(ral.wdog_count), .value(rtl_count), .backdoor(1));
+      cov.watchdog_timer_bite_thold_hit_cg.sample(wdog_rst_req_exp, bite_thold, rtl_count);
+    end
+  end
+endtask : collect_wdog_bite_timer_coverage
+
 task aon_timer_scoreboard::run_wdog_bite_timer();
  event sample_coverage;
  forever begin
-    wait (wdog_en);
+    wait_for_wdog_enable_matching(.enable(1));
+    `uvm_info(`gfn, "WDOG ctrl.enable signal is set", UVM_DEBUG)
    fork
      begin
-        forever begin
-          @(sample_coverage.triggered);
-          if (cfg.en_cov) begin
-            bit [31:0] rtl_count;
-            //reading RTL value since TB doesn't keep track of count
-            csr_rd(.ptr(ral.wdog_count), .value(rtl_count), .backdoor(1));
-            cov.watchdog_timer_bite_thold_hit_cg.sample(wdog_rst_req_exp,
-                                                        bite_thold,
-                                                        rtl_count);
-          end
-        end
+        collect_wdog_bite_timer_coverage(sample_coverage);
      end
      begin
        // trying to count how many cycles we need to count
        uint count = 0;
-        // It takes 4 aon clks from the write enabling the watchdog to take effect due to the CDC
-        // logic.
-        cfg.aon_clk_rst_vif.wait_clks(4);
+
+        wdog_count_ev.wait_ptrigger();
+        `uvm_info(`gfn, "Start WDOG - bite timer: UVM event 'wdog_count_ev' received", UVM_DEBUG)
+        `uvm_info(`gfn, "Start WDOG - bite: start to count", UVM_DEBUG)
        while (count < wdog_bite_num) begin
          wait_for_sleep();
          cfg.aon_clk_rst_vif.wait_clks(1);
          // reset the cycle counter when we update the cycle count needed
          count = wdog_num_update_due ?
0 : (count + 1); - `uvm_info(`gfn, $sformatf("WDOG Bite Timer count: %d", count), UVM_HIGH) + `uvm_info(`gfn, $sformatf("WDOG Bite Timer count: %d, wdog_bite_num: %0d", + count, wdog_bite_num), UVM_HIGH) -> sample_coverage; end `uvm_info(`gfn, $sformatf("WDOG Bite Timer expired check for interrupts"), UVM_HIGH) @@ -435,18 +1299,31 @@ task aon_timer_scoreboard::run_wdog_bite_timer(); // Check reset_req pin `DV_CHECK_CASE_EQ(wdog_rst_req_exp, cfg.aon_intr_vif.sample_pin(.idx(0))) - - `uvm_info(`gfn, - $sformatf("WDOG INTR Bite: %d", - wdog_rst_req_exp), - UVM_HIGH) + `uvm_info(`gfn,$sformatf("WDOG INTR Bite: %d", wdog_rst_req_exp), UVM_HIGH) end begin wait (!wdog_en || !cfg.aon_clk_rst_vif.rst_n); - `uvm_info(`gfn, $sformatf("WDOG Timer disabled, quit scoring"), UVM_HIGH) wdog_en = 0; + + wait_for_wdog_enable_matching(.enable(0)); + `uvm_info(`gfn, $sformatf("WDOG Timer disabled, quit scoring"), UVM_HIGH) end join_any disable fork; end endtask : run_wdog_bite_timer + +function void aon_timer_scoreboard::reset(string kind = "HARD"); + super.reset(kind); + // reset local fifos queues and variables + `uvm_info(`gfn, "Resetting scoreboard", UVM_DEBUG) + intr_status_exp = 0; + wkup_cause = 0; + + ongoing_intr_state_read = 0; + predicted_wkup_intr_q = {}; + predicted_wdog_intr_q = {}; + // the register is initialised to 0x0 + predicted_wdog_intr_q.push_back(0); + predicted_wkup_intr_q.push_back(0); +endfunction : reset
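
Note on the backdoor-polling pattern used above: wait_for_wdog_enable_matching and
wait_for_wkup_enable_matching repeat the same loop with the register hard-coded. Purely as an
illustrative sketch (not part of this patch), the pattern could be factored into a single helper;
the task name wait_for_field_matching below is hypothetical, while csr_rd(.backdoor(1)) and
cfg.aon_clk_rst_vif.wait_clks() are the calls already used in this scoreboard:

  // Hypothetical helper (sketch only): poll a RAL field through the backdoor on every AON
  // clock until it holds the expected value, i.e. until the front-door write has crossed the CDC.
  task aon_timer_scoreboard::wait_for_field_matching(uvm_reg_field field, uvm_reg_data_t expected);
    uvm_reg_data_t observed;
    do begin
      // Backdoor read returns the AON-side value without consuming bus cycles
      csr_rd(.ptr(field), .value(observed), .backdoor(1));
      if (observed != expected)
        cfg.aon_clk_rst_vif.wait_clks(1); // let the CDC-crossed write settle
    end while (observed != expected);
  endtask : wait_for_field_matching

With such a helper, wait_for_wkup_enable_matching(.enable(1)) would reduce to
wait_for_field_matching(ral.wkup_ctrl.enable, 1), at the cost of a corresponding extern
declaration in the scoreboard class.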