diff --git a/vendor/lowrisc_ibex.lock.hjson b/vendor/lowrisc_ibex.lock.hjson
index f6cdd2aa..57b3fb6a 100644
--- a/vendor/lowrisc_ibex.lock.hjson
+++ b/vendor/lowrisc_ibex.lock.hjson
@@ -9,6 +9,6 @@
upstream:
{
url: https://github.com/lowRISC/ibex.git
- rev: 7139313ad3964353898ed446105f13916925a1ed
+ rev: df88055aa38868451ce90d7fcc2bbf172a2dc8a0
}
}
diff --git a/vendor/lowrisc_ibex/.github/ISSUE_TEMPLATE/bug.md b/vendor/lowrisc_ibex/.github/ISSUE_TEMPLATE/bug.md
deleted file mode 100644
index 50de765b..00000000
--- a/vendor/lowrisc_ibex/.github/ISSUE_TEMPLATE/bug.md
+++ /dev/null
@@ -1,57 +0,0 @@
----
-name: Report a bug in Ibex
-title: Report a bug in Ibex
-about: Have you found a bug in Ibex or associated tooling?
-labels: Type:Bug
----
-
-
-
-## Observed Behavior
-
-
-## Expected Behavior
-
-
-## Steps to reproduce the issue
-
-
-## My Environment
-
-
-
-**EDA tool and version:**
-
-
-**Operating system:**
-
-
-**Version of the Ibex source code:**
-
diff --git a/vendor/lowrisc_ibex/.github/ISSUE_TEMPLATE/question.md b/vendor/lowrisc_ibex/.github/ISSUE_TEMPLATE/question.md
deleted file mode 100644
index 116a4731..00000000
--- a/vendor/lowrisc_ibex/.github/ISSUE_TEMPLATE/question.md
+++ /dev/null
@@ -1,45 +0,0 @@
----
-name: Ask a question related to Ibex
-title: Ask a question related to Ibex
-about: Do you have a question about (the use of) Ibex?
-labels: Type:Question
----
-
-
-
-
-
-
-## My Environment
-
-
-
-**EDA tool and version:**
-
-
-**Operating system:**
-
-
-**Version of the Ibex source code:**
-
diff --git a/vendor/lowrisc_ibex/.github/workflows/pr_lint_review.yml b/vendor/lowrisc_ibex/.github/workflows/pr_lint_review.yml
deleted file mode 100644
index 513e3874..00000000
--- a/vendor/lowrisc_ibex/.github/workflows/pr_lint_review.yml
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright lowRISC contributors.
-# Licensed under the Apache License, Version 2.0, see LICENSE for details.
-# SPDX-License-Identifier: Apache-2.0
-name: pr-lint-review
-
-on:
- workflow_run:
- workflows: ["pr-trigger"]
- types:
- - completed
-
-jobs:
- review_triggered:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v2
-
- # this workflow does not run in a PR context
- # download 'event.json' file from a PR-tiggered workflow
- # to mock the PR context and make a review
- - name: 'Download artifact'
- id: get-artifacts
- uses: actions/github-script@v3.1.0
- with:
- script: |
- var artifacts = await github.actions.listWorkflowRunArtifacts({
- owner: context.repo.owner,
- repo: context.repo.repo,
- run_id: ${{github.event.workflow_run.id }},
- });
- var matchArtifact = artifacts.data.artifacts.filter((artifact) => {
- return artifact.name == "event.json"
- })[0];
- var download = await github.actions.downloadArtifact({
- owner: context.repo.owner,
- repo: context.repo.repo,
- artifact_id: matchArtifact.id,
- archive_format: 'zip',
- });
- var fs = require('fs');
- fs.writeFileSync('${{github.workspace}}/event.json.zip', Buffer.from(download.data));
- - run: |
- unzip event.json.zip
- - name: Run Verible linter action
- uses: chipsalliance/verible-linter-action@main
- with:
- github_token: ${{ secrets.GITHUB_TOKEN }}
- suggest_fixes: 'false'
- config_file: 'vendor/lowrisc_ip/lint/tools/veriblelint/lowrisc-styleguide.rules.verible_lint'
diff --git a/vendor/lowrisc_ibex/.github/workflows/pr_trigger.yml b/vendor/lowrisc_ibex/.github/workflows/pr_trigger.yml
deleted file mode 100644
index b969698b..00000000
--- a/vendor/lowrisc_ibex/.github/workflows/pr_trigger.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright lowRISC contributors.
-# Licensed under the Apache License, Version 2.0, see LICENSE for details.
-# SPDX-License-Identifier: Apache-2.0
-name: pr-trigger
-
-on:
- pull_request:
-
-jobs:
- upload:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v2
- - name: Copy event file
- run: cp "$GITHUB_EVENT_PATH" ./event.json
-
- # If this workflow is triggered by a PR from a fork
- # it won't have sufficient access rights to make a review
- # so we just save the file needed to do the review
- # in a context with proper access rights
- - name: Upload event file as artifact
- uses: actions/upload-artifact@v2
- with:
- name: event.json
- path: event.json
diff --git a/vendor/lowrisc_ibex/CREDITS.md b/vendor/lowrisc_ibex/CREDITS.md
index f98e6900..c1d249a2 100644
--- a/vendor/lowrisc_ibex/CREDITS.md
+++ b/vendor/lowrisc_ibex/CREDITS.md
@@ -13,34 +13,68 @@ in the form of source code, bug reports, testing, marketing, or any other form,
please feel free to open a pull request to get your name added to this file.
- Alex Bradbury
+- Andreas Kurth
- Andreas Traber
- Antonio Pullini
+- Bryan Cantrill
+- Canberk Topal
+- Cathal Minnock
+- Daniel Mlynek
+- Dawid Zimonczyk
- Eunchan Kim
+- Felix Yan
+- Flavian Solt
- Florian Zaruba
- Francesco Conti
+- Gary Guo
- Germain Haugou
- Greg Chadwick
+- Harry Callahan
+- Hai Hoang Dang
+- Henner Zeller
+- Hodjat Asghari Esfeden
- Igor Loi
- Ioannis Karageorgos
-- Markus Wegmann
- Ivan Ribeiro
+- Karol Gugala
+- Leon Woestenberg
+- Luís Marques
+- Marek Pikuła
+- Markus Wegmann
+- Marno van der Maas
- Matthias Baer
+- Mehmet Burak Aykenar
- Michael Gautschi
+- Michael Gielda
+- Michael Munday
+- Michael Platzer
- Michael Schaffner
- Nils Graf
- Noah Huesser
- Noam Gallmann
- Pasquale Davide Schiavone
+- Paul O'Keeffe
- Philipp Wagner
- Pirmin Vogel
+- Prajwala Puttappa
- Rahul Behl
- Rhys Thomas
- Renzo Andri
- Robert Schilling
+- Rupert Swarbick
+- Sam Elliott
- Scott Johnson
+- Stefan Mach
+- Stefan Tauner
- Stefan Wallentowitz
- Sven Stucki
- Tao Liu
- Tobias Wölfel
- Tom Roberts
+- Tudor Timi
- Udi Jonnalagadda
+- Vladimir Rozic
+- Yuichi Sugiyama
+- Yusef Karim
+- Zachary Snow
+- Zeeshan Rafique
diff --git a/vendor/lowrisc_ibex/ci/install-build-deps.sh b/vendor/lowrisc_ibex/ci/install-build-deps.sh
index 96e197ba..1da949ae 100755
--- a/vendor/lowrisc_ibex/ci/install-build-deps.sh
+++ b/vendor/lowrisc_ibex/ci/install-build-deps.sh
@@ -21,6 +21,10 @@ if [ "$(id -u)" -ne 0 ]; then
SUDO_CMD="sudo "
fi
+if [ -z "$GITHUB_ACTIONS" ]; then
+ GITHUB_PATH=/dev/null
+fi
+
case "$ID-$VERSION_ID" in
ubuntu-16.04|ubuntu-18.04|ubuntu-20.04)
# Curl must be available to get the repo key below.
@@ -57,12 +61,14 @@ case "$ID-$VERSION_ID" in
$SUDO_CMD chmod 777 /tools/riscv-isa-sim
$SUDO_CMD tar -C /tools/riscv-isa-sim -xvzf ibex-cosim-"$IBEX_COSIM_VERSION".tar.gz --strip-components=1
echo "##vso[task.prependpath]/tools/riscv-isa-sim/bin"
+ echo "/tools/riscv-isa-sim/bin" >> $GITHUB_PATH
wget https://storage.googleapis.com/verilator-builds/verilator-"$VERILATOR_VERSION".tar.gz
$SUDO_CMD mkdir -p /tools/verilator
$SUDO_CMD chmod 777 /tools/verilator
$SUDO_CMD tar -C /tools/verilator -xvzf verilator-"$VERILATOR_VERSION".tar.gz
echo "##vso[task.prependpath]/tools/verilator/$VERILATOR_VERSION/bin"
+ echo "/tools/verilator/$VERILATOR_VERSION/bin" >> $GITHUB_PATH
# Python dependencies
#
# Updating pip and setuptools is required to have these tools properly
@@ -81,6 +87,7 @@ case "$ID-$VERSION_ID" in
$SUDO_CMD mkdir -p /tools/verible && $SUDO_CMD chmod 777 /tools/verible
tar -C /tools/verible -xf verible.tar.gz --strip-components=1
echo "##vso[task.prependpath]/tools/verible/bin"
+ echo "/tools/verible/bin" >> $GITHUB_PATH
;;
*)
@@ -96,3 +103,4 @@ curl -Ls -o build/toolchain/rv32-toolchain.tar.xz "$TOOLCHAIN_URL"
$SUDO_CMD mkdir -p /tools/riscv && $SUDO_CMD chmod 777 /tools/riscv
tar -C /tools/riscv -xf build/toolchain/rv32-toolchain.tar.xz --strip-components=1
echo "##vso[task.prependpath]/tools/riscv/bin"
+echo "/tools/riscv/bin" >> $GITHUB_PATH
diff --git a/vendor/lowrisc_ibex/ci/run-cosim-test.sh b/vendor/lowrisc_ibex/ci/run-cosim-test.sh
index 254ca8bc..83e8b1d2 100755
--- a/vendor/lowrisc_ibex/ci/run-cosim-test.sh
+++ b/vendor/lowrisc_ibex/ci/run-cosim-test.sh
@@ -27,12 +27,14 @@ echo "Running $TEST_NAME with co-simulation"
build/lowrisc_ibex_ibex_simple_system_cosim_0/sim-verilator/Vibex_simple_system --meminit=ram,$TEST_ELF
if [ $? != 0 ]; then
echo "##vso[task.logissue type=error]Running % failed co-simulation testing"
+ echo "::error::Running % failed co-simulation testing"
exit 1
fi
grep 'FAILURE' ibex_simple_system.log
if [ $? != 1 ]; then
echo "##vso[task.logissue type=error]Failure seen in $TEST_NAME log"
+ echo "::error::Failure seen in $TEST_NAME log"
echo "Log contents:"
cat ibex_simple_system.log
exit 1
@@ -42,6 +44,7 @@ if [ $SKIP_PASS_CHECK != 1 ]; then
grep 'PASS' ibex_simple_system.log
if [ $? != 0 ]; then
echo "##vso[task.logissue type=error]No pass seen in $TEST_NAME log"
+ echo "::error::No pass seen in $TEST_NAME log"
echo "Log contents:"
cat ibex_simple_system.log
exit 1
diff --git a/vendor/lowrisc_ibex/ci/vars.env b/vendor/lowrisc_ibex/ci/vars.env
new file mode 100644
index 00000000..38c8edeb
--- /dev/null
+++ b/vendor/lowrisc_ibex/ci/vars.env
@@ -0,0 +1,15 @@
+# Copyright lowRISC contributors.
+# Licensed under the Apache License, Version 2.0, see LICENSE for details.
+# SPDX-License-Identifier: Apache-2.0
+
+# Pipeline variables, used by the public and private CI pipelines
+# Quote values to ensure they are parsed as string (version numbers might
+# end up as float otherwise).
+VERILATOR_VERSION=v4.104
+IBEX_COSIM_VERSION=15fbd568
+RISCV_TOOLCHAIN_TAR_VERSION=20220210-1
+RISCV_TOOLCHAIN_TAR_VARIANT=lowrisc-toolchain-gcc-rv32imcb
+RISCV_COMPLIANCE_GIT_VERSION=844c6660ef3f0d9b96957991109dfd80cc4938e2
+VERIBLE_VERSION=v0.0-2135-gb534c1fe
+# lowRISC-internal version numbers of Ibex-specific Spike builds.
+SPIKE_IBEX_VERSION=20220817-git-eccdcb15c3e51b4f7906c7b42fb824f24a4338a2
diff --git a/vendor/lowrisc_ibex/doc/03_reference/images/blockdiagram.svg b/vendor/lowrisc_ibex/doc/03_reference/images/blockdiagram.svg
index cee009a3..16924ccf 100644
--- a/vendor/lowrisc_ibex/doc/03_reference/images/blockdiagram.svg
+++ b/vendor/lowrisc_ibex/doc/03_reference/images/blockdiagram.svg
@@ -2,21 +2,21 @@
diff --git a/vendor/lowrisc_ibex/dv/uvm/common_project_cfg.hjson b/vendor/lowrisc_ibex/dv/uvm/common_project_cfg.hjson
index f91771b3..ef0499fe 100644
--- a/vendor/lowrisc_ibex/dv/uvm/common_project_cfg.hjson
+++ b/vendor/lowrisc_ibex/dv/uvm/common_project_cfg.hjson
@@ -5,6 +5,7 @@
project: ibex
// These keys are expected by dvsim.py, so we have to set them to something.
+ book: bogus.book.domain
doc_server: bogus.doc.server
results_server: bogus.results.server
results_html_name: report.html
diff --git a/vendor/lowrisc_ibex/dv/uvm/core_ibex/directed_tests/README.md b/vendor/lowrisc_ibex/dv/uvm/core_ibex/directed_tests/README.md
index b63b774e..cb82814d 100644
--- a/vendor/lowrisc_ibex/dv/uvm/core_ibex/directed_tests/README.md
+++ b/vendor/lowrisc_ibex/dv/uvm/core_ibex/directed_tests/README.md
@@ -5,7 +5,7 @@ This directory contains the custom directed tests as well as scripts and headers
Currently following open source test suites are vendored:
- [riscv-tests](https://github.com/riscv-software-src/riscv-tests)
- [riscv-arch-tests](https://github.com/riscv-non-isa/riscv-arch-test)
-- epmp-tests ([fork](https://github.com/Saad525/riscv-isa-sim) from an opensource [repo](https://github.com/joxie/riscv-isa-sim))
+- epmp-tests ([fork](https://github.com/lowRISC/riscv-isa-sim/tree/mseccfg_tests) from an opensource [repo](https://github.com/joxie/riscv-isa-sim))
## Generating test list
diff --git a/vendor/lowrisc_ibex/dv/uvm/core_ibex/scripts/directed_test_schema.py b/vendor/lowrisc_ibex/dv/uvm/core_ibex/scripts/directed_test_schema.py
index fdbdc754..7cd4faf7 100755
--- a/vendor/lowrisc_ibex/dv/uvm/core_ibex/scripts/directed_test_schema.py
+++ b/vendor/lowrisc_ibex/dv/uvm/core_ibex/scripts/directed_test_schema.py
@@ -62,7 +62,8 @@ class Config: # noqa
# DConfig.VALIDATORS #
##################################
- @pydantic.validator('ld_script', 'includes', pre=True)
+ @pydantic.field_validator('ld_script', 'includes', mode='before')
+ @classmethod
def _make_valid_paths(cls, v: Any) -> pathlib.Path:
return make_valid_pathlib_path(cls, v)
@@ -124,20 +125,20 @@ def yaml_file_must_exist(cls, v: pathlib.Path):
raise ValueError(f"Path object not found in filesystem : {v}")
return v
- @pydantic.root_validator()
- def test_config_must_exist(cls, values):
- """Check that if a test specifies a common config, it exists in the list available."""
- configs = [c.config for c in values.get('configs')]
- for test in values.get('tests'):
- if test.config not in configs:
+ @pydantic.model_validator(mode='after')
+ def test_config_must_exist(self):
+ """A test may only specify common configs in the available list."""
+ config_names = {c.config for c in self.configs}
+ for test in self.tests:
+ if test.config not in config_names:
raise ValueError(
f"Test '{test.test}' gave the config '{test.config}', but "
"this config does not exist in the file "
- f"'{values.get('yaml')}'. Configs detected : {configs} \n")
- return values
+ f"'{self.yaml}'. Configs detected : {self.configs} \n")
+ return self
- @pydantic.root_validator()
- def all_paths_must_exist(cls, values):
+ @pydantic.model_validator(mode='after')
+ def all_paths_must_exist(self):
"""Check that all fields specifying files exist on disk.
We need to check all fields recursively for pathlib.Path fields,
@@ -145,18 +146,20 @@ def all_paths_must_exist(cls, values):
"""
def check_model_path_fields_exist(model):
- for f in filter(lambda f: (f.type_ == pathlib.Path),
- model.__fields__.values()):
- p = validate_path_exists(getattr(model, f.name), values.get('yaml'))
- setattr(model, f.name, p)
+ for k, f in model.__fields__.items():
+ if f.annotation != pathlib.Path:
+ continue
- for c in values.get('configs'):
+ p = validate_path_exists(getattr(model, k), self.yaml)
+ setattr(model, k, p)
+
+ for c in self.configs:
check_model_path_fields_exist(c)
- for t in values.get('tests'):
+ for t in self.tests:
check_model_path_fields_exist(t)
- return values
+ return self
def import_model(directed_test_yaml: pathlib.Path) -> dict:
diff --git a/vendor/lowrisc_ibex/dv/uvm/core_ibex/scripts/ibex_cmd.py b/vendor/lowrisc_ibex/dv/uvm/core_ibex/scripts/ibex_cmd.py
index 3cf163e2..0d8c5346 100644
--- a/vendor/lowrisc_ibex/dv/uvm/core_ibex/scripts/ibex_cmd.py
+++ b/vendor/lowrisc_ibex/dv/uvm/core_ibex/scripts/ibex_cmd.py
@@ -150,13 +150,18 @@ def filter_tests_by_config(cfg: ibex_config.Config,
for test in test_list:
if "rtl_params" not in test:
- # We currently only exclude tests by mismatching 'rtl_params', so if
- # that key is missing then the test is accepted by default.
+ # We currently only exclude tests by mismatching 'rtl_params', so
+ # if that key is missing then the test is accepted by default.
filtered_test_list.append(test)
else:
param_dict = test['rtl_params']
assert isinstance(param_dict, dict)
for p, p_val in param_dict.items():
+ # Parameters are strings or ints, or lists of those two. Coerce
+ # to the latter to make the code below simpler.
+ if isinstance(p_val, str) or isinstance(p_val, int):
+ p_val = [p_val]
+
config_val = cfg.params.get(p, None)
# Throw an error if required RTL parameters in the testlist
@@ -171,11 +176,11 @@ def filter_tests_by_config(cfg: ibex_config.Config,
# bitmanipulation tests). If this is the case, the testlist
# will specify all legal enum values, check if any of them
# match the config.
- if ((isinstance(p_val, list) and (config_val not in p_val)) or
- (isinstance(p_val, str) and (config_val != p_val))):
+ if config_val not in p_val:
logger.warning(
- f"Rejecting test {test['test']}, 'rtl_params' specified "
- "not compatible with ibex_config")
+ f"Rejecting test: {test['test']}. It specifies "
+ f"rtl_params of {p_val}, which doesn't contain the "
+ f"expected '{config_val}'.")
break
# The test is accepted if we got this far
diff --git a/vendor/lowrisc_ibex/dv/uvm/core_ibex/scripts/report_lib/util.py b/vendor/lowrisc_ibex/dv/uvm/core_ibex/scripts/report_lib/util.py
index c94698d4..f6a00756 100644
--- a/vendor/lowrisc_ibex/dv/uvm/core_ibex/scripts/report_lib/util.py
+++ b/vendor/lowrisc_ibex/dv/uvm/core_ibex/scripts/report_lib/util.py
@@ -96,7 +96,7 @@ def parse_xcelium_cov_report(cov_report: str) -> Dict[str, Dict[str, Dict[str, i
metric_info.append((metric_info_match.group(1),
metric_info_match.group(2)))
- # Skip header seperator line
+ # Skip header separator line
metrics_start_line = line_no + 2
if metrics_start_line == -1:
diff --git a/vendor/lowrisc_ibex/dv/verilator/pcount/cpp/ibex_pcounts.cc b/vendor/lowrisc_ibex/dv/verilator/pcount/cpp/ibex_pcounts.cc
index 733dcf09..6924ee52 100644
--- a/vendor/lowrisc_ibex/dv/verilator/pcount/cpp/ibex_pcounts.cc
+++ b/vendor/lowrisc_ibex/dv/verilator/pcount/cpp/ibex_pcounts.cc
@@ -10,6 +10,7 @@
#include
extern "C" {
+extern unsigned int mhpmcounter_num();
extern unsigned long long mhpmcounter_get(int index);
}
@@ -32,24 +33,48 @@ const std::vector ibex_counter_names = {
"Multiply Wait",
"Divide Wait"};
+static bool has_hpm_counter(int index) {
+ // The "cycles" and "instructions retired" counters are special and always
+ // exist.
+ if (index == 0 || index == 2)
+ return true;
+
+ // The "NONE" counter is a placeholder. The space reserves an index that was
+ // once the "MTIME" CSR, but now is unused. Return false: there's no real HPM
+ // counter at index 1.
+ if (index == 1)
+ return false;
+
+ // Otherwise, a counter exists if the index is strictly less than
+ // the MHPMCounterNum parameter that got passed to the
+ // ibex_cs_registers module.
+ return index < mhpmcounter_num();
+}
+
std::string ibex_pcount_string(bool csv) {
- char seperator = csv ? ',' : ':';
+ char separator = csv ? ',' : ':';
std::string::size_type longest_name_length;
if (!csv) {
longest_name_length = 0;
- for (const std::string &counter_name : ibex_counter_names) {
- longest_name_length = std::max(longest_name_length, counter_name.length());
+ for (int i = 0; i < ibex_counter_names.size(); ++i) {
+ if (has_hpm_counter(i)) {
+ longest_name_length =
+ std::max(longest_name_length, ibex_counter_names[i].length());
+ }
}
- // Add 1 to always get at least once space after the seperator
+ // Add 1 to always get at least once space after the separator
longest_name_length++;
}
std::stringstream pcount_ss;
for (int i = 0; i < ibex_counter_names.size(); ++i) {
- pcount_ss << ibex_counter_names[i] << seperator;
+ if (!has_hpm_counter(i))
+ continue;
+
+ pcount_ss << ibex_counter_names[i] << separator;
if (!csv) {
int padding = longest_name_length - ibex_counter_names[i].length();
diff --git a/vendor/lowrisc_ibex/examples/simple_system/README.md b/vendor/lowrisc_ibex/examples/simple_system/README.md
index 4459e5a3..12ab72df 100644
--- a/vendor/lowrisc_ibex/examples/simple_system/README.md
+++ b/vendor/lowrisc_ibex/examples/simple_system/README.md
@@ -27,13 +27,20 @@ run stand-alone binaries. It contains:
## Building Simulation
-The Simple System simulator binary can be built via FuseSoC. From the Ibex
-repository root run:
+The Simple System simulator binary can be built via FuseSoC. This can be built
+with different configurations of Ibex, specified by parameters. To build the
+"small" configuration, run the following command from the Ibex repository root.
+
```
-fusesoc --cores-root=. run --target=sim --setup --build lowrisc:ibex:ibex_simple_system --RV32E=0 --RV32M=ibex_pkg::RV32MFast
+fusesoc --cores-root=. run --target=sim --setup --build \
+ lowrisc:ibex:ibex_simple_system $(util/ibex_config.py small fusesoc_opts)
```
+To see performance counters other than the total number of instructions
+executed, you will need to ask for a larger configuration. One possible example
+comes from replacing `small` in the command above with `opentitan`.
+
## Building Software
Simple System related software can be found in `examples/sw/simple_system`.
@@ -68,10 +75,17 @@ built as described above. Use
`./examples/sw/simple_system/hello_test/hello_test.elf` to run the `hello_test`
binary.
-Pass `-t` to get an FST trace of execution that can be viewed with
-[GTKWave](http://gtkwave.sourceforge.net/). If using the `hello_test`
-binary the simulator will halt itself, outputting some simulation
-statistics:
+Pass `-t` to get an FST/VCD trace of execution that can be viewed with
+[GTKWave](http://gtkwave.sourceforge.net/).
+
+By default a FST file is created in your current directory.
+
+To produce a VCD file, remove the Verilator flags `--trace-fst` and
+`-DVM_TRACE_FMT_FST` in ibex_simple_system.core before building the simulator
+binary.
+
+If using the `hello_test` binary the simulator will halt itself, outputting some
+simulation statistics:
```
Simulation statistics
diff --git a/vendor/lowrisc_ibex/examples/simple_system/rtl/ibex_simple_system.sv b/vendor/lowrisc_ibex/examples/simple_system/rtl/ibex_simple_system.sv
index a67687b1..ac74691d 100644
--- a/vendor/lowrisc_ibex/examples/simple_system/rtl/ibex_simple_system.sv
+++ b/vendor/lowrisc_ibex/examples/simple_system/rtl/ibex_simple_system.sv
@@ -210,9 +210,9 @@ module ibex_simple_system (
.clk_i (clk_sys),
.rst_ni (rst_sys_n),
- .test_en_i ('b0),
+ .test_en_i (1'b0),
.scan_rst_ni (1'b1),
- .ram_cfg_i ('b0),
+ .ram_cfg_i (prim_ram_1p_pkg::RAM_1P_CFG_DEFAULT),
.hart_id_i (32'b0),
// First instruction executed is at 0x0 + 0x80
@@ -249,7 +249,7 @@ module ibex_simple_system (
.scramble_nonce_i ('0),
.scramble_req_o (),
- .debug_req_i ('b0),
+ .debug_req_i (1'b0),
.crash_dump_o (),
.double_fault_seen_o (),
@@ -318,6 +318,12 @@ module ibex_simple_system (
.timer_intr_o (timer_irq)
);
+ export "DPI-C" function mhpmcounter_num;
+
+ function automatic int unsigned mhpmcounter_num();
+ return u_top.u_ibex_top.u_ibex_core.cs_registers_i.MHPMCounterNum;
+ endfunction
+
export "DPI-C" function mhpmcounter_get;
function automatic longint unsigned mhpmcounter_get(int index);
diff --git a/vendor/lowrisc_ibex/ibex_core.core b/vendor/lowrisc_ibex/ibex_core.core
index c228c186..a9fd400b 100644
--- a/vendor/lowrisc_ibex/ibex_core.core
+++ b/vendor/lowrisc_ibex/ibex_core.core
@@ -42,10 +42,6 @@ filesets:
files:
- lint/verilator_waiver.vlt: {file_type: vlt}
- files_lint_verible:
- files:
- - lint/verible_waiver.vbw: {file_type: veribleLintWaiver}
-
files_check_tool_requirements:
depend:
- lowrisc:tool:check_tool_requirements
@@ -170,7 +166,6 @@ targets:
default: &default_target
filesets:
- tool_verilator ? (files_lint_verilator)
- - tool_veriblelint ? (files_lint_verible)
- files_rtl
- files_check_tool_requirements
toplevel: ibex_core
diff --git a/vendor/lowrisc_ibex/ibex_top.core b/vendor/lowrisc_ibex/ibex_top.core
index 8adbce20..db33a72f 100644
--- a/vendor/lowrisc_ibex/ibex_top.core
+++ b/vendor/lowrisc_ibex/ibex_top.core
@@ -27,10 +27,6 @@ filesets:
files:
- lint/verilator_waiver.vlt: {file_type: vlt}
- files_lint_verible:
- files:
- - lint/verible_waiver.vbw: {file_type: veribleLintWaiver}
-
files_check_tool_requirements:
depend:
- lowrisc:tool:check_tool_requirements
@@ -155,7 +151,6 @@ targets:
default: &default_target
filesets:
- tool_verilator ? (files_lint_verilator)
- - tool_veriblelint ? (files_lint_verible)
- files_rtl
- files_check_tool_requirements
toplevel: ibex_top
diff --git a/vendor/lowrisc_ibex/lint/verible_waiver.vbw b/vendor/lowrisc_ibex/lint/verible_waiver.vbw
deleted file mode 100644
index e69de29b..00000000
diff --git a/vendor/lowrisc_ibex/lint/verilator_waiver.vlt b/vendor/lowrisc_ibex/lint/verilator_waiver.vlt
index b7c952ca..d03f9bcb 100644
--- a/vendor/lowrisc_ibex/lint/verilator_waiver.vlt
+++ b/vendor/lowrisc_ibex/lint/verilator_waiver.vlt
@@ -64,6 +64,13 @@ lint_off -rule UNUSED -file "*/rtl/ibex_compressed_decoder.sv" -match "*rst_ni*"
lint_off -rule UNUSED -file "*/rtl/ibex_decoder.sv" -match "*rst_ni*"
lint_off -rule UNUSED -file "*/rtl/ibex_branch_predict.sv" -match "*rst_ni*"
+// Don't worry about the fact that decoded_str and data_accessed appear to be
+// written by multiple processes that might race with each other. They can't
+// race with each other (everything is a descendent of the always_comb block),
+// but Verilator doesn't notice this.
+lint_off -rule MULTIDRIVEN -file "*/rtl/ibex_tracer.sv" -match "*decoded_str*"
+lint_off -rule MULTIDRIVEN -file "*/rtl/ibex_tracer.sv" -match "*data_accessed*"
+
// Temporary waivers until OpenTitan primitives are lint-clean
// https://github.com/lowRISC/opentitan/issues/2313
lint_off -file "*/lowrisc_prim_*/rtl/*.sv"
diff --git a/vendor/lowrisc_ibex/python-requirements.txt b/vendor/lowrisc_ibex/python-requirements.txt
index c47b58d0..d6b7fe13 100644
--- a/vendor/lowrisc_ibex/python-requirements.txt
+++ b/vendor/lowrisc_ibex/python-requirements.txt
@@ -8,6 +8,7 @@ git+https://github.com/lowRISC/edalize.git@ot
# Development version with OT-specific changes
git+https://github.com/lowRISC/fusesoc.git@ot
+packaging
pyyaml
mako
junit-xml
@@ -16,7 +17,7 @@ pathlib3x # Backports some useful features
typing-utils # Ditto
typeguard ~= 2.13
portalocker
-pydantic < 2.0
+pydantic
svg.py
# Needed by dvsim.py (not actually used in Ibex)
diff --git a/vendor/lowrisc_ibex/rtl/ibex_pmp.sv b/vendor/lowrisc_ibex/rtl/ibex_pmp.sv
index 74cb6a37..48c3a7ed 100644
--- a/vendor/lowrisc_ibex/rtl/ibex_pmp.sv
+++ b/vendor/lowrisc_ibex/rtl/ibex_pmp.sv
@@ -50,32 +50,17 @@ module ibex_pmp #(
// |
// \--> pmp_req_err_o
- // A wrapper function in which it is decided which form of permission check function gets called
- function automatic logic perm_check_wrapper(logic csr_pmp_mseccfg_mml,
- ibex_pkg::pmp_cfg_t csr_pmp_cfg,
- ibex_pkg::pmp_req_e pmp_req_type,
- ibex_pkg::priv_lvl_e priv_mode,
- logic permission_check);
- return csr_pmp_mseccfg_mml ? mml_perm_check(csr_pmp_cfg,
- pmp_req_type,
- priv_mode,
- permission_check) :
- orig_perm_check(csr_pmp_cfg.lock,
- priv_mode,
- permission_check);
- endfunction
-
// Compute permissions checks that apply when MSECCFG.MML is set. Added for Smepmp support.
- function automatic logic mml_perm_check(ibex_pkg::pmp_cfg_t csr_pmp_cfg,
+ function automatic logic mml_perm_check(ibex_pkg::pmp_cfg_t region_csr_pmp_cfg,
ibex_pkg::pmp_req_e pmp_req_type,
ibex_pkg::priv_lvl_e priv_mode,
logic permission_check);
logic result = 1'b0;
- logic unused_cfg = |csr_pmp_cfg.mode;
+ logic unused_cfg = |region_csr_pmp_cfg.mode;
- if (!csr_pmp_cfg.read && csr_pmp_cfg.write) begin
+ if (!region_csr_pmp_cfg.read && region_csr_pmp_cfg.write) begin
// Special-case shared regions where R = 0, W = 1
- unique case ({csr_pmp_cfg.lock, csr_pmp_cfg.exec})
+ unique case ({region_csr_pmp_cfg.lock, region_csr_pmp_cfg.exec})
// Read/write in M, read only in S/U
2'b00: result =
(pmp_req_type == PMP_ACC_READ) |
@@ -92,14 +77,15 @@ module ibex_pmp #(
default: ;
endcase
end else begin
- if (csr_pmp_cfg.read & csr_pmp_cfg.write & csr_pmp_cfg.exec & csr_pmp_cfg.lock) begin
+ if (region_csr_pmp_cfg.read & region_csr_pmp_cfg.write &
+ region_csr_pmp_cfg.exec & region_csr_pmp_cfg.lock) begin
// Special-case shared read only region when R = 1, W = 1, X = 1, L = 1
result = pmp_req_type == PMP_ACC_READ;
end else begin
// Otherwise use basic permission check. Permission is always denied if in S/U mode and
// L is set or if in M mode and L is unset.
result = permission_check &
- (priv_mode == PRIV_LVL_M ? csr_pmp_cfg.lock : ~csr_pmp_cfg.lock);
+ (priv_mode == PRIV_LVL_M ? region_csr_pmp_cfg.lock : ~region_csr_pmp_cfg.lock);
end
end
return result;
@@ -118,6 +104,21 @@ module ibex_pmp #(
permission_check;
endfunction
+ // A wrapper function in which it is decided which form of permission check function gets called
+ function automatic logic perm_check_wrapper(logic csr_pmp_mseccfg_mml,
+ ibex_pkg::pmp_cfg_t region_csr_pmp_cfg,
+ ibex_pkg::pmp_req_e pmp_req_type,
+ ibex_pkg::priv_lvl_e priv_mode,
+ logic permission_check);
+ return csr_pmp_mseccfg_mml ? mml_perm_check(region_csr_pmp_cfg,
+ pmp_req_type,
+ priv_mode,
+ permission_check) :
+ orig_perm_check(region_csr_pmp_cfg.lock,
+ priv_mode,
+ permission_check);
+ endfunction
+
// Access fault determination / prioritization
function automatic logic access_fault_check (logic csr_pmp_mseccfg_mmwp,
logic csr_pmp_mseccfg_mml,
diff --git a/vendor/lowrisc_ibex/rtl/ibex_tracer.sv b/vendor/lowrisc_ibex/rtl/ibex_tracer.sv
index b07b742c..f361ddb3 100644
--- a/vendor/lowrisc_ibex/rtl/ibex_tracer.sv
+++ b/vendor/lowrisc_ibex/rtl/ibex_tracer.sv
@@ -107,20 +107,9 @@ module ibex_tracer (
end
end
- function automatic void printbuffer_dumpline();
+ function automatic void printbuffer_dumpline(int fh);
string rvfi_insn_str;
- if (file_handle == 32'h0) begin
- string file_name_base = "trace_core";
- void'($value$plusargs("ibex_tracer_file_base=%s", file_name_base));
- $sformat(file_name, "%s_%h.log", file_name_base, hart_id_i);
-
- $display("%m: Writing execution trace to %s", file_name);
- file_handle = $fopen(file_name, "w");
- $fwrite(file_handle,
- "Time\tCycle\tPC\tInsn\tDecoded instruction\tRegister and memory contents\n");
- end
-
// Write compressed instructions as four hex digits (16 bit word), and
// uncompressed ones as 8 hex digits (32 bit words).
if (insn_is_compressed) begin
@@ -129,33 +118,33 @@ module ibex_tracer (
rvfi_insn_str = $sformatf("%h", rvfi_insn);
end
- $fwrite(file_handle, "%15t\t%d\t%h\t%s\t%s\t",
+ $fwrite(fh, "%15t\t%d\t%h\t%s\t%s\t",
$time, cycle, rvfi_pc_rdata, rvfi_insn_str, decoded_str);
if ((data_accessed & RS1) != 0) begin
- $fwrite(file_handle, " %s:0x%08x", reg_addr_to_str(rvfi_rs1_addr), rvfi_rs1_rdata);
+ $fwrite(fh, " %s:0x%08x", reg_addr_to_str(rvfi_rs1_addr), rvfi_rs1_rdata);
end
if ((data_accessed & RS2) != 0) begin
- $fwrite(file_handle, " %s:0x%08x", reg_addr_to_str(rvfi_rs2_addr), rvfi_rs2_rdata);
+ $fwrite(fh, " %s:0x%08x", reg_addr_to_str(rvfi_rs2_addr), rvfi_rs2_rdata);
end
if ((data_accessed & RS3) != 0) begin
- $fwrite(file_handle, " %s:0x%08x", reg_addr_to_str(rvfi_rs3_addr), rvfi_rs3_rdata);
+ $fwrite(fh, " %s:0x%08x", reg_addr_to_str(rvfi_rs3_addr), rvfi_rs3_rdata);
end
if ((data_accessed & RD) != 0) begin
- $fwrite(file_handle, " %s=0x%08x", reg_addr_to_str(rvfi_rd_addr), rvfi_rd_wdata);
+ $fwrite(fh, " %s=0x%08x", reg_addr_to_str(rvfi_rd_addr), rvfi_rd_wdata);
end
if ((data_accessed & MEM) != 0) begin
- $fwrite(file_handle, " PA:0x%08x", rvfi_mem_addr);
+ $fwrite(fh, " PA:0x%08x", rvfi_mem_addr);
if (rvfi_mem_rmask != 4'b0000) begin
- $fwrite(file_handle, " store:0x%08x", rvfi_mem_wdata);
+ $fwrite(fh, " store:0x%08x", rvfi_mem_wdata);
end
if (rvfi_mem_wmask != 4'b0000) begin
- $fwrite(file_handle, " load:0x%08x", rvfi_mem_rdata);
+ $fwrite(fh, " load:0x%08x", rvfi_mem_rdata);
end
end
- $fwrite(file_handle, "\n");
+ $fwrite(fh, "\n");
endfunction
@@ -747,14 +736,33 @@ module ibex_tracer (
// close output file for writing
final begin
if (file_handle != 32'h0) begin
- $fclose(file_handle);
+ // This dance with "fh" is a bit silly. Some versions of Verilator treat a call of $fclose(xx)
+ // as a blocking assignment to xx. They then complain about the mixture with that an the
+ // non-blocking assignment we use when opening the file. The bug is fixed with recent versions
+ // of Verilator, but this hack is probably worth it for now.
+ int fh = file_handle;
+ $fclose(fh);
end
end
// log execution
- always_ff @(posedge clk_i) begin
+ always @(posedge clk_i) begin
if (rvfi_valid && trace_log_enable) begin
- printbuffer_dumpline();
+
+ int fh = file_handle;
+
+ if (fh == 32'h0) begin
+ string file_name_base = "trace_core";
+ void'($value$plusargs("ibex_tracer_file_base=%s", file_name_base));
+ $sformat(file_name, "%s_%h.log", file_name_base, hart_id_i);
+
+ $display("%m: Writing execution trace to %s", file_name);
+ fh = $fopen(file_name, "w");
+ file_handle <= fh;
+ $fwrite(fh, "Time\tCycle\tPC\tInsn\tDecoded instruction\tRegister and memory contents\n");
+ end
+
+ printbuffer_dumpline(fh);
end
end
diff --git a/vendor/lowrisc_ibex/shared/rtl/sim/simulator_ctrl.sv b/vendor/lowrisc_ibex/shared/rtl/sim/simulator_ctrl.sv
index be4a04df..1e10b75c 100644
--- a/vendor/lowrisc_ibex/shared/rtl/sim/simulator_ctrl.sv
+++ b/vendor/lowrisc_ibex/shared/rtl/sim/simulator_ctrl.sv
@@ -42,7 +42,7 @@ module simulator_ctrl #(
localparam logic [7:0] SIM_CTRL_ADDR = 8'h2;
logic [7:0] ctrl_addr;
- logic [2:0] sim_finish = 3'b000;
+ logic [2:0] sim_finish;
integer log_fd;
diff --git a/vendor/lowrisc_ibex/tool_requirements.py b/vendor/lowrisc_ibex/tool_requirements.py
index bb691613..876206d2 100644
--- a/vendor/lowrisc_ibex/tool_requirements.py
+++ b/vendor/lowrisc_ibex/tool_requirements.py
@@ -5,7 +5,7 @@
# Version requirements for various tools. Checked by tooling (e.g. fusesoc),
# and inserted into the Sphinx-generated documentation.
__TOOL_REQUIREMENTS__ = {
- 'verilator': '4.028',
+ 'verilator': '4.104',
'edalize': '0.2.0',
'vcs': {
'min_version': '2020.03-SP2',
diff --git a/vendor/lowrisc_ibex/util/check_tool_requirements.py b/vendor/lowrisc_ibex/util/check_tool_requirements.py
index 08c51672..bc9e32f6 100755
--- a/vendor/lowrisc_ibex/util/check_tool_requirements.py
+++ b/vendor/lowrisc_ibex/util/check_tool_requirements.py
@@ -4,9 +4,10 @@
# SPDX-License-Identifier: Apache-2.0
import argparse
-from distutils.version import StrictVersion
+from importlib.metadata import version
import logging as log
import os
+from packaging.version import Version
import re
import shlex
import subprocess
@@ -156,7 +157,7 @@ def check(self):
'Failed to convert requirement to semantic version: {}'
.format(err))
try:
- min_sv = StrictVersion(min_semver)
+ min_sv = Version(min_semver)
except ValueError as err:
return (False,
'Bad semver inferred from required version ({}): {}'
@@ -174,7 +175,7 @@ def check(self):
'Failed to convert installed to semantic version: {}'
.format(err))
try:
- actual_sv = StrictVersion(actual_semver)
+ actual_sv = Version(actual_semver)
except ValueError as err:
return (False,
'Bad semver inferred from installed version ({}): {}'
@@ -212,7 +213,7 @@ class VeribleToolReq(ToolReq):
def to_semver(self, version, from_req):
# Drop the hash suffix and convert into version string that
- # is compatible with StrictVersion in check_version below.
+ # is compatible with Version in check_version below.
# Example: v0.0-808-g1e17daa -> 0.0.808
m = re.fullmatch(r'v([0-9]+)\.([0-9]+)-([0-9]+)-g[0-9a-f]+$', version)
if m is None:
@@ -237,7 +238,7 @@ def to_semver(self, version, from_req):
# already. A version always has the "2020.03" (year and month) part,
# and may also have an -SP and/or - suffix.
#
- # Since StrictVersion expects a 3 digit versioning scheme, we multiply
+ # Since Version expects a 3 digit versioning scheme, we multiply
# any SP number by 100, which should work as long as the patch version
# isn't greater than 99.
#
@@ -261,11 +262,11 @@ def to_semver(self, version, from_req):
class PyModuleToolReq(ToolReq):
'''A tool in a Python module (its version can be found by running pip)'''
- version_regex = re.compile(r'Version: (.*)')
-
- def _get_tool_cmd(self):
- return ['pip3', 'show', self.tool]
+ # For Python modules, use metadata directly instead of call into pip3, which
+ # may not always be available for some systems.
+ def get_version(self):
+ return version(self.tool)
def dict_to_tool_req(path, tool, raw):
'''Parse a dict (as read from Python) as a ToolReq
diff --git a/vendor/lowrisc_ibex/vendor/google_riscv-dv.lock.hjson b/vendor/lowrisc_ibex/vendor/google_riscv-dv.lock.hjson
index 033791af..b7d8d399 100644
--- a/vendor/lowrisc_ibex/vendor/google_riscv-dv.lock.hjson
+++ b/vendor/lowrisc_ibex/vendor/google_riscv-dv.lock.hjson
@@ -8,7 +8,7 @@
{
upstream:
{
- url: https://github.com/google/riscv-dv
- rev: 68ab8230c52ec66b393c04394aef4d6082ee53b4
+ url: https://github.com/chipsalliance/riscv-dv
+ rev: 71666ebacd69266b1abb7cdbad5e1897ce5884e6
}
}
diff --git a/vendor/lowrisc_ibex/vendor/google_riscv-dv.vendor.hjson b/vendor/lowrisc_ibex/vendor/google_riscv-dv.vendor.hjson
index a079e024..ea249598 100644
--- a/vendor/lowrisc_ibex/vendor/google_riscv-dv.vendor.hjson
+++ b/vendor/lowrisc_ibex/vendor/google_riscv-dv.vendor.hjson
@@ -6,7 +6,7 @@
target_dir: "google_riscv-dv",
upstream: {
- url: "https://github.com/google/riscv-dv",
+ url: "https://github.com/chipsalliance/riscv-dv",
rev: "master",
},
diff --git a/vendor/lowrisc_ibex/vendor/google_riscv-dv/.github/scripts/code_fixup.py b/vendor/lowrisc_ibex/vendor/google_riscv-dv/.github/scripts/code_fixup.py
new file mode 100644
index 00000000..cd982544
--- /dev/null
+++ b/vendor/lowrisc_ibex/vendor/google_riscv-dv/.github/scripts/code_fixup.py
@@ -0,0 +1,106 @@
+#!/usr/bin/env python3
+import argparse
+import re
+
+# =============================================================================
+
+class AssemblyLine:
+ """
+ Simple assembly line representation
+ """
+
+ RE_INSTR = re.compile(r"(?P<mnemonic>\S+)\s+(?P<operands>.*)")
+
+ def __init__(self, text):
+ self.text = text
+ self.mnemonic = None
+ self.operands = None
+
+ # Strip label if any
+ if ":" in text:
+ text = text.split(":", maxsplit=1)[1]
+
+ # Strip comment if any
+ if "#" in text:
+ text = text.split("#", maxsplit=1)[0]
+
+ # Get instruction and operands
+ m = self.RE_INSTR.match(text.strip())
+ if m is not None:
+
+ if m.group("mnemonic")[0] == ".":
+ return
+
+ self.mnemonic = m.group("mnemonic").lower()
+ self.operands = [op.strip() for op in m.group("operands").split()]
+
+ def __str__(self):
+ return self.text
+
+# =============================================================================
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "-i",
+ type=str,
+ required=True,
+ help="Input assembly file"
+ )
+ parser.add_argument(
+ "-o",
+ type=str,
+ required=True,
+ help="Output assembly file"
+ )
+
+ args = parser.parse_args()
+
+ max_nops = 10
+
+ # Read and parse
+ with open(args.i, "r") as fp:
+ inp_lines = [AssemblyLine(l) for l in fp.readlines()]
+
+ # Identify a delayed write instruction followed by another one which writes
+ # to the same register
+ out_lines = []
+ for i in range(len(inp_lines)):
+ line = inp_lines[i]
+ out_lines.append(line)
+
+ # Bypass
+ if not line.mnemonic:
+ continue
+
+ # Check if it is a delayed write. If not then bypass
+ is_delayed = line.mnemonic in ["div", "divu", "rem", "remu", "lw"]
+ if not is_delayed:
+ continue
+
+ # Get next 2 instructions
+ following = []
+ for j in range(i+1, len(inp_lines)):
+ if inp_lines[j].mnemonic is not None:
+ following.append(inp_lines[j])
+ if len(following) >= 2:
+ break
+
+ # If any of the instructions targets the same register insert NOPs
+ dst = line.operands[0]
+ for j, l in enumerate(following):
+ if l.operands and l.operands[0] == dst:
+ nops = max(0, max_nops - j)
+ for _ in range(nops):
+ out_lines.append(" " * 18 + "nop # FIXME: A fixup not to make VeeR cancel a delayed write\n")
+ break
+
+ # Write
+ with open(args.o, "w") as fp:
+ for l in out_lines:
+ fp.write(str(l))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/vendor/lowrisc_ibex/vendor/google_riscv-dv/.github/scripts/parse_testlist.py b/vendor/lowrisc_ibex/vendor/google_riscv-dv/.github/scripts/parse_testlist.py
new file mode 100644
index 00000000..9b36c559
--- /dev/null
+++ b/vendor/lowrisc_ibex/vendor/google_riscv-dv/.github/scripts/parse_testlist.py
@@ -0,0 +1,26 @@
+import sys
+from json import dumps
+from yaml import load, Loader
+from typing import Generator
+
+
+def parse_yaml(path: str) -> Generator[str, None, None]:
+ with open(path, 'rb') as fd:
+ tests = load(fd, Loader=Loader)
+ for test in tests:
+ if 'import' in test:
+ import_path = test['import'].split('/', 1)[1]
+ yield from parse_yaml(import_path)
+ elif 'test' in test:
+ yield test['test']
+
+
+if __name__ == "__main__":
+ if len(sys.argv) == 2:
+ testlist = parse_yaml(f'target/{sys.argv[1]}/testlist.yaml')
+ else:
+ testlist = parse_yaml('yaml/base_testlist.yaml')
+ testlist = list(testlist)
+ # remove, will cause incomplete sim, need customized RTL
+ testlist.remove("riscv_csr_test")
+ print(dumps(testlist))
diff --git a/vendor/lowrisc_ibex/vendor/google_riscv-dv/.github/workflows/build-spike.yml b/vendor/lowrisc_ibex/vendor/google_riscv-dv/.github/workflows/build-spike.yml
new file mode 100644
index 00000000..442e825e
--- /dev/null
+++ b/vendor/lowrisc_ibex/vendor/google_riscv-dv/.github/workflows/build-spike.yml
@@ -0,0 +1,63 @@
+# https://github.com/chipsalliance/Cores-VeeR-EL2/blob/774510e43f5408ec2b818db8f865027bc9be97b8/.github/workflows/build-spike.yml
+
+name: Spike Build
+
+on:
+ workflow_call:
+
+jobs:
+ verilator:
+ name: Build Spike
+ runs-on: ubuntu-latest
+ env:
+ TOOL_NAME: spike
+ TOOL_VERSION: d70ea67d
+ DEBIAN_FRONTEND: "noninteractive"
+
+ steps:
+ - name: Setup Cache Metadata
+ id: cache_metadata
+ run: |
+ cache_date=$(date +"%Y_%m_%d")
+ cache_name=cache_${{ env.TOOL_NAME }}_${{ env.TOOL_VERSION }}
+ echo "Cache date: "$cache_date
+ echo "Cache name: "$cache_name
+ echo "cache_date=$cache_date" >> "$GITHUB_ENV"
+ echo "cache_name=$cache_name" >> "$GITHUB_ENV"
+
+ - name: Setup cache
+ uses: actions/cache@v3
+ id: cache
+ timeout-minutes: 60
+ with:
+ path: |
+ /opt/spike
+ /opt/spike/.cache
+ key: ${{ env.cache_name }}_${{ env.cache_date }}
+ restore-keys: ${{ env.cache_name }}_
+
+ - name: Install prerequisities
+ if: ${{ steps.cache.outputs.cache-hit != 'true' }}
+ run: |
+ sudo apt -qqy update && sudo apt -qqy --no-install-recommends install \
+ git build-essential cmake ccache device-tree-compiler
+
+ - name: Build Spike
+ if: ${{ steps.cache.outputs.cache-hit != 'true' }}
+ run: |
+ export CCACHE_DIR=/opt/spike/.cache
+ ccache --show-config | grep cache_dir
+ git clone https://github.com/riscv-software-src/riscv-isa-sim spike
+ export CC="ccache gcc"
+ export CXX="ccache g++"
+ pushd spike
+ git checkout ${{ env.TOOL_VERSION }}
+ mkdir build
+ cd build
+ ../configure --prefix=/opt/spike
+ make -j`nproc`
+ make install
+ popd
+ rm -rf /opt/spike/include # Remove include and lib to save space
+ rm -rf /opt/spike/lib
+
diff --git a/vendor/lowrisc_ibex/vendor/google_riscv-dv/.github/workflows/metrics-regress.yml b/vendor/lowrisc_ibex/vendor/google_riscv-dv/.github/workflows/metrics-regress.yml
deleted file mode 100644
index de92648e..00000000
--- a/vendor/lowrisc_ibex/vendor/google_riscv-dv/.github/workflows/metrics-regress.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-# This is a basic workflow to help you get started with Actions
-
-name: metrics-regress
-
-# Controls when the action will run. Triggers the workflow on push or pull request
-# events but only for the master branch
-on:
- push:
- branches: [ master ]
-# pull_request_target:
-# branches: [ master ]
-
-# If you fork this repository, you must create a new Metrics project for your fork
-# and set the environment variable $METRICS_PROJECT_ID accordingly
-jobs:
- metrics-regression:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v2
- - run: ./scripts/metrics-regress.py $METRICS_REGRESSION_NAME $METRICS_PROJECT_ID
- env:
- METRICS_CI_TOKEN: ${{ secrets.METRICS_CI_TOKEN }}
- METRICS_REGRESSION_NAME: riscv-dv_regression
- METRICS_PROJECT_ID: ${{ secrets.METRICS_PROJECT_ID }}
- PR_NUMBER: ${{ github.event.pull_request.number }}
- shell: bash
-
diff --git a/vendor/lowrisc_ibex/vendor/google_riscv-dv/.github/workflows/run-tests.yml b/vendor/lowrisc_ibex/vendor/google_riscv-dv/.github/workflows/run-tests.yml
new file mode 100644
index 00000000..dd93d0cf
--- /dev/null
+++ b/vendor/lowrisc_ibex/vendor/google_riscv-dv/.github/workflows/run-tests.yml
@@ -0,0 +1,182 @@
+name: run-tests
+
+on:
+ push:
+ pull_request:
+
+env:
+ RISCV_TARGET: rv32imc
+
+jobs:
+ build-spike:
+ uses: ./.github/workflows/build-spike.yml
+
+ generate-config:
+ runs-on: ubuntu-latest
+ outputs:
+ test-types: ${{ steps.test-types.outputs.tests }}
+ hash: ${{ steps.hash.outputs.files-hash }}
+ steps:
+ - uses: actions/checkout@v4
+ - id: test-types
+ name: Prepare test types
+ run: |
+ python3 -m pip install pyyaml
+ echo "tests=$(python3 .github/scripts/parse_testlist.py $RISCV_TARGET)" | tee -a $GITHUB_OUTPUT
+ - id: hash
+ name: Prepare files' hash
+ run: |
+ echo "files-hash=$(sha256sum **/*.sv **/*.py **/*.yml **/*.yaml | cut -d\ -f1 | sha256sum | cut -d\ -f1)" | tee -a $GITHUB_OUTPUT
+
+
+ generate-code:
+ runs-on: [ self-hosted, Linux, X64, gcp-custom-runners ]
+ container: centos:8
+ needs: generate-config
+ strategy:
+ fail-fast: false
+ matrix:
+ test: ${{ fromJSON(needs.generate-config.outputs.test-types) }}
+ version: [ uvm ]
+ include:
+ - test: riscv_arithmetic_basic_test
+ version: pyflow
+ env:
+ GHA_EXTERNAL_DISK: additional-tools
+ CACHE_HASH: ${{ needs.generate-config.outputs.hash }}
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Setup Cache Metadata
+ id: cache_metadata
+ run: |
+ cache_code=cache_${{ matrix.test }}_${{ matrix.version }}
+ echo "cache_code=${cache_code}_${{ env.CACHE_HASH }}" | tee -a "$GITHUB_ENV"
+
+ - name: Cache Code
+ uses: actions/cache@v3
+ id: cache-code
+ timeout-minutes: 60
+ with:
+ path: test/asm_test
+ key: ${{ env.cache_code }}
+
+ - name: Prepare Environment
+ if: steps.cache-code.outputs.cache-hit != 'true'
+ run: _secret_environment
+
+ - name: Setup Python 3.9
+ if: steps.cache-code.outputs.cache-hit != 'true'
+ run: |
+ yum update -y
+ yum install -y python39
+ python3.9 -m pip install -r requirements.txt
+
+ - name: Generate UVM Tests
+ if: steps.cache-code.outputs.cache-hit != 'true' && matrix.version == 'uvm'
+ run: _secret_riscv
+ env:
+ RISCV_TEST: ${{ matrix.test }}
+ RISCV_TARGET: ${{ env.RISCV_TARGET }}
+
+ - name: Generate PyFlow Tests
+ if: steps.cache-code.outputs.cache-hit != 'true' && matrix.version == 'pyflow'
+ run: |
+ set -eo pipefail
+ python3 run.py --simulator pyflow \
+ --test ${{ matrix.test }} --iss spike \
+ --start_seed 999 --iterations 1 --batch_size 1 \
+ --isa $RISCV_TARGET --mabi ilp32 --steps gen -v -o test 2>&1 | tee test/generate.log
+
+ - name: Upload Artifacts
+ uses: actions/upload-artifact@v3
+ if: always()
+ with:
+ path: |
+ test/asm_test/*.S
+
+
+ run-tests:
+ runs-on: ubuntu-latest
+ needs: [ build-spike, generate-code, generate-config ]
+ strategy:
+ fail-fast: false
+ matrix:
+ test: ${{ fromJSON(needs.generate-config.outputs.test-types) }}
+ version:
+ - uvm
+ include:
+ - test: riscv_arithmetic_basic_test
+ version: pyflow
+ env:
+ TOOL_VERSION: d70ea67d
+ CACHE_HASH: ${{ needs.generate-config.outputs.hash }}
+
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Install dependencies
+ run: sudo apt-get -qqy update && sudo apt-get -qqy install gcc-riscv64-unknown-elf device-tree-compiler
+
+ - name: Setup python
+ # python dependencies cannot be properly downloaded with new versions of python
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.9'
+
+ - name: Install python dependencies
+ run: python3 -m pip install -r requirements.txt
+
+ - name: Setup Cache Metadata
+ id: cache_metadata
+ run: |
+ date=$(date +"%Y_%m_%d")
+ time=$(date +"%Y%m%d_%H%M%S_%N")
+ cache_spike_restore_key=cache_spike_
+ cache_spike_key=${cache_spike_restore_key}${{ env.TOOL_VERSION }}_${date}
+ cache_code=cache_${{ matrix.test }}_${{ matrix.version }}
+
+ echo "cache_spike_restore_key=$cache_spike_restore_key" | tee -a "$GITHUB_ENV"
+ echo "cache_spike_key=$cache_spike_key" | tee -a "$GITHUB_ENV"
+ echo "cache_code=${cache_code}_${{ env.CACHE_HASH }}" | tee -a "$GITHUB_ENV"
+
+ - name: Restore Spike cache
+ id: cache-spike-restore
+ uses: actions/cache/restore@v3
+ with:
+ path: |
+ /opt/spike
+ /opt/spike/.cache
+ key: ${{ env.cache_spike_key }}
+ restore-keys: ${{ env.cache_spike_restore_key }}
+
+ - name: Set variables
+ run: |
+ echo "RISCV_GCC=riscv64-unknown-elf-gcc" >> $GITHUB_ENV
+ echo "RISCV_OBJCOPY=riscv64-unknown-elf-objcopy" >> $GITHUB_ENV
+ echo "SPIKE_PATH=/opt/spike/bin" >> $GITHUB_ENV
+ echo "PYTHONPATH=pygen" >> $GITHUB_ENV
+
+ - name: Cache Code Restore
+ uses: actions/cache/restore@v3
+ id: cache-code-restore
+ timeout-minutes: 60
+ with:
+ path: test/asm_test
+ key: ${{ env.cache_code }}
+
+ - name: Run Tests
+ run: |
+ set -eo pipefail
+ python3 run.py --simulator pyflow \
+ --test ${{ matrix.test }} --iss spike --iss_timeout 60 \
+ --start_seed 999 --iterations 1 --batch_size 1 \
+ --isa $RISCV_TARGET --mabi ilp32 --steps gcc_compile,iss_sim -v -o test 2>&1 | tee -a test/generate.log
+
+ - name: Upload Artifacts
+ uses: actions/upload-artifact@v3
+ if: always()
+ with:
+ path: |
+ test/asm_test/*.log
+ test/*.log
diff --git a/vendor/lowrisc_ibex/vendor/google_riscv-dv/euvm/riscv/gen/riscv_asm_program_gen.d b/vendor/lowrisc_ibex/vendor/google_riscv-dv/euvm/riscv/gen/riscv_asm_program_gen.d
index 98324ac4..abfb638d 100644
--- a/vendor/lowrisc_ibex/vendor/google_riscv-dv/euvm/riscv/gen/riscv_asm_program_gen.d
+++ b/vendor/lowrisc_ibex/vendor/google_riscv-dv/euvm/riscv/gen/riscv_asm_program_gen.d
@@ -736,7 +736,7 @@ class riscv_asm_program_gen : uvm_object
}
// get a random double precision floating value
- ubvec!XLEN get_rand_dpf_value() {
+ ubvec!64 get_rand_dpf_value() {
ubvec!64 value;
int randint = urandom(0,6);
diff --git a/vendor/lowrisc_ibex/vendor/google_riscv-dv/euvm/riscv/gen/riscv_load_store_instr_lib.d b/vendor/lowrisc_ibex/vendor/google_riscv-dv/euvm/riscv/gen/riscv_load_store_instr_lib.d
index 4694ce4d..a5308ed2 100644
--- a/vendor/lowrisc_ibex/vendor/google_riscv-dv/euvm/riscv/gen/riscv_load_store_instr_lib.d
+++ b/vendor/lowrisc_ibex/vendor/google_riscv-dv/euvm/riscv/gen/riscv_load_store_instr_lib.d
@@ -670,11 +670,11 @@ class riscv_vector_load_store_instr_stream : riscv_mem_access_stream
add_mixed_instr(num_mixed_instr);
add_rs1_init_la_instr(rs1_reg, data_page_id, base);
if (address_mode == address_mode_e.STRIDED) {
- this.append_instr(get_init_gpr_instr(rs2_reg, toubvec!64(stride_byte_offset)));
+ this.append_instr(get_init_gpr_instr(rs2_reg, toubvec!XLEN(stride_byte_offset)));
}
else if (address_mode == address_mode_e.INDEXED) {
// TODO: Support different index address for each element
- add_init_vector_gpr_instr(vs2_reg, toubvec!64(index_addr));
+ add_init_vector_gpr_instr(vs2_reg, toubvec!XLEN(index_addr));
}
super.post_randomize();
}
diff --git a/vendor/lowrisc_ibex/vendor/google_riscv-dv/euvm/riscv/gen/riscv_page_table_entry.d b/vendor/lowrisc_ibex/vendor/google_riscv-dv/euvm/riscv/gen/riscv_page_table_entry.d
index cc1ae5de..308526d7 100644
--- a/vendor/lowrisc_ibex/vendor/google_riscv-dv/euvm/riscv/gen/riscv_page_table_entry.d
+++ b/vendor/lowrisc_ibex/vendor/google_riscv-dv/euvm/riscv/gen/riscv_page_table_entry.d
@@ -161,7 +161,7 @@ class riscv_page_table_entry(satp_mode_t MODE = satp_mode_t.SV39) : uvm_object
void pack_entry() {
switch (MODE) {
case satp_mode_t.SV32:
- bits = ppn1 ~ ppn0 ~ rsw ~ d ~ a ~ g ~ u ~ xwr ~ v;
+ bits = cast(ubvec!XLEN) (ppn1 ~ ppn0 ~ rsw ~ d ~ a ~ g ~ u ~ xwr ~ v);
break;
case satp_mode_t.SV39:
bits = cast(ubvec!XLEN) (rsvd ~ ppn2 ~ ppn1 ~ ppn0 ~ rsw ~ d ~ a ~ g ~ u ~ xwr ~ v);
diff --git a/vendor/lowrisc_ibex/vendor/google_riscv-dv/euvm/riscv/gen/riscv_privileged_common_seq.d b/vendor/lowrisc_ibex/vendor/google_riscv-dv/euvm/riscv/gen/riscv_privileged_common_seq.d
index 8b41a916..64e7a198 100644
--- a/vendor/lowrisc_ibex/vendor/google_riscv-dv/euvm/riscv/gen/riscv_privileged_common_seq.d
+++ b/vendor/lowrisc_ibex/vendor/google_riscv-dv/euvm/riscv/gen/riscv_privileged_common_seq.d
@@ -123,16 +123,18 @@ class riscv_privileged_common_seq : uvm_sequence!(uvm_sequence_item,uvm_sequence
mstatus.set_field("TW", cfg.set_mstatus_tw);
mstatus.set_field("FS", cfg.mstatus_fs);
mstatus.set_field("VS", cfg.mstatus_vs);
- if (!(canFind(supported_privileged_mode, privileged_mode_t.SUPERVISOR_MODE) && (XLEN != 32))) {
- mstatus.set_field("SXL", toubvec!2(0b00));
- }
- else if (XLEN == 64) {
- mstatus.set_field("SXL", toubvec!2(0b10));
- }
- if (!(canFind(supported_privileged_mode, privileged_mode_t.USER_MODE) && (XLEN != 32))) {
- mstatus.set_field("UXL", toubvec!2(0b00));
- } else if (XLEN == 64) {
- mstatus.set_field("UXL", toubvec!2(0b10));
+ if (XLEN != 32) {
+ if (!(canFind(supported_privileged_mode, privileged_mode_t.SUPERVISOR_MODE))) {
+ mstatus.set_field("SXL", toubvec!2(0b00));
+ }
+ else if (XLEN == 64) {
+ mstatus.set_field("SXL", toubvec!2(0b10));
+ }
+ if (!(canFind(supported_privileged_mode, privileged_mode_t.USER_MODE))) {
+ mstatus.set_field("UXL", toubvec!2(0b00));
+ } else if (XLEN == 64) {
+ mstatus.set_field("UXL", toubvec!2(0b10));
+ }
}
mstatus.set_field("XS", 0);
mstatus.set_field("SD", 0);
diff --git a/vendor/lowrisc_ibex/vendor/google_riscv-dv/pygen/pygen_src/test/.riscv_instr_base_test.py.swp b/vendor/lowrisc_ibex/vendor/google_riscv-dv/pygen/pygen_src/test/.riscv_instr_base_test.py.swp
deleted file mode 100644
index 1f8b7389..00000000
Binary files a/vendor/lowrisc_ibex/vendor/google_riscv-dv/pygen/pygen_src/test/.riscv_instr_base_test.py.swp and /dev/null differ
diff --git a/vendor/lowrisc_ibex/vendor/google_riscv-dv/requirements.txt b/vendor/lowrisc_ibex/vendor/google_riscv-dv/requirements.txt
index 49934ea5..6fbc7060 100644
--- a/vendor/lowrisc_ibex/vendor/google_riscv-dv/requirements.txt
+++ b/vendor/lowrisc_ibex/vendor/google_riscv-dv/requirements.txt
@@ -1,5 +1,5 @@
PyYAML
-bitstring
+bitstring==3.1.9
Sphinx
Pallets-Sphinx-Themes
sphinxcontrib-log-cabinet
diff --git a/vendor/lowrisc_ibex/vendor/google_riscv-dv/run.py b/vendor/lowrisc_ibex/vendor/google_riscv-dv/run.py
index 84c0eb6c..b6b8aa0a 100644
--- a/vendor/lowrisc_ibex/vendor/google_riscv-dv/run.py
+++ b/vendor/lowrisc_ibex/vendor/google_riscv-dv/run.py
@@ -139,6 +139,11 @@ def parse_iss_yaml(iss, iss_yaml, isa, setting_dir, debug_cmd):
"""
logging.info("Processing ISS setup file : {}".format(iss_yaml))
yaml_data = read_yaml(iss_yaml)
+
+ # Path to the "scripts" subdirectory
+ my_path = os.path.dirname(os.path.realpath(__file__))
+ scripts_dir = os.path.join(my_path, "scripts") # Search for matched ISS
+
# Search for matched ISS
for entry in yaml_data:
if entry['iss'] == iss:
@@ -161,6 +166,7 @@ def parse_iss_yaml(iss, iss_yaml, isa, setting_dir, debug_cmd):
cmd = re.sub("\<variant\>", variant, cmd)
else:
cmd = re.sub("\<isa\>", isa, cmd)
+ cmd = re.sub("\<scripts_path\>", scripts_dir, cmd)
return cmd
logging.error("Cannot find ISS {}".format(iss))
sys.exit(RET_FAIL)
@@ -662,7 +668,7 @@ def iss_sim(test_list, output_dir, iss_list, iss_yaml, iss_opts,
prefix = ("{}/asm_test/{}_{}".format(
output_dir, test['test'], i))
elf = prefix + ".o"
- log = ("{}/{}.{}.log".format(log_dir, test['test'], i))
+ log = ("{}/{}_{}.log".format(log_dir, test['test'], i))
cmd = get_iss_cmd(base_cmd, elf, log)
if 'iss_opts' in test:
cmd += ' '
diff --git a/vendor/lowrisc_ibex/vendor/google_riscv-dv/scripts/lib.py b/vendor/lowrisc_ibex/vendor/google_riscv-dv/scripts/lib.py
index 9acf6dcf..872752f0 100644
--- a/vendor/lowrisc_ibex/vendor/google_riscv-dv/scripts/lib.py
+++ b/vendor/lowrisc_ibex/vendor/google_riscv-dv/scripts/lib.py
@@ -109,6 +109,7 @@ def run_cmd(cmd, timeout_s=999, exit_on_error=1, check_return_code=True,
executable='/bin/bash',
universal_newlines=True,
start_new_session=True,
+ env=os.environ,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
diff --git a/vendor/lowrisc_ibex/vendor/google_riscv-dv/scripts/renode_log_to_trace_csv.py b/vendor/lowrisc_ibex/vendor/google_riscv-dv/scripts/renode_log_to_trace_csv.py
new file mode 100644
index 00000000..cee4a29a
--- /dev/null
+++ b/vendor/lowrisc_ibex/vendor/google_riscv-dv/scripts/renode_log_to_trace_csv.py
@@ -0,0 +1,156 @@
+#!/usr/bin/env python3
+"""
+Converts Renode log to execution trace for RISC-V DV
+"""
+
+import argparse
+import os
+import re
+import sys
+import logging
+
+sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
+
+from riscv_trace_csv import *
+from lib import *
+
+# =============================================================================
+
+GPR_NAMES = [
+ ("x0", "zero"),
+ ("x1", "ra"),
+ ("x2", "sp"),
+ ("x3", "gp"),
+ ("x4", "tp"),
+ ("x5", "t0"),
+ ("x6", "t1"),
+ ("x7", "t2"),
+ ("x8", "s0"),
+ ("x9", "s1"),
+ ("x10", "a0"),
+ ("x11", "a1"),
+ ("x12", "a2"),
+ ("x13", "a3"),
+ ("x14", "a4"),
+ ("x15", "a5"),
+ ("x16", "a6"),
+ ("x17", "a7"),
+ ("x18", "s2"),
+ ("x19", "s3"),
+ ("x20", "s4"),
+ ("x21", "s5"),
+ ("x22", "s6"),
+ ("x23", "s7"),
+ ("x24", "s8"),
+ ("x25", "s9"),
+ ("x26", "s10"),
+ ("x27", "s11"),
+ ("x28", "t3"),
+ ("x29", "t4"),
+ ("x30", "t5"),
+ ("x31", "t6"),
+]
+
+# =============================================================================
+
+
+def process_renode_sim_log(log_name, csv_name):
+ """
+ Converts a Renode trace log to CSV format
+ """
+
+ # Build lookups
+ gpr_to_name = {m[0]: m[1] for m in GPR_NAMES}
+ known_gpr = {m[0].upper() for m in GPR_NAMES}
+
+ # FIXME: We need a previous PC each time. Assume its value for the first
+ # entry.
+ prev_pc = "80000000"
+
+ # FIXME: Assume initial state of all GPR set to 0
+ state = {m[0].upper(): "0" for m in GPR_NAMES}
+ trace = []
+
+ with open(log_name, "r") as fp:
+ for line in fp:
+
+ line = line.strip()
+ if not line:
+ continue
+
+ # Skip non-regdump
+ if not line.startswith("REGDUMP:"):
+ continue
+
+ # Decode state
+ fields = line.replace("REGDUMP:", "").split(",")
+ regs = {fields[i]: fields[i+1] for i in range(0, len(fields), 2)}
+
+ # Compute state difference
+ diff = {r: regs[r] for r in known_gpr \
+ if r in state and r in regs and state[r] != regs[r]}
+ state = regs
+
+ # Format the entry
+ entry = RiscvInstructionTraceEntry()
+ entry.pc = prev_pc
+ entry.binary = "0"
+ entry.operand = ""
+ entry.mode = "0"
+
+ # GPRs
+ for x in range(32):
+ name = "X{}".format(x)
+ if name in diff:
+ lname = name.lower()
+ value = int(diff[name], 16)
+ entry.gpr.append("{}:{:08x}".format(gpr_to_name[lname], value))
+
+ # CSRs
+ # TODO:
+
+ # Add only if there is a GPR/CSR change
+ if entry.gpr or entry.csr:
+ trace.append(entry)
+
+ prev_pc = state["PC"]
+
+ return trace
+
+
+def write_csv(file_name, data):
+ """
+ Writes the trace to CSV
+ """
+
+ with open(file_name, "w") as fp:
+
+ writer = RiscvInstructionTraceCsv(fp)
+ writer.start_new_trace()
+
+ for entry in data:
+ writer.write_trace_entry(entry)
+
+# ============================================================================
+
+
+def main():
+ # Parse input arguments
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--log", type=str, help="Input Renode simulation log")
+ parser.add_argument("--csv", type=str, help="Output trace CSV file")
+ parser.add_argument("-v", "--verbose", dest="verbose", action="store_true",
+ help="Verbose logging")
+ parser.set_defaults(verbose=False)
+
+ args = parser.parse_args()
+ setup_logging(args.verbose)
+
+ # Process Renode log
+ trace = process_renode_sim_log(args.log, args.csv)
+ # Write CSV
+ write_csv(args.csv, trace)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/vendor/lowrisc_ibex/vendor/google_riscv-dv/scripts/renode_wrapper.py b/vendor/lowrisc_ibex/vendor/google_riscv-dv/scripts/renode_wrapper.py
new file mode 100644
index 00000000..245de927
--- /dev/null
+++ b/vendor/lowrisc_ibex/vendor/google_riscv-dv/scripts/renode_wrapper.py
@@ -0,0 +1,106 @@
+#!/usr/bin/env python3
+import argparse
+import subprocess
+import os
+import tempfile
+
+# =============================================================================
+
+REPL_TEMPLATE = """
+memory: Memory.MappedMemory @ sysbus 0x80000000
+ size: 0x10000
+
+cpu: CPU.RiscV32 @ sysbus
+ cpuType: "{isa}"
+ timeProvider: clint
+ hartId: 0
+
+clint: IRQControllers.CoreLevelInterruptor @ sysbus 0x02000000
+ [0,1] -> cpu@[3,7]
+ frequency: 1000000
+"""
+
+RESC_TEMPLATE = """
+using sysbus
+mach create "riscv"
+machine LoadPlatformDescription @{repl}
+
+sysbus LoadELF @{elf}
+
+cpu MaximumBlockSize 1
+cpu SetHookAtBlockEnd "print('REGDUMP:' + ','.join(self.GetRegistersValues()))"
+
+emulation RunFor "0.000100"
+
+quit
+"""
+
+# =============================================================================
+
+
+def main():
+ """
+ The entry point
+ """
+
+ parser = argparse.ArgumentParser()
+
+ parser.add_argument(
+ "--renode",
+ type=str,
+ default="renode",
+ help="Path to Renode binary",
+ )
+ parser.add_argument(
+ "--log",
+ type=str,
+ default=None,
+ help="Output log file",
+ )
+ parser.add_argument(
+ "--isa",
+ type=str,
+ default="rv32i",
+ help="RISC-V ISA specification string",
+ )
+ parser.add_argument(
+ "--elf",
+ type=str,
+ required=True,
+ help="ELF file to run",
+ )
+
+ args = parser.parse_args()
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+
+ repl = os.path.join(tmpdir, "riscv.repl")
+ resc = os.path.join(tmpdir, "riscv.resc")
+
+ params = {
+ "renode": args.renode,
+ "isa": args.isa,
+ "elf": args.elf,
+ "repl": repl,
+ "resc": resc,
+ "log": args.log,
+ }
+
+ # Render REPL template
+ with open(repl, "w") as fp:
+ fp.write(REPL_TEMPLATE.format(**params))
+
+ # Render RESC template
+ with open(resc, "w") as fp:
+ fp.write(RESC_TEMPLATE.format(**params))
+
+ # Launch Renode, capture output
+ cmd = "{renode} --console -p {resc}".format(**params)
+ if args.log is not None:
+ cmd += " &>{log}".format(**params)
+
+ subprocess.call(cmd, shell=True)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/vendor/lowrisc_ibex/vendor/google_riscv-dv/scripts/spike_log_to_trace_csv.py b/vendor/lowrisc_ibex/vendor/google_riscv-dv/scripts/spike_log_to_trace_csv.py
index 2272cc31..b6970bca 100644
--- a/vendor/lowrisc_ibex/vendor/google_riscv-dv/scripts/spike_log_to_trace_csv.py
+++ b/vendor/lowrisc_ibex/vendor/google_riscv-dv/scripts/spike_log_to_trace_csv.py
@@ -27,10 +27,12 @@
from riscv_trace_csv import *
from lib import *
-RD_RE = re.compile(r"(core\s+\d+:\s+)?(?P<pri>\d) 0x(?P<addr>[a-f0-9]+?) " \
- "\((?P<bin>.*?)\) (?P<reg>[xf]\s*\d*?) 0x(?P<val>[a-f0-9]+)")
+RD_RE = re.compile(
+ r"(core\s+\d+:\s+)?(?P<pri>\d)\s+0x(?P<addr>[a-f0-9]+?)\s+" \
+ r"\((?P<bin>.*?)\)\s+(?P<reg>[xf]\s*\d*?)\s+0x(?P<val>[a-f0-9]+)" \
+ r"(\s+(?P<csr>\S+)\s+0x(?P<csr_val>[a-f0-9]+))?")
CORE_RE = re.compile(
- r"core\s+\d+:\s+0x(?P<pc>[a-f0-9]+?) \(0x(?P<bin>.*?)\) (?P<instr>.*?)$")
+ r"core\s+\d+:\s+0x(?P<pc>[a-f0-9]+?)\s+\(0x(?P<bin>.*?)\)\s+(?P<instr>.*?)$")
ADDR_RE = re.compile(
r"(?P[a-z0-9]+?),(?P[\-0-9]+?)\((?P[a-z0-9]+)\)")
ILLE_RE = re.compile(r"trap_illegal_instruction")
@@ -173,9 +175,13 @@ def read_spike_trace(path, full_trace):
# the --log-commits Spike option)?
commit_match = RD_RE.match(line)
if commit_match:
- instr.gpr.append(gpr_to_abi(commit_match.group('reg')
- .replace(' ', '')) +
- ':' + commit_match.group('val'))
+ groups = commit_match.groupdict()
+ instr.gpr.append(gpr_to_abi(groups["reg"].replace(' ', '')) +
+ ":" + groups["val"])
+
+ if groups["csr"] and groups["csr_val"]:
+ instr.csr.append(groups["csr"] + ":" + groups["csr_val"])
+
instr.mode = commit_match.group('pri')
# At EOF, we might have an instruction in hand. Yield it if so.
diff --git a/vendor/lowrisc_ibex/vendor/google_riscv-dv/scripts/whisper_log_trace_csv.py b/vendor/lowrisc_ibex/vendor/google_riscv-dv/scripts/whisper_log_trace_csv.py
index cd509bee..c0ffb89e 100644
--- a/vendor/lowrisc_ibex/vendor/google_riscv-dv/scripts/whisper_log_trace_csv.py
+++ b/vendor/lowrisc_ibex/vendor/google_riscv-dv/scripts/whisper_log_trace_csv.py
@@ -60,6 +60,7 @@ def process_whisper_sim_log(whisper_log, csv, full_trace=0):
whisper_instr = m.group("instr").replace("\. + ", "")
whisper_instr = whisper_instr.replace("\. - ", "-")
rv_instr_trace = RiscvInstructionTraceEntry()
+ rv_instr_trace.pc = m.group("pc")
rv_instr_trace.instr_str = whisper_instr
rv_instr_trace.binary = m.group("bin")
reg = "x" + str(int(m.group("reg"), 16))
diff --git a/vendor/lowrisc_ibex/vendor/google_riscv-dv/src/isa/riscv_csr_instr.sv b/vendor/lowrisc_ibex/vendor/google_riscv-dv/src/isa/riscv_csr_instr.sv
index 14a0b247..35d98fe4 100644
--- a/vendor/lowrisc_ibex/vendor/google_riscv-dv/src/isa/riscv_csr_instr.sv
+++ b/vendor/lowrisc_ibex/vendor/google_riscv-dv/src/isa/riscv_csr_instr.sv
@@ -114,7 +114,7 @@ class riscv_csr_instr extends riscv_instr;
foreach (initial_csrs[r]) begin
if (!(initial_csrs[r] inside {remove_csr})) begin
- include_write_reg.push_back(initial_csrs[r]);
+ include_write_reg.push_back(privileged_reg_t'(initial_csrs[r]));
end
end
diff --git a/vendor/lowrisc_ibex/vendor/google_riscv-dv/src/riscv_page_table_list.sv b/vendor/lowrisc_ibex/vendor/google_riscv-dv/src/riscv_page_table_list.sv
index 0bf6a617..a6935e0f 100644
--- a/vendor/lowrisc_ibex/vendor/google_riscv-dv/src/riscv_page_table_list.sv
+++ b/vendor/lowrisc_ibex/vendor/google_riscv-dv/src/riscv_page_table_list.sv
@@ -186,6 +186,9 @@ class riscv_page_table_list#(satp_mode_t MODE = SV39) extends uvm_object;
$cast(valid_data_leaf_pte, valid_leaf_pte.clone());
illegal_pte.turn_off_default_constraint();
valid_link_pte.xwr = NEXT_LEVEL_PAGE;
+ valid_link_pte.a = 1'b0;
+ valid_link_pte.d = 1'b0;
+ valid_link_pte.u = 1'b0;
valid_link_pte.pack_entry();
// Set data page to read/write, but not executable
valid_data_leaf_pte.xwr = READ_WRITE_PAGE;
diff --git a/vendor/lowrisc_ibex/vendor/google_riscv-dv/src/riscv_pmp_cfg.sv b/vendor/lowrisc_ibex/vendor/google_riscv-dv/src/riscv_pmp_cfg.sv
index 147f13ed..e2561783 100644
--- a/vendor/lowrisc_ibex/vendor/google_riscv-dv/src/riscv_pmp_cfg.sv
+++ b/vendor/lowrisc_ibex/vendor/google_riscv-dv/src/riscv_pmp_cfg.sv
@@ -764,15 +764,26 @@ class riscv_pmp_cfg extends uvm_object;
// if counter < pmp_num_regions => branch to beginning of loop,
// otherwise jump to the end of the loop
$sformatf("ble x%0d, x%0d, 19f", scratch_reg[1], scratch_reg[0]),
- $sformatf("j 0b"),
- // If we reach here, it means that no PMP entry has matched the request.
- // We must immediately jump to since the CPU is taking a PMP exception,
- // but this routine is unable to find a matching PMP region for the faulting access -
- // there is a bug somewhere.
- // In case of MMWP mode this is expected behavior, we should try to continue.
- $sformatf("19: csrr x%0d, 0x%0x", scratch_reg[0], MSECCFG),
- $sformatf("andi x%0d, x%0d, 2", scratch_reg[0], scratch_reg[0]),
- $sformatf("bnez x%0d, 27f", scratch_reg[0]),
+ $sformatf("j 0b")
+ };
+
+ // If we reach here, it means that no PMP entry has matched the request.
+ // We must immediately jump to since the CPU is taking a PMP exception,
+ // but this routine is unable to find a matching PMP region for the faulting access -
+ // there is a bug somewhere.
+ // In case of MMWP mode this is expected behavior, we should try to continue.
+ if (riscv_instr_pkg::support_epmp) begin
+ instr = {instr,
+ $sformatf("19: csrr x%0d, 0x%0x", scratch_reg[0], MSECCFG),
+ $sformatf("andi x%0d, x%0d, 2", scratch_reg[0], scratch_reg[0]),
+ $sformatf("bnez x%0d, 27f", scratch_reg[0])
+ };
+ end else begin
+ instr = {instr,
+ $sformatf("19: nop")
+ };
+ end
+ instr = {instr,
$sformatf("la x%0d, test_done", scratch_reg[0]),
$sformatf("jalr x0, x%0d, 0", scratch_reg[0])
};
@@ -839,16 +850,24 @@ class riscv_pmp_cfg extends uvm_object;
// If masked_fault_addr != masked_pmpaddr[i] : mismatch, so continue looping
$sformatf("bne x%0d, x%0d, 18b", scratch_reg[0], scratch_reg[4]),
$sformatf("j 26f")
- };
+ };
// Sub-section that is common to the address modes deciding what to do what to do when hitting
// a locked region
+ if (riscv_instr_pkg::support_epmp) begin
+ instr = {instr,
+ // If we get here there is an address match.
+ // First check whether we are in MML mode.
+ $sformatf("26: csrr x%0d, 0x%0x", scratch_reg[4], MSECCFG),
+ $sformatf("andi x%0d, x%0d, 1", scratch_reg[4], scratch_reg[4]),
+ $sformatf("bnez x%0d, 27f", scratch_reg[4])
+ };
+ end else begin
+ instr = {instr,
+ $sformatf("26: nop")
+ };
+ end
instr = {instr,
- // If we get here there is an address match.
- // First check whether we are in MML mode.
- $sformatf("26: csrr x%0d, 0x%0x", scratch_reg[4], MSECCFG),
- $sformatf("andi x%0d, x%0d, 1", scratch_reg[4], scratch_reg[4]),
- $sformatf("bnez x%0d, 27f", scratch_reg[4]),
// Then check whether the lock bit is set.
$sformatf("andi x%0d, x%0d, 128", scratch_reg[4], scratch_reg[3]),
$sformatf("bnez x%0d, 27f", scratch_reg[4]),
diff --git a/vendor/lowrisc_ibex/vendor/google_riscv-dv/yaml/iss.yaml b/vendor/lowrisc_ibex/vendor/google_riscv-dv/yaml/iss.yaml
index 63cbb2a1..8a8b6623 100644
--- a/vendor/lowrisc_ibex/vendor/google_riscv-dv/yaml/iss.yaml
+++ b/vendor/lowrisc_ibex/vendor/google_riscv-dv/yaml/iss.yaml
@@ -36,3 +36,8 @@
path_var: WHISPER_ISS
cmd: >
--log --xlen --isa
+
+- iss: renode
+ path_var: RENODE_PATH
+ cmd: >
+ python3 /renode_wrapper.py --renode "" --elf --isa
diff --git a/vendor/lowrisc_ibex/vendor/google_riscv-dv/yaml/simulator.yaml b/vendor/lowrisc_ibex/vendor/google_riscv-dv/yaml/simulator.yaml
index 730f494c..76dd6f1e 100644
--- a/vendor/lowrisc_ibex/vendor/google_riscv-dv/yaml/simulator.yaml
+++ b/vendor/lowrisc_ibex/vendor/google_riscv-dv/yaml/simulator.yaml
@@ -22,6 +22,7 @@
- "vcs -file /vcs.compile.option.f
+incdir+
+incdir+
+ +vcs+lic+wait
-f /files.f -full64
-l /compile.log
-LDFLAGS '-Wl,--no-as-needed'
@@ -53,7 +54,7 @@
- tool: questa
compile:
cmd:
- - "vmap mtiUvm $QUESTA_HOME/questasim/uvm-1.2"
+ - "vmap mtiUvm $QUESTA_HOME/uvm-1.2"
- "vlog -64
+incdir+
+incdir+
diff --git a/vendor/lowrisc_ibex/vendor/lowrisc_ip.lock.hjson b/vendor/lowrisc_ibex/vendor/lowrisc_ip.lock.hjson
index e1788265..33a53061 100644
--- a/vendor/lowrisc_ibex/vendor/lowrisc_ip.lock.hjson
+++ b/vendor/lowrisc_ibex/vendor/lowrisc_ip.lock.hjson
@@ -9,6 +9,6 @@
upstream:
{
url: https://github.com/lowRISC/opentitan
- rev: 0deeaa99e5760ee4f5c0a08e5fc1670509d22744
+ rev: e6a0e9a1363d33789283ea6ba3c4d94d41f2dee5
}
}
diff --git a/vendor/lowrisc_ibex/vendor/lowrisc_ip/dv/sv/dv_utils/dv_macros.svh b/vendor/lowrisc_ibex/vendor/lowrisc_ip/dv/sv/dv_utils/dv_macros.svh
index 4e67d40f..e33b1622 100644
--- a/vendor/lowrisc_ibex/vendor/lowrisc_ip/dv/sv/dv_utils/dv_macros.svh
+++ b/vendor/lowrisc_ibex/vendor/lowrisc_ip/dv/sv/dv_utils/dv_macros.svh
@@ -642,6 +642,6 @@
// Do not leave this macro in other source files in the remote repo.
`ifndef OTDBG
`define OTDBG(x) \
- $write($sformatf("%t:OTDBG:",$time));\
+ $write($sformatf("%t:OTDBG:%s:%d:",$time,`__FILE__, `__LINE__));\
$display($sformatf x);
`endif
diff --git a/vendor/lowrisc_ibex/vendor/lowrisc_ip/dv/sv/mem_bkdr_util/mem_bkdr_util__sram.sv b/vendor/lowrisc_ibex/vendor/lowrisc_ip/dv/sv/mem_bkdr_util/mem_bkdr_util__sram.sv
index b9590335..f5edc537 100644
--- a/vendor/lowrisc_ibex/vendor/lowrisc_ip/dv/sv/mem_bkdr_util/mem_bkdr_util__sram.sv
+++ b/vendor/lowrisc_ibex/vendor/lowrisc_ip/dv/sv/mem_bkdr_util/mem_bkdr_util__sram.sv
@@ -5,6 +5,7 @@
// Wrapper functions for SRAM's encrypted read/write operations.
// This file is included in `mem_bkdr_util.sv` as a continuation of `mem_bkdr_util` class.
+// Returns the address after scrambling it using the given nonce.
function logic [bus_params_pkg::BUS_AW-1:0] get_sram_encrypt_addr (
logic [bus_params_pkg::BUS_AW-1:0] addr,
logic [SRAM_BLOCK_WIDTH-1:0] nonce,
@@ -22,27 +23,27 @@ function logic [bus_params_pkg::BUS_AW-1:0] get_sram_encrypt_addr (
addr_arr[i] = addr[addr_lsb + i];
end
- // calculate scrambled address
scr_addr_arr = sram_scrambler_pkg::encrypt_sram_addr(addr_arr, full_addr_width, nonce_arr);
- // convert to bus address output
+ // Convert to bus address output.
for (int i = 0; i < addr_lsb; i++) begin
scr_addr[i] = addr[i];
end
-
for (int i = 0; i < full_addr_width; i++) begin
scr_addr[addr_lsb + i] = scr_addr_arr[i];
end
-
return scr_addr;
+endfunction : get_sram_encrypt_addr
-endfunction // get_sram_encrypt_addr
-
+// Returns the data after adding integrity bits and encrypting it with the given key and nonce.
+// If flip_bits is non-zero it may introduce integrity errors, but notice there is a small chance
+// after descrambling the data the errors will not be detected.
function logic [38:0] get_sram_encrypt32_intg_data (
logic [bus_params_pkg::BUS_AW-1:0] addr,
logic [31:0] data,
logic [SRAM_KEY_WIDTH-1:0] key,
logic [SRAM_BLOCK_WIDTH-1:0] nonce,
+ bit [38:0] flip_bits = '0,
int extra_addr_bits=0);
logic [38:0] integ_data;
@@ -61,25 +62,35 @@ function logic [38:0] get_sram_encrypt32_intg_data (
addr_arr[i] = addr[addr_lsb + i];
end
- // Calculate the integrity constant
integ_data = prim_secded_pkg::prim_secded_inv_39_32_enc(data);
+ integ_data ^= flip_bits;
- // Calculate the scrambled data
wdata_arr = {<<{integ_data}};
wdata_arr = sram_scrambler_pkg::encrypt_sram_data(
wdata_arr, 39, 39, addr_arr, full_addr_width, key_arr, nonce_arr
);
scrambled_data = {<<{wdata_arr}};
-
return scrambled_data;
+endfunction : get_sram_encrypt32_intg_data
-endfunction // get_sram_encrypt32_intg_data
-
+// Returns the data at the given address after descrambling the address and decrypting the data.
+// It simply ignores the integrity bits.
virtual function logic [38:0] sram_encrypt_read32_integ(logic [bus_params_pkg::BUS_AW-1:0] addr,
logic [SRAM_KEY_WIDTH-1:0] key,
logic [SRAM_BLOCK_WIDTH-1:0] nonce);
- logic [bus_params_pkg::BUS_AW-1:0] scr_addr;
- logic [38:0] rdata = '0;
+ logic [bus_params_pkg::BUS_AW-1:0] scr_addr = get_sram_encrypt_addr(addr, nonce);
+ logic [38:0] rdata39 = _sram_decrypt_read39(addr, scr_addr, key, nonce);
+ return rdata39[31:0];
+endfunction : sram_encrypt_read32_integ
+
+// This reads the data at a scrambled address and decrypts it. It returns the data and
+// integrity bits.
+local function logic [38:0] _sram_decrypt_read39(
+ logic [bus_params_pkg::BUS_AW-1:0] addr,
+ logic [bus_params_pkg::BUS_AW-1:0] scr_addr,
+ logic [SRAM_KEY_WIDTH-1:0] key,
+ logic [SRAM_BLOCK_WIDTH-1:0] nonce);
+ logic [38:0] rdata39 = '0;
logic rdata_arr [] = new[39];
logic addr_arr [] = new[addr_width];
@@ -92,59 +103,68 @@ virtual function logic [38:0] sram_encrypt_read32_integ(logic [bus_params_pkg::B
addr_arr[i] = addr[addr_lsb + i];
end
- // Calculate the scrambled address
- scr_addr = get_sram_encrypt_addr(addr, nonce);
-
- // Read memory and return the decrypted data
- rdata = read39integ(scr_addr);
- `uvm_info(`gfn, $sformatf("scr data: 0x%0x", rdata), UVM_HIGH)
- rdata_arr = {<<{rdata}};
+ rdata39 = read39integ(scr_addr);
+ `uvm_info(`gfn, $sformatf("scr data: 0x%0x", rdata39), UVM_HIGH)
+ rdata_arr = {<<{rdata39}};
rdata_arr = sram_scrambler_pkg::decrypt_sram_data(
rdata_arr, 39, 39, addr_arr, addr_width, key_arr, nonce_arr
);
- rdata = {<<{rdata_arr}};
- // Only return the data payload without ECC bits.
- return rdata[31:0];
-
-endfunction
+ rdata39 = {<<{rdata_arr}};
+ return rdata39;
+endfunction : _sram_decrypt_read39
+// Writes the data at the given address. It scrambles the address and encrypts the data after
+// adding integrity bits. If flip_bits is non-zero it may introduce ecc errors.
virtual function void sram_encrypt_write32_integ(logic [bus_params_pkg::BUS_AW-1:0] addr,
logic [31:0] data,
logic [SRAM_KEY_WIDTH-1:0] key,
logic [SRAM_BLOCK_WIDTH-1:0] nonce,
bit [38:0] flip_bits = 0);
- logic [bus_params_pkg::BUS_AW-1:0] scr_addr;
- logic [38:0] integ_data;
- logic [38:0] scrambled_data;
-
- logic wdata_arr [] = new[39];
- logic addr_arr [] = new[addr_width];
- logic key_arr [] = new[SRAM_KEY_WIDTH];
- logic nonce_arr [] = new[SRAM_BLOCK_WIDTH];
-
- key_arr = {<<{key}};
- nonce_arr = {<<{nonce}};
-
- for (int i = 0; i < addr_width; i++) begin
- addr_arr[i] = addr[addr_lsb + i];
- end
-
- // Calculate the scrambled address
- scr_addr = get_sram_encrypt_addr(addr, nonce);
-
- // Calculate the integrity constant
- integ_data = prim_secded_pkg::prim_secded_inv_39_32_enc(data);
-
- // flip some bits to inject integrity fault
- integ_data ^= flip_bits;
-
- // Calculate the scrambled data
- wdata_arr = {<<{integ_data}};
- wdata_arr = sram_scrambler_pkg::encrypt_sram_data(
- wdata_arr, 39, 39, addr_arr, addr_width, key_arr, nonce_arr
- );
- scrambled_data = {<<{wdata_arr}};
-
- // Write the scrambled data to memory
+ logic [bus_params_pkg::BUS_AW-1:0] scr_addr = get_sram_encrypt_addr(addr, nonce);
+ _sram_encrypt_write39(addr, scr_addr, data, key, nonce, flip_bits);
+endfunction : sram_encrypt_write32_integ
+
+// This encrypts, possibly flips some bits to inject errors, and writes the resulting data
+// to a scrambled address.
+local function void _sram_encrypt_write39(logic [bus_params_pkg::BUS_AW-1:0] addr,
+ logic [bus_params_pkg::BUS_AW-1:0] scr_addr,
+ logic [31:0] data,
+ logic [SRAM_KEY_WIDTH-1:0] key,
+ logic [SRAM_BLOCK_WIDTH-1:0] nonce,
+ bit [38:0] flip_bits);
+ logic [38:0] scrambled_data = get_sram_encrypt32_intg_data(addr, data, key, nonce, flip_bits);
write39integ(scr_addr, scrambled_data);
-endfunction
+endfunction : _sram_encrypt_write39
+
+// This injects integrity errors in sram for an original address and the corresponding
+// scrambled address. It needs to pass both addresses even though only the scrambled address
+// is affected, since the original address is used to encrypt the data.
+//
+// This code needs to try multiple random data since it is possible after encryption the
+// bit pattern will not result in a data error, as described in issue #10976
+virtual function void sram_inject_integ_error(logic [bus_params_pkg::BUS_AW-1:0] addr,
+ logic [bus_params_pkg::BUS_AW-1:0] scr_addr,
+ logic [SRAM_KEY_WIDTH-1:0] key,
+ logic [SRAM_BLOCK_WIDTH-1:0] nonce);
+ int max_attempts = 40;
+ int attempt = 0;
+
+ while (attempt < max_attempts) begin
+ bit [31:0] data = $urandom();
+ bit [38:0] rdata_integ;
+ prim_secded_pkg::secded_inv_39_32_t dec;
+ // The specific bits to be flipped should be irrelevant.
+ _sram_encrypt_write39(addr, scr_addr, data, key, nonce, 39'h1001);
+ rdata_integ = _sram_decrypt_read39(addr, scr_addr, key, nonce);
+ dec = prim_secded_pkg::prim_secded_inv_39_32_dec(rdata_integ);
+ if (dec.err) begin
+ `uvm_info(`gfn, $sformatf(
+ "sram_inject_integ_error addr 0x%x, data 0x%x injects error %b after %0d attempts",
+ addr, rdata_integ, dec.err, attempt),
+ UVM_MEDIUM)
+ break;
+ end
+ ++attempt;
+ end
+ `DV_CHECK_LT(attempt, max_attempts, "Too many attempts in sram_inject_ecc_error")
+endfunction : sram_inject_integ_error
diff --git a/vendor/lowrisc_ibex/vendor/lowrisc_ip/dv/sv/mem_bkdr_util/sram_scrambler_pkg.sv b/vendor/lowrisc_ibex/vendor/lowrisc_ip/dv/sv/mem_bkdr_util/sram_scrambler_pkg.sv
index 0093d4e3..f832a5f5 100644
--- a/vendor/lowrisc_ibex/vendor/lowrisc_ip/dv/sv/mem_bkdr_util/sram_scrambler_pkg.sv
+++ b/vendor/lowrisc_ibex/vendor/lowrisc_ip/dv/sv/mem_bkdr_util/sram_scrambler_pkg.sv
@@ -211,7 +211,7 @@ package sram_scrambler_pkg;
endfunction : encrypt_sram_addr
- // Deccrypts the target SRAM address using the custom S&P network.
+ // Decrypts the target SRAM address using the custom S&P network.
function automatic state_t decrypt_sram_addr(logic addr[], int addr_width,
logic full_nonce[]);
diff --git a/vendor/lowrisc_ibex/vendor/lowrisc_ip/dv/sv/push_pull_agent/push_pull_agent_cfg.sv b/vendor/lowrisc_ibex/vendor/lowrisc_ip/dv/sv/push_pull_agent/push_pull_agent_cfg.sv
index 52b7b965..f9d24e6f 100644
--- a/vendor/lowrisc_ibex/vendor/lowrisc_ip/dv/sv/push_pull_agent/push_pull_agent_cfg.sv
+++ b/vendor/lowrisc_ibex/vendor/lowrisc_ip/dv/sv/push_pull_agent/push_pull_agent_cfg.sv
@@ -187,6 +187,33 @@ class push_pull_agent_cfg #(parameter int HostDataWidth = 32,
return (d_user_data_q.size() > 0);
endfunction
+ // Return true if the interface is completely silent
+ virtual function logic is_silent();
+ return !((agent_type == PushAgent) ?
+ (vif.mon_cb.valid || vif.mon_cb.ready) :
+ (vif.mon_cb.req || vif.mon_cb.ack));
+ endfunction
+
+ // Return true if there's a stalled transaction
+ //
+ // If this is a pull agent, there is a stalled transaction when the req signal is high (so
+ // something is trying to read data), but the ack signal is low (there's no data available). If it
+ // is a push agent, there is a stalled transaction when the valid signal is high (so something is
+ // trying to provide data) but the ready signal is low (the data isn't being consumed).
+ virtual function logic is_stalled();
+ return ((agent_type == PushAgent) ?
+ (vif.mon_cb.valid && !vif.mon_cb.ready) :
+ (vif.mon_cb.req && !vif.mon_cb.ack));
+ endfunction
+
+ // Wait for any current transaction to finish
+ //
+ virtual task wait_while_running();
+ while (is_stalled()) @(vif.mon_cb);
+ // Add one last cycle to wait past the final cycle for the transaction that was stalled.
+ @(vif.mon_cb);
+ endtask
+
`uvm_object_param_utils_begin(push_pull_agent_cfg#(HostDataWidth, DeviceDataWidth))
`uvm_field_enum(push_pull_agent_e, agent_type, UVM_DEFAULT)
`uvm_field_enum(pull_handshake_e, pull_handshake_type, UVM_DEFAULT)
diff --git a/vendor/lowrisc_ibex/vendor/lowrisc_ip/dv/tools/dvsim/sim.mk b/vendor/lowrisc_ibex/vendor/lowrisc_ip/dv/tools/dvsim/sim.mk
index 59fb1ac1..0f8f6b7f 100644
--- a/vendor/lowrisc_ibex/vendor/lowrisc_ip/dv/tools/dvsim/sim.mk
+++ b/vendor/lowrisc_ibex/vendor/lowrisc_ip/dv/tools/dvsim/sim.mk
@@ -83,7 +83,7 @@ ifneq (${sw_images},)
index=`echo $$sw_image | cut -d: -f 3`; \
flags=(`echo $$sw_image | cut -d: -f 4- --output-delimiter " "`); \
bazel_label="`echo $$sw_image | cut -d: -f 1-2`"; \
- if [[ $${index} != 4 ]]; then \
+ if [[ $${index} != 4 && $${index} != 5 ]]; then \
bazel_label="$${bazel_label}_$${sw_build_device}"; \
bazel_cquery="labels(data, $${bazel_label}) union labels(srcs, $${bazel_label})"; \
else \
@@ -117,27 +117,54 @@ ifneq (${sw_images},)
fi; \
echo "Building with command: $${bazel_cmd} build $${bazel_opts} $${bazel_label}"; \
$${bazel_cmd} build $${bazel_airgapped_opts} $${bazel_opts} $${bazel_label}; \
- for dep in $$($${bazel_cmd} cquery $${bazel_airgapped_opts} \
- $${bazel_cquery} \
+ kind=$$($${bazel_cmd} cquery $${bazel_airgapped_opts} \
+ $${bazel_label} \
--ui_event_filters=-info \
--noshow_progress \
- --output=starlark); do \
- if [[ $$dep == //hw/ip/otp_ctrl/data* ]] || \
- ([[ $$dep != //hw* ]] && [[ $$dep != //util* ]] && [[ $$dep != //sw/host* ]]); then \
- for artifact in $$($${bazel_cmd} cquery $${bazel_airgapped_opts} $${dep} \
- --ui_event_filters=-info \
- --noshow_progress \
- --output=starlark \
- --starlark:expr="\"\\n\".join([f.path for f in target.files.to_list()])"); do \
+ --output=label_kind | cut -f1 -d' '); \
+ if [[ $${kind} == "opentitan_test" \
+ || $${bazel_label} == "//sw/device/lib/testing/test_rom:test_rom_sim_dv" \
+ || $${bazel_label} == "//sw/device/silicon_creator/rom:rom_with_fake_keys_sim_dv" ]]; then \
+ for artifact in $$($${bazel_cmd} cquery $${bazel_airgapped_opts} \
+ $${bazel_label} \
+ --ui_event_filters=-info \
+ --noshow_progress \
+ --output=starlark \
+ `# An opentitan_test rule has all of its needed files in its runfiles.` \
+ --starlark:expr='"\n".join([f.path for f in target.data_runfiles.files.to_list()])'); do \
cp -f $${artifact} $${run_dir}/$$(basename $${artifact}); \
if [[ $$artifact == *.bin && \
-f "$$(echo $${artifact} | cut -d. -f 1).elf" ]]; then \
cp -f "$$(echo $${artifact} | cut -d. -f 1).elf" \
$${run_dir}/$$(basename -s .bin $${artifact}).elf; \
fi; \
- done; \
- fi; \
- done; \
+ done; \
+ else \
+ for dep in $$($${bazel_cmd} cquery $${bazel_airgapped_opts} \
+ $${bazel_cquery} \
+ --ui_event_filters=-info \
+ --noshow_progress \
+ --output=starlark \
+ `# Bazel 6 cquery outputs repository targets in canonical format (@//blabla) whereas bazel 5 does not, ` \
+ `# so we use a custom starlark printer to remove in leading @ when needed.` \
+ --starlark:expr='str(target.label)[1:] if str(target.label).startswith("@//") else target.label'); do \
+ if [[ $$dep == //hw/ip/otp_ctrl/data* ]] || \
+ ([[ $$dep != //hw* ]] && [[ $$dep != //util* ]] && [[ $$dep != //sw/host* ]]); then \
+ for artifact in $$($${bazel_cmd} cquery $${bazel_airgapped_opts} $${dep} \
+ --ui_event_filters=-info \
+ --noshow_progress \
+ --output=starlark \
+ --starlark:expr="\"\\n\".join([f.path for f in target.files.to_list()])"); do \
+ cp -f $${artifact} $${run_dir}/$$(basename $${artifact}); \
+ if [[ $$artifact == *.bin && \
+ -f "$$(echo $${artifact} | cut -d. -f 1).elf" ]]; then \
+ cp -f "$$(echo $${artifact} | cut -d. -f 1).elf" \
+ $${run_dir}/$$(basename -s .bin $${artifact}).elf; \
+ fi; \
+ done; \
+ fi; \
+ done; \
+ fi; \
fi; \
done;
endif
diff --git a/vendor/lowrisc_ibex/vendor/lowrisc_ip/dv/tools/dvsim/vcs.hjson b/vendor/lowrisc_ibex/vendor/lowrisc_ip/dv/tools/dvsim/vcs.hjson
index b4894ce1..10bf616d 100644
--- a/vendor/lowrisc_ibex/vendor/lowrisc_ip/dv/tools/dvsim/vcs.hjson
+++ b/vendor/lowrisc_ibex/vendor/lowrisc_ip/dv/tools/dvsim/vcs.hjson
@@ -106,6 +106,9 @@
"-error=ENUMASSIGN",
// Tasks must not be enabled in functions. Other tools do not allow this.
"-error=TEIF"
+ // This helps avoid races in flops per Synopsys CASE 01552811.
+ // It causes flops to always use the sampled data value.
+ "-deraceclockdata"
]
run_opts: ["-licqueue",
diff --git a/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/README.md b/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/README.md
index 789f8c8d..62dfd43d 100644
--- a/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/README.md
+++ b/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/README.md
@@ -1,5 +1,35 @@
# lowRISC Hardware Primitives
+[`prim_alert`](https://reports.opentitan.org/hw/ip/prim/dv/prim_alert/latest/report.html):
+![](https://dashboards.lowrisc.org/badges/dv/prim_alert/test.svg)
+![](https://dashboards.lowrisc.org/badges/dv/prim_alert/passing.svg)
+![](https://dashboards.lowrisc.org/badges/dv/prim_alert/functional.svg)
+![](https://dashboards.lowrisc.org/badges/dv/prim_alert/code.svg)
+
+[`prim_esc`](https://reports.opentitan.org/hw/ip/prim/dv/prim_esc/latest/report.html):
+![](https://dashboards.lowrisc.org/badges/dv/prim_esc/test.svg)
+![](https://dashboards.lowrisc.org/badges/dv/prim_esc/passing.svg)
+![](https://dashboards.lowrisc.org/badges/dv/prim_esc/functional.svg)
+![](https://dashboards.lowrisc.org/badges/dv/prim_esc/code.svg)
+
+[`prim_lfsr`](https://reports.opentitan.org/hw/ip/prim/dv/prim_lfsr/latest/report.html):
+![](https://dashboards.lowrisc.org/badges/dv/prim_lfsr/test.svg)
+![](https://dashboards.lowrisc.org/badges/dv/prim_lfsr/passing.svg)
+![](https://dashboards.lowrisc.org/badges/dv/prim_lfsr/functional.svg)
+![](https://dashboards.lowrisc.org/badges/dv/prim_lfsr/code.svg)
+
+[`prim_present`](https://reports.opentitan.org/hw/ip/prim/dv/prim_lfsr/latest/report.html):
+![](https://dashboards.lowrisc.org/badges/dv/prim_present/test.svg)
+![](https://dashboards.lowrisc.org/badges/dv/prim_present/passing.svg)
+![](https://dashboards.lowrisc.org/badges/dv/prim_present/functional.svg)
+![](https://dashboards.lowrisc.org/badges/dv/prim_present/code.svg)
+
+[`prim_prince`](https://reports.opentitan.org/hw/ip/prim/dv/prim_lfsr/latest/report.html):
+![](https://dashboards.lowrisc.org/badges/dv/prim_prince/test.svg)
+![](https://dashboards.lowrisc.org/badges/dv/prim_prince/passing.svg)
+![](https://dashboards.lowrisc.org/badges/dv/prim_prince/functional.svg)
+![](https://dashboards.lowrisc.org/badges/dv/prim_prince/code.svg)
+
## Concepts
This directory contains basic building blocks to create a hardware design,
diff --git a/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/doc/prim_lfsr.md b/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/doc/prim_lfsr.md
index a9a01787..0531fdde 100644
--- a/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/doc/prim_lfsr.md
+++ b/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/doc/prim_lfsr.md
@@ -10,8 +10,7 @@ with the shift register, whereas the latter combines several shift register taps
and reduces them with an XNOR tree. For more information, refer to
[this page](https://en.wikipedia.org/wiki/Linear-feedback_shift_register). Both
LFSR flavors have maximal period (`2^LfsrDw - 1`). The recommendation is to use
-the Galois type and fall back to the Fibonacci type depending on the polynomial
-width availability in the lookup table (see below).
+the Galois type.
## Parameters
@@ -70,18 +69,15 @@ LFSR with the `DefaultSeed` in the next cycle.
The LFSR coefficients are taken from an internal set of lookup tables with
precomputed coefficients. Alternatively, a custom polynomial can be provided
-using the `Custom` parameter. The lookup tables contain polynomials for both
-LFSR forms and range from 4bit to 64bit for the Galois form and 3bit to 168bit
-for the Fibonacci form. The polynomial coefficients have been obtained from
-[this page](https://users.ece.cmu.edu/~koopman/lfsr/) and
+using the `Custom` parameter. The lookup table contains polynomials for both
+LFSR forms and range from 3bit to 168bit.
+The polynomial coefficients have been obtained from
[Xilinx application note 52](https://www.xilinx.com/support/documentation/application_notes/xapp052.pdf).
The script `./script/get-lfsr-coeffs.py` can be used to download, parse and dump
these coefficients in SV format as follows:
```
$ script/get-lfsr-coeffs.py -o
```
-The default is to get the Galois coefficients. If the Fibonacci coefficients
-are needed, add the `--fib` switch to the above command.
The implementation of the state transition function of both polynomials have
been formally verified. Further, all polynomials up to 34bit in length have been
diff --git a/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/doc/prim_xoshiro256pp.md b/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/doc/prim_xoshiro256pp.md
index 50beef4c..267af600 100644
--- a/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/doc/prim_xoshiro256pp.md
+++ b/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/doc/prim_xoshiro256pp.md
@@ -1,8 +1,7 @@
# Primitive Component: XoShiRo256++
-# Overviewtitle
-`prim_xoshiro256pp` is a PRNG with 256 bit state.
+`prim_xoshiro256pp` is a PRNG with 256-bit state.
For more information refer to [this page](https://arxiv.org/pdf/1805.01407.pdf).
## Parameters
@@ -42,22 +41,22 @@ entropy_i | DefaultSeed | all_zero_o
```
Xoshiro256++ PRNG consists of:
- * 256b state
+ * A 256-bit state
* A single-cycle state-update function.
* A state output function.
-The basic xoshiro256++ PRNG has a 64 bit output.
-This implementation enables the output size to be any multiple of 64 bits.
-The output size controlled using the `OutputDW` parameter.
+The basic xoshiro256++ PRNG has a 64-bit output.
+This implementation supports an output size of any multiple of 64 bits.
+The output size is controlled using the `OutputDW` parameter.
The xoshiro256++ module has an enable input and an additional entropy input that is
-XOR'ed into the PRNG state (connect to zero if this feature is unused).
+XORed into the PRNG state (connect to zero if this feature is unused).
As the PRNG may jump into the all-zero state (e.g. due to an active attack), the PRNG
state-update function contains a lockup protection which re-seeds the state with
`DefaultSeed` and raises the alert signal `all_zero_o`, once this condition is detected.
When the seed enable signal `seed_en_i` is raised, the internal state of xoshiro256++ is updated
-with the value provided at the 256b input 'seed_i'.
+with the value provided at the 256-bit input 'seed_i'.
The state is internally updated in every clock cycle whenever the enable signal `xoshiro_en_i` is raised.
The timing diagram below visualizes this process.
diff --git a/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/dv/prim_lfsr/prim_lfsr_tb.sv b/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/dv/prim_lfsr/prim_lfsr_tb.sv
index 8d87d458..98556155 100644
--- a/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/dv/prim_lfsr/prim_lfsr_tb.sv
+++ b/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/dv/prim_lfsr/prim_lfsr_tb.sv
@@ -50,6 +50,8 @@ module prim_lfsr_tb;
logic [MaxLfsrDw-1:0] lfsr_periods [MaxLfsrDw+1];
logic [MaxLfsrDw-1:0] entropy [MaxLfsrDw+1];
logic [MaxLfsrDw-1:0] seed [MaxLfsrDw+1];
+ logic [MaxLfsrDw-1:0] rand_entropy;
+ logic [MaxLfsrDw-1:0] rand_seed;
for (genvar k = MinLfsrDw; k <= MaxLfsrDw; k++) begin : gen_duts
@@ -224,10 +226,12 @@ module prim_lfsr_tb;
repeat ($urandom_range(5000, 10000)) begin
// Do random reset sometimes
if ($urandom_range(0, 10) == 0) main_clk.apply_reset();
- randomize(seed[k]);
- randomize(entropy[k]);
- randomize(lfsr_en[k]);
- randomize(seed_en[k]);
+ randomize(rand_seed);
+ randomize(rand_entropy);
+ seed[k] = rand_seed;
+ entropy[k] = rand_entropy;
+ lfsr_en[k] = $urandom_range(0, 1);
+ seed_en[k] = $urandom_range(0, 1);
main_clk.wait_clks(1);
end
diff --git a/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/lint/prim_cipher.vlt b/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/lint/prim_cipher.vlt
index 8cbfb1b5..533b1cd9 100644
--- a/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/lint/prim_cipher.vlt
+++ b/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/lint/prim_cipher.vlt
@@ -6,9 +6,11 @@
// Tell the Verilator scheduler to split up these variables into
// separate pieces when it's figuring out process scheduling. This
-// avoids spurious UNOPTFLAT warnings caused by the fact that the
-// arrays feed into themselves (with different bits for different
-// positions in the tree).
+// avoids spurious UNOPTFLAT warnings caused by the fact that (if you
+// don't track the indices carefully) it looks like the arrays feed
+// into themselves.
split_var -module "prim_present" -var "data_state"
split_var -module "prim_present" -var "round_idx"
split_var -module "prim_present" -var "round_key"
+split_var -module "prim_prince" -var "data_state_lo"
+split_var -module "prim_prince" -var "data_state_hi"
diff --git a/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/lint/prim_clock_div.waiver b/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/lint/prim_clock_div.waiver
index 899400c7..c62bfacc 100644
--- a/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/lint/prim_clock_div.waiver
+++ b/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/lint/prim_clock_div.waiver
@@ -4,18 +4,11 @@
#
# waiver file for prim_clock_div
-waive -rules CLOCK_EDGE -location {prim_clock_div.sv} -msg {Falling edge of clock 'clk_i' used here, should use rising edge} \
- -comment "The clock switch signal is synchronized on negative edge to ensure it is away from any transition"
-
-waive -rules DUAL_EDGE_CLOCK -location {prim_clock_div.sv} -regexp {.*} \
- -comment "The clock switch signal is synchronized on negative edge to ensure it is away from any transition"
+waive -rules {STAR_PORT_CONN_USE} -location {prim_clock_div.sv} -regexp {.*wild card port connection encountered on instance.*} \
+ -comment "Generated prims may have wildcard connections."
waive -rules CLOCK_MUX -location {prim_clock_div.sv} -regexp {.*reaches a multiplexer here, used as a clock.*} \
-comment "A mux is used during scan bypass, and for switching between div by 2 and div by 1 clocks"
-waive -rules CLOCK_USE -location {prim_clock_div.sv} -regexp {'clk_i' is connected to 'prim_clock_mux2' port 'clk1_i', and used as a clock} \
- -comment "This clock mux usage is OK."
-
-waive -rules SAME_NAME_TYPE -location {prim_clock_div.sv} -regexp {'ResetValue' is used as a parameter here, and as an enumeration value at} \
- -comment "Reused parameter name."
-
+waive -rules {SAME_NAME_TYPE} -location {prim_clock_div.sv} -regexp {'ResetValue' is used as a parameter here, and as an enumeration value at prim.*} \
+ -comment "Parameter name reuse."
diff --git a/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/lint/prim_subreg.waiver b/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/lint/prim_subreg.waiver
index e0c83faa..fe378348 100644
--- a/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/lint/prim_subreg.waiver
+++ b/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/lint/prim_subreg.waiver
@@ -4,3 +4,6 @@
waive -rules INPUT_NOT_READ -location {prim_subreg.sv} -regexp {Input port 'wd' is not read from} \
-comment "for RO wd is not used"
+
+waive -rules {PARAM_NOT_USED} -location {prim_subreg_shadow.sv} -regexp {Mubi} \
+ -comment "Mubi is not yet supported in prim_subreg_shadow."
diff --git a/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/prim.core b/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/prim.core
index 47389f96..7d701c8f 100644
--- a/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/prim.core
+++ b/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/prim.core
@@ -15,6 +15,7 @@ filesets:
- lowrisc:prim:pad_wrapper
- lowrisc:prim:prim_pkg
- lowrisc:prim:clock_mux2
+ - lowrisc:prim:clock_inv
- lowrisc:prim:buf
- lowrisc:prim:flop
- lowrisc:prim:flop_en
diff --git a/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/prim_clock_div.core b/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/prim_clock_div.core
index ab031d75..755f17d5 100644
--- a/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/prim_clock_div.core
+++ b/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/prim_clock_div.core
@@ -6,15 +6,17 @@ CAPI=2:
name: "lowrisc:prim:clock_div"
description: "Generic clock divide"
filesets:
- files_rtl:
+ primgen_dep:
depend:
- lowrisc:prim:prim_pkg
- - lowrisc:prim:flop
- - lowrisc:prim:clock_inv
- - lowrisc:prim:clock_buf
+ - lowrisc:prim:primgen
+
+ files_verilator_waiver:
+ depend:
+ # common waivers
+ - lowrisc:lint:common
files:
- - rtl/prim_clock_div.sv
- file_type: systemVerilogSource
+ file_type: vlt
files_ascentlint_waiver:
depend:
@@ -24,8 +26,23 @@ filesets:
- lint/prim_clock_div.waiver
file_type: waiver
+ files_veriblelint_waiver:
+ depend:
+ # common waivers
+ - lowrisc:lint:common
+
+generate:
+ impl:
+ generator: primgen
+ parameters:
+ prim_name: clock_div
+
targets:
default:
filesets:
- - tool_ascentlint ? (files_ascentlint_waiver)
- - files_rtl
+ - tool_verilator ? (files_verilator_waiver)
+ - tool_ascentlint ? (files_ascentlint_waiver)
+ - tool_veriblelint ? (files_veriblelint_waiver)
+ - primgen_dep
+ generate:
+ - impl
diff --git a/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/prim_subreg.core b/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/prim_subreg.core
index 8aaf94b2..9f326c2b 100644
--- a/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/prim_subreg.core
+++ b/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/prim_subreg.core
@@ -18,6 +18,7 @@ filesets:
file_type: systemVerilogSource
depend:
- lowrisc:prim:assert
+ - lowrisc:prim:mubi
files_verilator_waiver:
depend:
diff --git a/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/rtl/prim_lc_dec.sv b/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/rtl/prim_lc_dec.sv
index 78845ae5..d10e0ce3 100644
--- a/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/rtl/prim_lc_dec.sv
+++ b/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/rtl/prim_lc_dec.sv
@@ -5,9 +5,11 @@
// Decoder for life cycle control signals with additional
// input buffers.
-module prim_lc_dec (
- input lc_ctrl_pkg::lc_tx_t lc_en_i,
- output logic lc_en_dec_o
+module prim_lc_dec
+ import lc_ctrl_pkg::*;
+(
+ input lc_tx_t lc_en_i,
+ output logic lc_en_dec_o
);
logic [lc_ctrl_pkg::TxWidth-1:0] lc_en;
@@ -16,13 +18,13 @@ assign lc_en = lc_en_i;
// The buffer cells have a don't touch constraint on them
// such that synthesis tools won't collapse them
-for (genvar k = 0; k < lc_ctrl_pkg::TxWidth; k++) begin : gen_bits
+for (genvar k = 0; k < TxWidth; k++) begin : gen_bits
prim_buf u_prim_buf (
.in_i ( lc_en[k] ),
.out_o ( lc_en_out[k] )
);
end
-assign lc_en_dec_o = (lc_en_out == lc_ctrl_pkg::On);
+assign lc_en_dec_o = lc_tx_test_true_strict(lc_tx_t'(lc_en_out));
endmodule : prim_lc_dec
diff --git a/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/rtl/prim_lfsr.sv b/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/rtl/prim_lfsr.sv
index 1ced7212..d6d12445 100644
--- a/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/rtl/prim_lfsr.sv
+++ b/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/rtl/prim_lfsr.sv
@@ -5,8 +5,8 @@
// This module implements different LFSR types:
//
// 0) Galois XOR type LFSR ([1], internal XOR gates, very fast).
-// Parameterizable width from 4 to 64 bits.
-// Coefficients obtained from [2].
+// Parameterizable width from 3 to 168 bits.
+// Coefficients obtained from [3].
//
// 1) Fibonacci XNOR type LFSR, parameterizable from 3 to 168 bits.
// Coefficients obtained from [3].
@@ -73,73 +73,8 @@ module prim_lfsr #(
);
// automatically generated with util/design/get-lfsr-coeffs.py script
- localparam int unsigned GAL_XOR_LUT_OFF = 4;
- localparam logic [63:0] GAL_XOR_COEFFS [61] =
- '{ 64'h9,
- 64'h12,
- 64'h21,
- 64'h41,
- 64'h8E,
- 64'h108,
- 64'h204,
- 64'h402,
- 64'h829,
- 64'h100D,
- 64'h2015,
- 64'h4001,
- 64'h8016,
- 64'h10004,
- 64'h20013,
- 64'h40013,
- 64'h80004,
- 64'h100002,
- 64'h200001,
- 64'h400010,
- 64'h80000D,
- 64'h1000004,
- 64'h2000023,
- 64'h4000013,
- 64'h8000004,
- 64'h10000002,
- 64'h20000029,
- 64'h40000004,
- 64'h80000057,
- 64'h100000029,
- 64'h200000073,
- 64'h400000002,
- 64'h80000003B,
- 64'h100000001F,
- 64'h2000000031,
- 64'h4000000008,
- 64'h800000001C,
- 64'h10000000004,
- 64'h2000000001F,
- 64'h4000000002C,
- 64'h80000000032,
- 64'h10000000000D,
- 64'h200000000097,
- 64'h400000000010,
- 64'h80000000005B,
- 64'h1000000000038,
- 64'h200000000000E,
- 64'h4000000000025,
- 64'h8000000000004,
- 64'h10000000000023,
- 64'h2000000000003E,
- 64'h40000000000023,
- 64'h8000000000004A,
- 64'h100000000000016,
- 64'h200000000000031,
- 64'h40000000000003D,
- 64'h800000000000001,
- 64'h1000000000000013,
- 64'h2000000000000034,
- 64'h4000000000000001,
- 64'h800000000000000D };
-
- // automatically generated with get-lfsr-coeffs.py script
- localparam int unsigned FIB_XNOR_LUT_OFF = 3;
- localparam logic [167:0] FIB_XNOR_COEFFS [166] =
+ localparam int unsigned LUT_OFF = 3;
+ localparam logic [167:0] LFSR_COEFFS [166] =
'{ 168'h6,
168'hC,
168'h14,
@@ -351,10 +286,10 @@ module prim_lfsr #(
if (CustomCoeffs > 0) begin : gen_custom
assign coeffs = CustomCoeffs[LfsrDw-1:0];
end else begin : gen_lut
- assign coeffs = GAL_XOR_COEFFS[LfsrDw-GAL_XOR_LUT_OFF][LfsrDw-1:0];
+ assign coeffs = LFSR_COEFFS[LfsrDw-LUT_OFF][LfsrDw-1:0];
// check that the most significant bit of polynomial is 1
- `ASSERT_INIT(MinLfsrWidth_A, LfsrDw >= $low(GAL_XOR_COEFFS)+GAL_XOR_LUT_OFF)
- `ASSERT_INIT(MaxLfsrWidth_A, LfsrDw <= $high(GAL_XOR_COEFFS)+GAL_XOR_LUT_OFF)
+ `ASSERT_INIT(MinLfsrWidth_A, LfsrDw >= $low(LFSR_COEFFS)+LUT_OFF)
+ `ASSERT_INIT(MaxLfsrWidth_A, LfsrDw <= $high(LFSR_COEFFS)+LUT_OFF)
end
// calculate next state using internal XOR feedback and entropy input
@@ -376,10 +311,10 @@ module prim_lfsr #(
if (CustomCoeffs > 0) begin : gen_custom
assign coeffs = CustomCoeffs[LfsrDw-1:0];
end else begin : gen_lut
- assign coeffs = FIB_XNOR_COEFFS[LfsrDw-FIB_XNOR_LUT_OFF][LfsrDw-1:0];
+ assign coeffs = LFSR_COEFFS[LfsrDw-LUT_OFF][LfsrDw-1:0];
// check that the most significant bit of polynomial is 1
- `ASSERT_INIT(MinLfsrWidth_A, LfsrDw >= $low(FIB_XNOR_COEFFS)+FIB_XNOR_LUT_OFF)
- `ASSERT_INIT(MaxLfsrWidth_A, LfsrDw <= $high(FIB_XNOR_COEFFS)+FIB_XNOR_LUT_OFF)
+ `ASSERT_INIT(MinLfsrWidth_A, LfsrDw >= $low(LFSR_COEFFS)+LUT_OFF)
+ `ASSERT_INIT(MaxLfsrWidth_A, LfsrDw <= $high(LFSR_COEFFS)+LUT_OFF)
end
// calculate next state using external XNOR feedback and entropy input
diff --git a/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/rtl/prim_mubi_pkg.sv b/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/rtl/prim_mubi_pkg.sv
index b5d7c1bb..13e167db 100644
--- a/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/rtl/prim_mubi_pkg.sv
+++ b/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/rtl/prim_mubi_pkg.sv
@@ -27,7 +27,7 @@ package prim_mubi_pkg;
// This is a prerequisite for the multibit functions below to work.
`ASSERT_STATIC_IN_PACKAGE(CheckMuBi4ValsComplementary_A, MuBi4True == ~MuBi4False)
- // Test whether the value is supplied is one of the valid enumerations
+ // Test whether the multibit value is one of the valid enumerations
function automatic logic mubi4_test_invalid(mubi4_t val);
return ~(val inside {MuBi4True, MuBi4False});
endfunction : mubi4_test_invalid
@@ -140,7 +140,7 @@ package prim_mubi_pkg;
endfunction : mubi4_or_lo
// Performs a logical AND operation between two multibit values.
- // Tlos treats "False" as logical 1, and all other values are
+ // This treats "False" as logical 1, and all other values are
// treated as 0.
function automatic mubi4_t mubi4_and_lo(mubi4_t a, mubi4_t b);
return mubi4_and(a, b, MuBi4False);
@@ -159,7 +159,7 @@ package prim_mubi_pkg;
// This is a prerequisite for the multibit functions below to work.
`ASSERT_STATIC_IN_PACKAGE(CheckMuBi8ValsComplementary_A, MuBi8True == ~MuBi8False)
- // Test whether the value is supplied is one of the valid enumerations
+ // Test whether the multibit value is one of the valid enumerations
function automatic logic mubi8_test_invalid(mubi8_t val);
return ~(val inside {MuBi8True, MuBi8False});
endfunction : mubi8_test_invalid
@@ -272,7 +272,7 @@ package prim_mubi_pkg;
endfunction : mubi8_or_lo
// Performs a logical AND operation between two multibit values.
- // Tlos treats "False" as logical 1, and all other values are
+ // This treats "False" as logical 1, and all other values are
// treated as 0.
function automatic mubi8_t mubi8_and_lo(mubi8_t a, mubi8_t b);
return mubi8_and(a, b, MuBi8False);
@@ -291,7 +291,7 @@ package prim_mubi_pkg;
// This is a prerequisite for the multibit functions below to work.
`ASSERT_STATIC_IN_PACKAGE(CheckMuBi12ValsComplementary_A, MuBi12True == ~MuBi12False)
- // Test whether the value is supplied is one of the valid enumerations
+ // Test whether the multibit value is one of the valid enumerations
function automatic logic mubi12_test_invalid(mubi12_t val);
return ~(val inside {MuBi12True, MuBi12False});
endfunction : mubi12_test_invalid
@@ -404,7 +404,7 @@ package prim_mubi_pkg;
endfunction : mubi12_or_lo
// Performs a logical AND operation between two multibit values.
- // Tlos treats "False" as logical 1, and all other values are
+ // This treats "False" as logical 1, and all other values are
// treated as 0.
function automatic mubi12_t mubi12_and_lo(mubi12_t a, mubi12_t b);
return mubi12_and(a, b, MuBi12False);
@@ -423,7 +423,7 @@ package prim_mubi_pkg;
// This is a prerequisite for the multibit functions below to work.
`ASSERT_STATIC_IN_PACKAGE(CheckMuBi16ValsComplementary_A, MuBi16True == ~MuBi16False)
- // Test whether the value is supplied is one of the valid enumerations
+ // Test whether the multibit value is one of the valid enumerations
function automatic logic mubi16_test_invalid(mubi16_t val);
return ~(val inside {MuBi16True, MuBi16False});
endfunction : mubi16_test_invalid
@@ -536,7 +536,7 @@ package prim_mubi_pkg;
endfunction : mubi16_or_lo
// Performs a logical AND operation between two multibit values.
- // Tlos treats "False" as logical 1, and all other values are
+ // This treats "False" as logical 1, and all other values are
// treated as 0.
function automatic mubi16_t mubi16_and_lo(mubi16_t a, mubi16_t b);
return mubi16_and(a, b, MuBi16False);
diff --git a/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/rtl/prim_prince.sv b/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/rtl/prim_prince.sv
index 0adb98c5..779999c1 100644
--- a/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/rtl/prim_prince.sv
+++ b/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/rtl/prim_prince.sv
@@ -106,20 +106,14 @@ module prim_prince #(
//////////////
// State variable for holding the rounds
- //
- // The "split_var" hint that we pass to verilator here tells it to schedule the different parts of
- // data_state separately. This avoids an UNOPTFLAT error where it would otherwise see a dependency
- // chain
- //
- // data_state -> data_state_round -> data_state_xor -> data_state
- //
- logic [NumRoundsHalf*2+1:0][DataWidth-1:0] data_state /* verilator split_var */;
+ logic [NumRoundsHalf:0][DataWidth-1:0] data_state_lo;
+ logic [NumRoundsHalf:0][DataWidth-1:0] data_state_hi;
// pre-round XOR
always_comb begin : p_pre_round_xor
- data_state[0] = data_i ^ k0;
- data_state[0] ^= k1_d;
- data_state[0] ^= prim_cipher_pkg::PRINCE_ROUND_CONST[0][DataWidth-1:0];
+ data_state_lo[0] = data_i ^ k0;
+ data_state_lo[0] ^= k1_d;
+ data_state_lo[0] ^= prim_cipher_pkg::PRINCE_ROUND_CONST[0][DataWidth-1:0];
end
// forward pass
@@ -127,7 +121,7 @@ module prim_prince #(
logic [DataWidth-1:0] data_state_round;
if (DataWidth == 64) begin : gen_fwd_d64
always_comb begin : p_fwd_d64
- data_state_round = prim_cipher_pkg::sbox4_64bit(data_state[k-1],
+ data_state_round = prim_cipher_pkg::sbox4_64bit(data_state_lo[k-1],
prim_cipher_pkg::PRINCE_SBOX4);
data_state_round = prim_cipher_pkg::prince_mult_prime_64bit(data_state_round);
data_state_round = prim_cipher_pkg::prince_shiftrows_64bit(data_state_round,
@@ -135,7 +129,7 @@ module prim_prince #(
end
end else begin : gen_fwd_d32
always_comb begin : p_fwd_d32
- data_state_round = prim_cipher_pkg::sbox4_32bit(data_state[k-1],
+ data_state_round = prim_cipher_pkg::sbox4_32bit(data_state_lo[k-1],
prim_cipher_pkg::PRINCE_SBOX4);
data_state_round = prim_cipher_pkg::prince_mult_prime_32bit(data_state_round);
data_state_round = prim_cipher_pkg::prince_shiftrows_32bit(data_state_round,
@@ -147,9 +141,9 @@ module prim_prince #(
prim_cipher_pkg::PRINCE_ROUND_CONST[k][DataWidth-1:0];
// improved keyschedule proposed by https://eprint.iacr.org/2014/656.pdf
if (k % 2 == 1) begin : gen_fwd_key_odd
- assign data_state[k] = data_state_xor ^ k0_new_d;
+ assign data_state_lo[k] = data_state_xor ^ k0_new_d;
end else begin : gen_fwd_key_even
- assign data_state[k] = data_state_xor ^ k1_d;
+ assign data_state_lo[k] = data_state_xor ^ k1_d;
end
end
@@ -157,7 +151,7 @@ module prim_prince #(
logic [DataWidth-1:0] data_state_middle_d, data_state_middle_q, data_state_middle;
if (DataWidth == 64) begin : gen_middle_d64
always_comb begin : p_middle_d64
- data_state_middle_d = prim_cipher_pkg::sbox4_64bit(data_state[NumRoundsHalf],
+ data_state_middle_d = prim_cipher_pkg::sbox4_64bit(data_state_lo[NumRoundsHalf],
prim_cipher_pkg::PRINCE_SBOX4);
data_state_middle = prim_cipher_pkg::prince_mult_prime_64bit(data_state_middle_q);
data_state_middle = prim_cipher_pkg::sbox4_64bit(data_state_middle,
@@ -193,16 +187,16 @@ module prim_prince #(
assign valid_o = valid_i;
end
- assign data_state[NumRoundsHalf+1] = data_state_middle;
+ assign data_state_hi[0] = data_state_middle;
// backward pass
for (genvar k = 1; k <= NumRoundsHalf; k++) begin : gen_bwd_pass
logic [DataWidth-1:0] data_state_xor0, data_state_xor1;
// improved keyschedule proposed by https://eprint.iacr.org/2014/656.pdf
if ((NumRoundsHalf + k + 1) % 2 == 1) begin : gen_bkwd_key_odd
- assign data_state_xor0 = data_state[NumRoundsHalf+k] ^ k0_new_q;
+ assign data_state_xor0 = data_state_hi[k-1] ^ k0_new_q;
end else begin : gen_bkwd_key_even
- assign data_state_xor0 = data_state[NumRoundsHalf+k] ^ k1_q;
+ assign data_state_xor0 = data_state_hi[k-1] ^ k1_q;
end
// the construction is reflective, hence the subtraction with NumRoundsHalf
assign data_state_xor1 = data_state_xor0 ^
@@ -214,7 +208,7 @@ module prim_prince #(
data_state_bwd = prim_cipher_pkg::prince_shiftrows_64bit(data_state_xor1,
prim_cipher_pkg::PRINCE_SHIFT_ROWS64_INV);
data_state_bwd = prim_cipher_pkg::prince_mult_prime_64bit(data_state_bwd);
- data_state[NumRoundsHalf+k+1] = prim_cipher_pkg::sbox4_64bit(data_state_bwd,
+ data_state_hi[k] = prim_cipher_pkg::sbox4_64bit(data_state_bwd,
prim_cipher_pkg::PRINCE_SBOX4_INV);
end
end else begin : gen_bwd_d32
@@ -222,7 +216,7 @@ module prim_prince #(
data_state_bwd = prim_cipher_pkg::prince_shiftrows_32bit(data_state_xor1,
prim_cipher_pkg::PRINCE_SHIFT_ROWS64_INV);
data_state_bwd = prim_cipher_pkg::prince_mult_prime_32bit(data_state_bwd);
- data_state[NumRoundsHalf+k+1] = prim_cipher_pkg::sbox4_32bit(data_state_bwd,
+ data_state_hi[k] = prim_cipher_pkg::sbox4_32bit(data_state_bwd,
prim_cipher_pkg::PRINCE_SBOX4_INV);
end
end
@@ -230,7 +224,7 @@ module prim_prince #(
// post-rounds
always_comb begin : p_post_round_xor
- data_o = data_state[2*NumRoundsHalf+1] ^
+ data_o = data_state_hi[NumRoundsHalf] ^
prim_cipher_pkg::PRINCE_ROUND_CONST[11][DataWidth-1:0];
data_o ^= k1_q;
data_o ^= k0_prime_q;
diff --git a/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/rtl/prim_subreg.sv b/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/rtl/prim_subreg.sv
index 1215e430..0210928b 100644
--- a/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/rtl/prim_subreg.sv
+++ b/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/rtl/prim_subreg.sv
@@ -9,7 +9,8 @@ module prim_subreg
#(
parameter int DW = 32,
parameter sw_access_e SwAccess = SwAccessRW,
- parameter logic [DW-1:0] RESVAL = '0 // reset value
+ parameter logic [DW-1:0] RESVAL = '0 , // reset value
+ parameter bit Mubi = 1'b0
) (
input clk_i,
input rst_ni,
@@ -39,7 +40,8 @@ module prim_subreg
prim_subreg_arb #(
.DW ( DW ),
- .SwAccess ( SwAccess )
+ .SwAccess ( SwAccess ),
+ .Mubi ( Mubi )
) wr_en_data_arb (
.we,
.wd,
@@ -61,6 +63,13 @@ module prim_subreg
// feed back out for consolidation
assign ds = wr_en ? wr_data : qs;
assign qe = wr_en;
- assign qs = q;
+
+ if (SwAccess == SwAccessRC) begin : gen_rc
+ // In case of a SW RC colliding with a HW write, SW gets the value written by HW
+ // but the register is cleared to 0. See #5416 for a discussion.
+ assign qs = de && we ? d : q;
+ end else begin : gen_no_rc
+ assign qs = q;
+ end
endmodule
diff --git a/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/rtl/prim_subreg_arb.sv b/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/rtl/prim_subreg_arb.sv
index 80033366..ba8a2821 100644
--- a/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/rtl/prim_subreg_arb.sv
+++ b/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/rtl/prim_subreg_arb.sv
@@ -8,7 +8,8 @@ module prim_subreg_arb
import prim_subreg_pkg::*;
#(
parameter int DW = 32,
- parameter sw_access_e SwAccess = SwAccessRW
+ parameter sw_access_e SwAccess = SwAccessRW,
+ parameter bit Mubi = 1'b0
) (
// From SW: valid for RW, WO, W1C, W1S, W0C, RC.
// In case of RC, top connects read pulse to we.
@@ -26,13 +27,18 @@ module prim_subreg_arb
output logic wr_en,
output logic [DW-1:0] wr_data
);
+ import prim_mubi_pkg::*;
if (SwAccess inside {SwAccessRW, SwAccessWO}) begin : gen_w
assign wr_en = we | de;
assign wr_data = (we == 1'b1) ? wd : d; // SW higher priority
// Unused q - Prevent lint errors.
logic [DW-1:0] unused_q;
+ //VCS coverage off
+ // pragma coverage off
assign unused_q = q;
+ //VCS coverage on
+ // pragma coverage on
end else if (SwAccess == SwAccessRO) begin : gen_ro
assign wr_en = de;
assign wr_data = d;
@@ -40,32 +46,128 @@ module prim_subreg_arb
logic unused_we;
logic [DW-1:0] unused_wd;
logic [DW-1:0] unused_q;
+ //VCS coverage off
+ // pragma coverage off
assign unused_we = we;
assign unused_wd = wd;
assign unused_q = q;
+ //VCS coverage on
+ // pragma coverage on
end else if (SwAccess == SwAccessW1S) begin : gen_w1s
// If SwAccess is W1S, then assume hw tries to clear.
// So, give a chance HW to clear when SW tries to set.
// If both try to set/clr at the same bit pos, SW wins.
assign wr_en = we | de;
- assign wr_data = (de ? d : q) | (we ? wd : '0);
+ if (Mubi) begin : gen_mubi
+ if (DW == 4) begin : gen_mubi4
+ assign wr_data = prim_mubi_pkg::mubi4_or_hi(prim_mubi_pkg::mubi4_t'(de ? d : q),
+ (we ? prim_mubi_pkg::mubi4_t'(wd) :
+ prim_mubi_pkg::MuBi4False));
+ end else if (DW == 8) begin : gen_mubi8
+ assign wr_data = prim_mubi_pkg::mubi8_or_hi(prim_mubi_pkg::mubi8_t'(de ? d : q),
+ (we ? prim_mubi_pkg::mubi8_t'(wd) :
+ prim_mubi_pkg::MuBi8False));
+ end else if (DW == 12) begin : gen_mubi12
+ assign wr_data = prim_mubi_pkg::mubi12_or_hi(prim_mubi_pkg::mubi12_t'(de ? d : q),
+ (we ? prim_mubi_pkg::mubi12_t'(wd) :
+ prim_mubi_pkg::MuBi12False));
+ end else if (DW == 16) begin : gen_mubi16
+ assign wr_data = prim_mubi_pkg::mubi16_or_hi(prim_mubi_pkg::mubi16_t'(de ? d : q),
+ (we ? prim_mubi_pkg::mubi16_t'(wd) :
+ prim_mubi_pkg::MuBi16False));
+ end else begin : gen_invalid_mubi
+ $error("%m: Invalid width for MuBi");
+ end
+ end else begin : gen_non_mubi
+ assign wr_data = (de ? d : q) | (we ? wd : '0);
+ end
end else if (SwAccess == SwAccessW1C) begin : gen_w1c
// If SwAccess is W1C, then assume hw tries to set.
// So, give a chance HW to set when SW tries to clear.
// If both try to set/clr at the same bit pos, SW wins.
assign wr_en = we | de;
- assign wr_data = (de ? d : q) & (we ? ~wd : '1);
+ if (Mubi) begin : gen_mubi
+ if (DW == 4) begin : gen_mubi4
+ assign wr_data = prim_mubi_pkg::mubi4_and_hi(prim_mubi_pkg::mubi4_t'(de ? d : q),
+ (we ? prim_mubi_pkg::mubi4_t'(~wd) :
+ prim_mubi_pkg::MuBi4True));
+ end else if (DW == 8) begin : gen_mubi8
+ assign wr_data = prim_mubi_pkg::mubi8_and_hi(prim_mubi_pkg::mubi8_t'(de ? d : q),
+ (we ? prim_mubi_pkg::mubi8_t'(~wd) :
+ prim_mubi_pkg::MuBi8True));
+ end else if (DW == 12) begin : gen_mubi12
+ assign wr_data = prim_mubi_pkg::mubi12_and_hi(prim_mubi_pkg::mubi12_t'(de ? d : q),
+ (we ? prim_mubi_pkg::mubi12_t'(~wd) :
+ prim_mubi_pkg::MuBi12True));
+ end else if (DW == 16) begin : gen_mubi16
+ assign wr_data = prim_mubi_pkg::mubi16_and_hi(prim_mubi_pkg::mubi16_t'(de ? d : q),
+ (we ? prim_mubi_pkg::mubi16_t'(~wd) :
+ prim_mubi_pkg::MuBi16True));
+ end else begin : gen_invalid_mubi
+ $error("%m: Invalid width for MuBi");
+ end
+ end else begin : gen_non_mubi
+ assign wr_data = (de ? d : q) & (we ? ~wd : '1);
+ end
end else if (SwAccess == SwAccessW0C) begin : gen_w0c
assign wr_en = we | de;
- assign wr_data = (de ? d : q) & (we ? wd : '1);
+ if (Mubi) begin : gen_mubi
+ if (DW == 4) begin : gen_mubi4
+ assign wr_data = prim_mubi_pkg::mubi4_and_hi(prim_mubi_pkg::mubi4_t'(de ? d : q),
+ (we ? prim_mubi_pkg::mubi4_t'(wd) :
+ prim_mubi_pkg::MuBi4True));
+ end else if (DW == 8) begin : gen_mubi8
+ assign wr_data = prim_mubi_pkg::mubi8_and_hi(prim_mubi_pkg::mubi8_t'(de ? d : q),
+ (we ? prim_mubi_pkg::mubi8_t'(wd) :
+ prim_mubi_pkg::MuBi8True));
+ end else if (DW == 12) begin : gen_mubi12
+ assign wr_data = prim_mubi_pkg::mubi12_and_hi(prim_mubi_pkg::mubi12_t'(de ? d : q),
+ (we ? prim_mubi_pkg::mubi12_t'(wd) :
+ prim_mubi_pkg::MuBi12True));
+ end else if (DW == 16) begin : gen_mubi16
+ assign wr_data = prim_mubi_pkg::mubi16_and_hi(prim_mubi_pkg::mubi16_t'(de ? d : q),
+ (we ? prim_mubi_pkg::mubi16_t'(wd) :
+ prim_mubi_pkg::MuBi16True));
+ end else begin : gen_invalid_mubi
+ $error("%m: Invalid width for MuBi");
+ end
+ end else begin : gen_non_mubi
+ assign wr_data = (de ? d : q) & (we ? wd : '1);
+ end
end else if (SwAccess == SwAccessRC) begin : gen_rc
// This swtype is not recommended but exists for compatibility.
// WARN: we signal is actually read signal not write enable.
assign wr_en = we | de;
- assign wr_data = (de ? d : q) & (we ? '0 : '1);
+ if (Mubi) begin : gen_mubi
+ if (DW == 4) begin : gen_mubi4
+ assign wr_data = prim_mubi_pkg::mubi4_and_hi(prim_mubi_pkg::mubi4_t'(de ? d : q),
+ (we ? prim_mubi_pkg::MuBi4False :
+ prim_mubi_pkg::MuBi4True));
+ end else if (DW == 8) begin : gen_mubi8
+ assign wr_data = prim_mubi_pkg::mubi8_and_hi(prim_mubi_pkg::mubi8_t'(de ? d : q),
+ (we ? prim_mubi_pkg::MuBi8False :
+ prim_mubi_pkg::MuBi8True));
+ end else if (DW == 12) begin : gen_mubi12
+ assign wr_data = prim_mubi_pkg::mubi12_and_hi(prim_mubi_pkg::mubi12_t'(de ? d : q),
+ (we ? prim_mubi_pkg::MuBi12False :
+ prim_mubi_pkg::MuBi12True));
+ end else if (DW == 16) begin : gen_mubi16
+ assign wr_data = prim_mubi_pkg::mubi16_and_hi(prim_mubi_pkg::mubi16_t'(de ? d : q),
+ (we ? prim_mubi_pkg::mubi16_t'(wd) :
+ prim_mubi_pkg::MuBi16True));
+ end else begin : gen_invalid_mubi
+ $error("%m: Invalid width for MuBi");
+ end
+ end else begin : gen_non_mubi
+ assign wr_data = (de ? d : q) & (we ? '0 : '1);
+ end
// Unused wd - Prevent lint errors.
logic [DW-1:0] unused_wd;
+ //VCS coverage off
+ // pragma coverage off
assign unused_wd = wd;
+ //VCS coverage on
+ // pragma coverage on
end else begin : gen_hw
assign wr_en = de;
assign wr_data = d;
@@ -73,9 +175,13 @@ module prim_subreg_arb
logic unused_we;
logic [DW-1:0] unused_wd;
logic [DW-1:0] unused_q;
+ //VCS coverage off
+ // pragma coverage off
assign unused_we = we;
assign unused_wd = wd;
assign unused_q = q;
+ //VCS coverage on
+ // pragma coverage on
end
endmodule
diff --git a/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/rtl/prim_subreg_shadow.sv b/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/rtl/prim_subreg_shadow.sv
index 5cdbd6f8..e51e3b22 100644
--- a/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/rtl/prim_subreg_shadow.sv
+++ b/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/rtl/prim_subreg_shadow.sv
@@ -11,7 +11,8 @@ module prim_subreg_shadow
#(
parameter int DW = 32,
parameter sw_access_e SwAccess = SwAccessRW,
- parameter logic [DW-1:0] RESVAL = '0 // reset value
+ parameter logic [DW-1:0] RESVAL = '0, // reset value
+ parameter bit Mubi = 1'b0
) (
input clk_i,
input rst_ni,
@@ -187,4 +188,7 @@ module prim_subreg_shadow
assign q = committed_q;
assign qs = committed_qs;
+ // prim_subreg_shadow does not support multi-bit software access yet
+ `ASSERT_NEVER(MubiIsNotYetSupported_A, Mubi)
+
endmodule
diff --git a/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim_generic/lint/prim_generic_clock_div.waiver b/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim_generic/lint/prim_generic_clock_div.waiver
new file mode 100644
index 00000000..ada8d2b0
--- /dev/null
+++ b/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim_generic/lint/prim_generic_clock_div.waiver
@@ -0,0 +1,20 @@
+# Copyright lowRISC contributors.
+# Licensed under the Apache License, Version 2.0, see LICENSE for details.
+# SPDX-License-Identifier: Apache-2.0
+#
+# waiver file for prim_generic_clock_div
+
+waive -rules CLOCK_EDGE -location {prim_generic_clock_div.sv} -msg {Falling edge of clock 'clk_i' used here, should use rising edge} \
+ -comment "The clock switch signal is synchronized on negative edge to ensure it is away from any transition"
+
+waive -rules DUAL_EDGE_CLOCK -location {prim_generic_clock_div.sv} -regexp {.*} \
+ -comment "The clock switch signal is synchronized on negative edge to ensure it is away from any transition"
+
+waive -rules CLOCK_MUX -location {prim_generic_clock_div.sv} -regexp {.*reaches a multiplexer here, used as a clock.*} \
+ -comment "A mux is used during scan bypass, and for switching between div by 2 and div by 1 clocks"
+
+waive -rules CLOCK_USE -location {prim_generic_clock_div.sv} -regexp {'clk_i' is connected to 'prim_clock_mux2' port 'clk1_i', and used as a clock} \
+ -comment "This clock mux usage is OK."
+
+waive -rules SAME_NAME_TYPE -location {prim_generic_clock_div.sv} -regexp {'ResetValue' is used as a parameter here, and as an enumeration value at} \
+ -comment "Reused parameter name."
diff --git a/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim_generic/prim_generic_clock_div.core b/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim_generic/prim_generic_clock_div.core
new file mode 100644
index 00000000..c5092645
--- /dev/null
+++ b/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim_generic/prim_generic_clock_div.core
@@ -0,0 +1,31 @@
+CAPI=2:
+# Copyright lowRISC contributors.
+# Licensed under the Apache License, Version 2.0, see LICENSE for details.
+# SPDX-License-Identifier: Apache-2.0
+
+name: "lowrisc:prim_generic:clock_div"
+description: "Generic clock divide"
+filesets:
+ files_rtl:
+ depend:
+ - lowrisc:prim:prim_pkg
+ - lowrisc:prim:flop
+ - lowrisc:prim:clock_inv
+ - lowrisc:prim:clock_buf
+ files:
+ - rtl/prim_generic_clock_div.sv
+ file_type: systemVerilogSource
+
+ files_ascentlint_waiver:
+ depend:
+ # common waivers
+ - lowrisc:lint:common
+ files:
+ - lint/prim_generic_clock_div.waiver
+ file_type: waiver
+
+targets:
+ default:
+ filesets:
+ - tool_ascentlint ? (files_ascentlint_waiver)
+ - files_rtl
diff --git a/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/rtl/prim_clock_div.sv b/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim_generic/rtl/prim_generic_clock_div.sv
similarity index 98%
rename from vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/rtl/prim_clock_div.sv
rename to vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim_generic/rtl/prim_generic_clock_div.sv
index 0c3418b6..da326623 100644
--- a/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim/rtl/prim_clock_div.sv
+++ b/vendor/lowrisc_ibex/vendor/lowrisc_ip/ip/prim_generic/rtl/prim_generic_clock_div.sv
@@ -4,7 +4,7 @@
`include "prim_assert.sv"
-module prim_clock_div #(
+module prim_generic_clock_div #(
parameter int unsigned Divisor = 2,
parameter logic ResetValue = 0
) (
diff --git a/vendor/lowrisc_ibex/vendor/lowrisc_ip/lint/README.md b/vendor/lowrisc_ibex/vendor/lowrisc_ip/lint/README.md
index 8ed45a09..55c57784 100644
--- a/vendor/lowrisc_ibex/vendor/lowrisc_ip/lint/README.md
+++ b/vendor/lowrisc_ibex/vendor/lowrisc_ip/lint/README.md
@@ -13,7 +13,7 @@ The lint flow run scripts and waiver files are available in the GitHub repositor
However, the _"lowRISC Lint Rules"_ are available as part of the default policies in AscentLint release 2019.A.p3 or newer (as `LRLR-v1.0.policy`).
This enables designers that have access to this tool to run the lint flow provided locally on their premises.
-Our linting flow leverages FuseSoC to resolve dependencies, build file lists and call the linting tools. See [here](https://github.com/olofk/fusesoc) for an introduction to this open source package manager and [here](https://opentitan.org/guides/getting_started) for installation instructions.
+Our linting flow leverages FuseSoC to resolve dependencies, build file lists and call the linting tools. See [here](https://github.com/olofk/fusesoc) for an introduction to this open source package manager and [here](../../doc/getting_started/README.md) for installation instructions.
In order to run lint on a [comportable IP](../../doc/contributing/hw/comportability/README.md) block, the corresponding FuseSoC core file must have a lint target and include (optional) waiver files as shown in the following example taken from the FuseSoC core of the AES comportable IP:
```
diff --git a/vendor/lowrisc_ibex/vendor/lowrisc_ip/util/dvsim/Deploy.py b/vendor/lowrisc_ibex/vendor/lowrisc_ip/util/dvsim/Deploy.py
index c713bd53..78cf8118 100644
--- a/vendor/lowrisc_ibex/vendor/lowrisc_ip/util/dvsim/Deploy.py
+++ b/vendor/lowrisc_ibex/vendor/lowrisc_ip/util/dvsim/Deploy.py
@@ -567,7 +567,7 @@ def get_seed():
if RunTest.fixed_seed:
return RunTest.fixed_seed
for i in range(1000):
- seed = random.getrandbits(32)
+ seed = random.getrandbits(256)
RunTest.seeds.append(seed)
return RunTest.seeds.pop(0)
diff --git a/vendor/lowrisc_ibex/vendor/lowrisc_ip/util/dvsim/FlowCfg.py b/vendor/lowrisc_ibex/vendor/lowrisc_ip/util/dvsim/FlowCfg.py
index 2bfae854..8cebeec9 100644
--- a/vendor/lowrisc_ibex/vendor/lowrisc_ip/util/dvsim/FlowCfg.py
+++ b/vendor/lowrisc_ibex/vendor/lowrisc_ip/util/dvsim/FlowCfg.py
@@ -2,18 +2,17 @@
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
-import datetime
import json
import logging as log
import os
import pprint
-import re
import subprocess
import sys
from pathlib import Path
from shutil import which
import hjson
+from results_server import NoGCPError, ResultsServer
from CfgJson import set_target_attribute
from LauncherFactory import get_launcher_cls
from Scheduler import Scheduler
@@ -147,11 +146,8 @@ def __init__(self, flow_cfg_file, hjson_data, args, mk_config):
self.rel_path / "latest")
self.results_page = (self.results_dir / self.results_html_name)
- tmp_path = self.results_server + "/" + self.rel_path
- self.results_server_path = self.results_server_prefix + tmp_path
- tmp_path += "/latest"
- self.results_server_dir = self.results_server_prefix + tmp_path
- tmp_path += "/" + self.results_html_name
+ tmp_path = (self.results_server + "/" + self.rel_path +
+ "/latest/" + self.results_html_name)
self.results_server_page = self.results_server_prefix + tmp_path
self.results_server_url = "https://" + tmp_path
@@ -470,182 +466,130 @@ def _get_results_page_link(self, relative_to, link_text=''):
relative_to)
return "[%s](%s)" % (link_text, relative_link)
- def _publish_results(self):
+ def _publish_results(self, results_server: ResultsServer):
'''Publish results to the opentitan web server.
- Results are uploaded to {results_server_page}.
- If the 'latest' directory exists, then it is renamed to its 'timestamp'
- directory. If the list of directories in this area is > 14, then the
- oldest entry is removed. Links to the last 7 regression results are
- appended at the end if the results page.
+ Results are uploaded to {results_server_page}. If the 'latest'
+ directory exists, then it is renamed to its 'timestamp' directory.
+ Links to the last 7 regression results are appended at the end if the
+ results page.
'''
- if which('gsutil') is None or which('gcloud') is None:
- log.error("Google cloud SDK not installed! Cannot access the "
- "results server")
- return
-
# Timeformat for moving the dir
tf = "%Y.%m.%d_%H.%M.%S"
- # Extract the timestamp of the existing self.results_server_page
- cmd = (self.results_server_cmd + " ls -L " +
- self.results_server_page + " | grep \'Creation time:\'")
-
- log.log(VERBOSE, cmd)
- cmd_output = subprocess.run(cmd,
- shell=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.DEVNULL)
- log.log(VERBOSE, cmd_output.stdout.decode("utf-8"))
- old_results_ts = cmd_output.stdout.decode("utf-8")
- old_results_ts = old_results_ts.replace("Creation time:", "")
- old_results_ts = old_results_ts.strip()
-
- # Move the 'latest' to its timestamp directory if lookup succeeded
- if cmd_output.returncode == 0:
- try:
- if old_results_ts != "":
- ts = datetime.datetime.strptime(
- old_results_ts, "%a, %d %b %Y %H:%M:%S %Z")
- old_results_ts = ts.strftime(tf)
- except ValueError as e:
- log.error(
- "%s: \'%s\' Timestamp conversion value error raised!", e)
- old_results_ts = ""
-
- # If the timestamp conversion failed - then create a dummy one with
- # yesterday's date.
- if old_results_ts == "":
- log.log(VERBOSE,
- "Creating dummy timestamp with yesterday's date")
- ts = datetime.datetime.now(
- datetime.timezone.utc) - datetime.timedelta(days=1)
- old_results_ts = ts.strftime(tf)
-
- old_results_dir = self.results_server_path + "/" + old_results_ts
- cmd = (self.results_server_cmd + " mv " + self.results_server_dir +
- " " + old_results_dir)
- log.log(VERBOSE, cmd)
- cmd_output = subprocess.run(cmd,
- shell=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.DEVNULL)
- log.log(VERBOSE, cmd_output.stdout.decode("utf-8"))
- if cmd_output.returncode != 0:
- log.error("Failed to mv old results page \"%s\" to \"%s\"!",
- self.results_server_dir, old_results_dir)
+ # Maximum number of links to add to previous results pages at the
+ # bottom of the page that we're generating.
+ max_old_page_links = 7
+
+ # We're going to try to put things in a directory called "latest". But
+ # there's probably something with that name already. If so, we want to
+ # move the thing that's there already to be at a path based on its
+ # creation time.
+
+ # Try to get the creation time of any existing "latest/report.html"
+ latest_dir = '{}/latest'.format(self.rel_path)
+ latest_report_path = '{}/report.html'.format(latest_dir)
+ old_results_time = results_server.get_creation_time(latest_report_path)
+
+ if old_results_time is not None:
+ # If there is indeed a creation time, we will need to move the
+ # "latest" directory to a path based on that time.
+ old_results_ts = old_results_time.strftime(tf)
+ backup_dir = '{}/{}'.format(self.rel_path, old_results_ts)
+
+ results_server.mv(latest_dir, backup_dir)
+
+ # Do an ls in the results root dir to check what directories exist. If
+ # something goes wrong then continue, behaving as if there were none.
+ try:
+ existing_paths = results_server.ls(self.rel_path)
+ except subprocess.CalledProcessError:
+ log.error('Failed to list {} with gsutil. '
+ 'Acting as if there was nothing.'
+ .format(self.rel_path))
+ existing_paths = []
# Do an ls in the results root dir to check what directories exist.
- results_dirs = []
- cmd = self.results_server_cmd + " ls " + self.results_server_path
- log.log(VERBOSE, cmd)
- cmd_output = subprocess.run(args=cmd,
- shell=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.DEVNULL)
- log.log(VERBOSE, cmd_output.stdout.decode("utf-8"))
- if cmd_output.returncode == 0:
- # Some directories exist. Check if 'latest' is one of them
- results_dirs = cmd_output.stdout.decode("utf-8").strip()
- results_dirs = results_dirs.split("\n")
- else:
- log.log(VERBOSE, "Failed to run \"%s\"!", cmd)
-
- # Start pruning
- log.log(VERBOSE, "Pruning %s area to limit last 7 results",
- self.results_server_path)
-
- rdirs = []
- for rdir in results_dirs:
- dirname = rdir.replace(self.results_server_path, '')
- dirname = dirname.replace('/', '')
- # Only track history directories with format
- # "year.month.date_hour.min.sec".
- if not bool(re.match(r"[\d*.]*_[\d*.]*", dirname)):
- continue
- rdirs.append(dirname)
- rdirs.sort(reverse=True)
-
- rm_cmd = ""
+ existing_basenames = []
+ for existing_path in existing_paths:
+ # Here, existing_path will start with "gs://" and should end in a
+ # time or with "latest" and then a trailing '/'. Split it to find
+ # that the directory basename. The rsplit() here will result in
+ # ["some_path", "basename_we_want", ""]. Grab the middle.
+ existing_parts = existing_path.rsplit('/', 2)
+ existing_basenames.append(existing_parts[1])
+
+ # We want to add pointers to existing directories with recent
+ # timestamps. Sort in reverse (time and lexicographic!) order, then
+ # take the top few results.
+ existing_basenames.sort(reverse=True)
+
history_txt = "\n## Past Results\n"
history_txt += "- [Latest](../latest/" + self.results_html_name + ")\n"
- if len(rdirs) > 0:
- for i in range(len(rdirs)):
- if i < 7:
- rdir_url = '../' + rdirs[i] + "/" + self.results_html_name
- history_txt += "- [{}]({})\n".format(rdirs[i], rdir_url)
- elif i > 14:
- rm_cmd += self.results_server_path + '/' + rdirs[i] + " "
-
- if rm_cmd != "":
- rm_cmd = self.results_server_cmd + " -m rm -r " + rm_cmd + "; "
+ for existing_basename in existing_basenames[:max_old_page_links]:
+ relative_url = '../{}/{}'.format(existing_basename,
+ self.results_html_name)
+ history_txt += '- [{}]({})\n'.format(existing_basename,
+ relative_url)
# Append the history to the results.
publish_results_md = self.publish_results_md or self.results_md
publish_results_md = publish_results_md + history_txt
- # Publish the results page.
- # First, write the results html and json files to the scratch area.
- json_str = (json.dumps(self.results_dict)
- if hasattr(self, 'results_dict')
- else None)
+ # Export any results dictionary to json
+ suffixes = ['html']
+ json_str = None
+ if hasattr(self, 'results_dict'):
+ suffixes.append('json')
+ json_str = json.dumps(self.results_dict)
+
+ # Export our markdown page to HTML and dump the json to a local file.
+ # These are called publish.html and publish.json locally, but we'll
+ # rename them as part of the upload.
self.write_results("publish.html", publish_results_md, json_str)
- results_html_file = self.results_dir / "publish.html"
- # Second, copy the files to the server.
+ html_name_no_suffix = self.results_html_name.split('.', 1)[0]
+ dst_no_suffix = '{}/latest/{}'.format(self.rel_path,
+ html_name_no_suffix)
+
+ # Now copy our local files over to the server
log.info("Publishing results to %s", self.results_server_url)
- suffixes = ['html'] + (['json'] if json_str is not None else [])
for suffix in suffixes:
- src = str(Path(results_html_file).with_suffix('.' + suffix))
- dst = self.results_server_page
- # results_server_page has '.html' as suffix. If that does not match
- # suffix, change it.
- if suffix != 'html':
- assert dst[-5:] == '.html'
- dst = dst[:-5] + '.json'
- cmd = f"{self.results_server_cmd} cp {src} {dst}"
- log.log(VERBOSE, cmd)
- try:
- cmd_output = subprocess.run(args=cmd,
- shell=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
- log.log(VERBOSE, cmd_output.stdout.decode("utf-8"))
- except Exception as e:
- log.error("%s: Failed to publish results:\n\"%s\"", e, str(cmd))
+ src = "{}/publish.{}".format(self.results_dir, suffix)
+ dst = "{}.{}".format(dst_no_suffix, suffix)
+ results_server.upload(src, dst)
def publish_results(self):
- '''Public facing API for publishing results to the opentitan web
- server.
- '''
+ """Publish these results to the opentitan web server."""
+ try:
+ server_handle = ResultsServer(self.results_server)
+ except NoGCPError:
+ # We failed to create a results server object at all, so we're not going to be able
+ # to publish any results right now.
+ log.error("Google Cloud SDK not installed. Cannot access the "
+ "results server")
+ return
+
for item in self.cfgs:
- item._publish_results()
+ item._publish_results(server_handle)
if self.is_primary_cfg:
- self.publish_results_summary()
+ self.publish_results_summary(server_handle)
# Trigger a rebuild of the site/docs which may pull new data from
# the published results.
self.rebuild_site()
- def publish_results_summary(self):
+ def publish_results_summary(self, results_server: ResultsServer):
'''Public facing API for publishing md format results to the opentitan
web server.
'''
# Publish the results page.
log.info("Publishing results summary to %s", self.results_server_url)
- cmd = (self.results_server_cmd + " cp " +
- str(self.results_page) + " " +
- self.results_server_page)
- log.log(VERBOSE, cmd)
- try:
- cmd_output = subprocess.run(args=cmd,
- shell=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
- log.log(VERBOSE, cmd_output.stdout.decode("utf-8"))
- except Exception as e:
- log.error("%s: Failed to publish results:\n\"%s\"", e, str(cmd))
+
+ latest_dir = '{}/latest'.format(self.rel_path)
+ latest_report_path = '{}/report.html'.format(latest_dir)
+ results_server.upload(self.results_page, latest_report_path)
def rebuild_site(self):
'''Trigger a rebuild of the opentitan.org site using a Cloud Build trigger.
@@ -657,7 +601,7 @@ def rebuild_site(self):
triggered through an appropriately-authenticated Google Cloud SDK command. This
function calls that command.
'''
- if which('gsutil') is None or which('gcloud') is None:
+ if which('gcloud') is None:
log.error("Google Cloud SDK not installed!"
"Cannot access the Cloud Build API to trigger a site rebuild.")
return
diff --git a/vendor/lowrisc_ibex/vendor/lowrisc_ip/util/dvsim/README.md b/vendor/lowrisc_ibex/vendor/lowrisc_ip/util/dvsim/README.md
index 8c391b18..357d5272 100644
--- a/vendor/lowrisc_ibex/vendor/lowrisc_ip/util/dvsim/README.md
+++ b/vendor/lowrisc_ibex/vendor/lowrisc_ip/util/dvsim/README.md
@@ -18,7 +18,7 @@ The following flows are currently supported:
# Installation
-Clone the OpenTitan repository by following the [Getting Started](https://opentitan.org/guides/getting_started) steps.
+Clone the OpenTitan repository by following the [Getting Started](../../doc/getting_started/README.md) steps.
The rest of the documentation will assume `$REPO_TOP` as the root of the local OpenTitan repository.
DVSim is located at `$REPO_TOP/util/dvsim/dvsim.py`.
diff --git a/vendor/lowrisc_ibex/vendor/lowrisc_ip/util/dvsim/SimCfg.py b/vendor/lowrisc_ibex/vendor/lowrisc_ip/util/dvsim/SimCfg.py
index 7d2658c4..31bd3a3c 100644
--- a/vendor/lowrisc_ibex/vendor/lowrisc_ip/util/dvsim/SimCfg.py
+++ b/vendor/lowrisc_ibex/vendor/lowrisc_ip/util/dvsim/SimCfg.py
@@ -12,7 +12,6 @@
import logging as log
import os
import re
-import subprocess
import sys
from collections import OrderedDict
from pathlib import Path
@@ -21,10 +20,11 @@
from Deploy import CompileSim, CovAnalyze, CovMerge, CovReport, CovUnr, RunTest
from FlowCfg import FlowCfg
from Modes import BuildModes, Modes, Regressions, RunModes, Tests
+from results_server import ResultsServer
from SimResults import SimResults
from tabulate import tabulate
from Testplan import Testplan
-from utils import TS_FORMAT, VERBOSE, rm_path
+from utils import TS_FORMAT, rm_path
# This affects the bucketizer failure report.
_MAX_UNIQUE_TESTS = 5
@@ -597,6 +597,9 @@ def _test_result_to_dict(tr) -> dict:
results['report_type'] = 'simulation'
results['tool'] = self.tool.lower()
+ if self.build_seed and not self.run_only:
+ results['build_seed'] = str(self.build_seed)
+
# Create dictionary to store results.
results['results'] = {
'testpoints': [],
@@ -683,7 +686,7 @@ def _test_result_to_dict(tr) -> dict:
frs = []
for test, line, context in test_runs:
frs.append({
- 'seed': test.seed,
+ 'seed': str(test.seed),
'failure_message': {
'log_file_path': test.get_log_path(),
'log_file_line_num': line,
@@ -790,12 +793,34 @@ def create_bucket_report(buckets):
# Add path to testplan, only if it has entries (i.e., its not dummy).
if self.testplan.testpoints:
if hasattr(self, "testplan_doc_path"):
- testplan = "https://{}/{}".format(self.doc_server,
- self.testplan_doc_path)
+ # The key 'testplan_doc_path' can override the path to the testplan file
+ # if it's not in the default location relative to the sim_cfg.
+ relative_path_to_testplan = (Path(self.testplan_doc_path)
+ .relative_to(Path(self.proj_root)))
+ testplan = "https://{}/{}".format(
+ self.book,
+ str(relative_path_to_testplan).replace("hjson", "html")
+ )
else:
- testplan = "https://{}/{}".format(self.doc_server,
- self.rel_path)
- testplan = testplan.replace("/dv", "/doc/dv/#testplan")
+ # Default filesystem layout for an ip block
+ # ├── data
+ # │ ├── gpio_testplan.hjson
+ # │ └── <...>
+ # ├── doc
+ # │ ├── checklist.md
+ # │ ├── programmers_guide.md
+ # │ ├── theory_of_operation.md
+ # │ └── <...>
+ # ├── dv
+ # │ ├── gpio_sim_cfg.hjson
+ # │ └── <...>
+
+ # self.rel_path gives us the path to the directory
+ # containing the sim_cfg file...
+ testplan = "https://{}/{}".format(
+ self.book,
+ Path(self.rel_path).parent / 'data' / f"{self.name}_testplan.html"
+ )
results_str += f"### [Testplan]({testplan})\n"
@@ -898,24 +923,16 @@ def gen_results_summary(self):
print(self.results_summary_md)
return self.results_summary_md
- def _publish_results(self):
+ def _publish_results(self, results_server: ResultsServer):
'''Publish coverage results to the opentitan web server.'''
- super()._publish_results()
+ super()._publish_results(results_server)
if self.cov_report_deploy is not None:
- results_server_dir_url = self.results_server_dir.replace(
- self.results_server_prefix, "https://")
-
- log.info("Publishing coverage results to %s",
- results_server_dir_url)
- cmd = (self.results_server_cmd + " -m cp -R " +
- self.cov_report_dir + " " + self.results_server_dir)
- try:
- cmd_output = subprocess.run(args=cmd,
- shell=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
- log.log(VERBOSE, cmd_output.stdout.decode("utf-8"))
- except Exception as e:
- log.error("%s: Failed to publish results:\n\"%s\"", e,
- str(cmd))
+ log.info("Publishing coverage results to https://{}/{}/latest"
+ .format(self.results_server,
+ self.rel_path))
+
+ latest_dir = '{}/latest'.format(self.rel_path)
+ results_server.upload(self.cov_report_dir,
+ latest_dir,
+ recursive=True)
diff --git a/vendor/lowrisc_ibex/vendor/lowrisc_ip/util/dvsim/Testplan.py b/vendor/lowrisc_ibex/vendor/lowrisc_ip/util/dvsim/Testplan.py
index 5473e621..c69f68c5 100644
--- a/vendor/lowrisc_ibex/vendor/lowrisc_ip/util/dvsim/Testplan.py
+++ b/vendor/lowrisc_ibex/vendor/lowrisc_ip/util/dvsim/Testplan.py
@@ -7,6 +7,7 @@
import os
import re
import sys
+from typing import TextIO
from collections import defaultdict
from pathlib import Path
@@ -481,73 +482,32 @@ def get_stage_regressions(self):
"tests": list(regressions[ms])
} for ms in regressions]
- def get_testplan_table(self, fmt="pipe"):
- """Generate testplan table from hjson testplan.
+ def write_testplan_doc(self, output: TextIO) -> None:
+ """Write testplan documentation in markdown from the hjson testplan."""
- fmt is either 'pipe' (markdown) or 'html'. 'pipe' is the name used by
- tabulate to generate a markdown formatted table.
- """
- assert fmt in ["pipe", "html"]
-
- # Map between the requested format and a pair (tabfmt, formatter) where
- # tabfmt is the "tablefmt" argument for tabulate.tabulate and formatter
- # converts the input Markdown text to something we can pass to the
- # formatter.
- fmt_configs = {
- # For Markdown output, we pass the input text straight through
- 'pipe': ('pipe', lambda x: x),
- # For HTML output, we convert the Markdown to HTML using the
- # mistletoe library. The tablefmt argument should be 'unsafehtml'
- # in this case because this already escapes things like '<' and
- # don't want to double-escape them when tabulating.
- 'html': ('unsafehtml', mistletoe.markdown)
- }
- tabfmt, formatter = fmt_configs[fmt]
-
- if self.testpoints:
- lines = [formatter("\n### Testpoints\n")]
- header = ["Stage", "Name", "Tests", "Description"]
- colalign = ("center", "center", "left", "left")
- table = []
- for tp in self.testpoints:
- desc = formatter(tp.desc.strip())
-
- # tests is a list of strings. We want to insert them into a
- # table and (conveniently) we can put one on each line in both
- # Markdown and HTML mode by interspersing with ' ' tags.
- tests = " \n".join(tp.tests)
-
- table.append([tp.stage, tp.name, tests, desc])
- lines += [
- tabulate(table,
- headers=header,
- tablefmt=tabfmt,
- colalign=colalign)
- ]
+ stages = {}
+ for tp in self.testpoints:
+ stages.setdefault(tp.stage, list()).append(tp)
+
+ output.write("# Testplan\n\n## Testpoints\n\n")
+ for (stage, testpoints) in stages.items():
+ output.write(f"### Stage {stage} Testpoints\n\n")
+ for tp in testpoints:
+ output.write(f"#### `{tp.name}`\n\n")
+ if len(tp.tests) == 0:
+ output.write("No Tests Implemented")
+ elif len(tp.tests) == 1:
+ output.write(f"Test: `{tp.tests[0]}`")
+ else:
+ output.write("Tests:\n")
+ output.writelines([f"- `{test}`\n" for test in tp.tests])
+
+ output.write("\n\n" + tp.desc.strip() + "\n\n")
if self.covergroups:
- lines += [formatter("\n### Covergroups\n")]
- header = ["Name", "Description"]
- colalign = ("center", "left")
- table = []
+ output.write("## Covergroups\n\n")
for covergroup in self.covergroups:
- desc = formatter(covergroup.desc.strip())
- table.append([covergroup.name, desc])
- lines += [
- tabulate(table,
- headers=header,
- tablefmt=tabfmt,
- colalign=colalign)
- ]
-
- text = "\n".join(lines)
- if fmt == "html":
- text = self.get_dv_style_css() + text
- text = text.replace("
", "
")
-
- # Tabulate does not support HTML tags.
- text = text.replace("<", "<").replace(">", ">")
- return text
+ output.write(f"### {covergroup.name}\n\n{covergroup.desc.strip()}\n\n")
def map_test_results(self, test_results):
"""Map test results to testpoints."""
diff --git a/vendor/lowrisc_ibex/vendor/lowrisc_ip/util/dvsim/dvsim.py b/vendor/lowrisc_ibex/vendor/lowrisc_ip/util/dvsim/dvsim.py
index e5ffbaa5..dfab5334 100755
--- a/vendor/lowrisc_ibex/vendor/lowrisc_ip/util/dvsim/dvsim.py
+++ b/vendor/lowrisc_ibex/vendor/lowrisc_ip/util/dvsim/dvsim.py
@@ -518,11 +518,11 @@ def parse_args():
seedg.add_argument("--build-seed",
nargs="?",
type=int,
- const=random.getrandbits(32),
+ const=random.getrandbits(256),
metavar="S",
help=('Randomize the build. Uses the seed value passed '
'an additional argument, else it randomly picks '
- 'a 32-bit unsigned integer.'))
+ 'a 256-bit unsigned integer.'))
seedg.add_argument("--seeds",
"-s",
diff --git a/vendor/lowrisc_ibex/vendor/lowrisc_ip/util/dvsim/results_server.py b/vendor/lowrisc_ibex/vendor/lowrisc_ip/util/dvsim/results_server.py
new file mode 100644
index 00000000..00aa6486
--- /dev/null
+++ b/vendor/lowrisc_ibex/vendor/lowrisc_ip/util/dvsim/results_server.py
@@ -0,0 +1,129 @@
+# Copyright lowRISC contributors.
+# Licensed under the Apache License, Version 2.0, see LICENSE for details.
+# SPDX-License-Identifier: Apache-2.0
+
+"""
+Code for a wrapper class which represents the "results server".
+
+This is hosted with Google cloud.
+"""
+
+import datetime
+import logging as log
+import subprocess
+from shutil import which
+from typing import List, Optional
+
+
+class NoGCPError(Exception):
+ """Exception to represent "GCP tools are not installed"."""
+
+ pass
+
+
+class ResultsServer:
+ """A class representing connections to GCP (the results server)."""
+
+ def __init__(self, bucket_name: str):
+ """Construct results server; check gsutil is accessible."""
+ self.bucket_name = bucket_name
+
+ # A lazy "half check", which tries to check the GCP tools are available
+ # on this machine. We could move this check to later (in the methods
+ # that actually try to communicate with the server), at which point we
+ # could also do permissions checks. But then it's a bit more fiddly to
+ # work out what to do when something fails.
+ if which('gsutil') is None or which('gcloud') is None:
+ raise NoGCPError()
+
+ def _path_in_bucket(self, path: str) -> str:
+ """Return path in a format that gsutil understands in our bucket."""
+ return "gs://{}/{}".format(self.bucket_name, path)
+
+ def ls(self, path: str) -> List[str]:
+ """Find all the files at the given path on the results server.
+
+ This uses "gsutil ls". If gsutil fails, raise a
+ subprocess.CalledProcessError.
+ """
+ process = subprocess.run(['gsutil', 'ls', self._path_in_bucket(path)],
+ capture_output=True,
+ universal_newlines=True,
+ check=True)
+ # Get the list of files by splitting into lines, then dropping the
+ # empty line at the end.
+ return process.stdout.split('\n')[:-1]
+
+ def get_creation_time(self, path: str) -> Optional[datetime.datetime]:
+ """Get the creation time at path as a datetime.
+
+ If the file does not exist (or we can't see the creation time for some
+ reason), returns None.
+ """
+ bucket_pfx = 'gs://' + self.bucket_name
+ try:
+ process = subprocess.run(['gsutil', 'ls', '-l', bucket_pfx + '/' + path],
+ capture_output=True,
+ universal_newlines=True,
+ check=True)
+ except subprocess.CalledProcessError:
+ log.error("Failed to run ls -l over GCP on {}".format(path))
+ return None
+
+ # With gsutil, ls -l on a file prints out something like the following:
+ #
+ # 35079 2023-07-27T13:26:04Z gs://rjs-ot-scratch/path/to/my.file
+ #
+ # Grab the second word on the first (only) line and parse it into a
+ # datetime object. Recent versions of Python (3.11+) parse this format
+ # with fromisoformat but we can't do that with the minimum version we
+ # support.
+ timestamp = process.stdout.split()[1]
+ try:
+ return datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S%z')
+ except ValueError:
+ log.error("Could not parse creation time ({}) from GCP"
+ .format(timestamp))
+ return None
+
+ def mv(self, from_path: str, to_path: str) -> None:
+ """Use gsutil mv to move a file/directory."""
+ try:
+ subprocess.run(['gsutil', 'mv',
+ self._path_in_bucket(from_path),
+ self._path_in_bucket(to_path)],
+ check=True)
+ except subprocess.CalledProcessError:
+ # If we failed to move the file, print an error message but also
+ # fail with an error: we might not want anything downstream to keep
+ # going if it assumes some precious object has been moved to a
+ # place of safety!
+ log.error('Failed to use gsutil to move {} to {}'
+ .format(from_path, to_path))
+ raise
+
+ def upload(self,
+ local_path: str,
+ dst_path: str,
+ recursive: bool = False) -> None:
+ """Upload a file to GCP.
+
+ Like the "cp" command, dst_path can either be the target directory or
+ it can be the name of the file/directory that you're creating inside.
+
+ On failure, prints a message to the log but returns as normal.
+ """
+ try:
+ sub_cmd = ['cp']
+ if recursive:
+ sub_cmd.append('-r')
+ subprocess.run(['gsutil'] + sub_cmd +
+ [local_path,
+ self._path_in_bucket(dst_path)],
+ check=True)
+ except subprocess.CalledProcessError:
+ # If we failed to copy the file, print an error message but
+ # otherwise keep going. We don't want our failed upload to kill the
+ # rest of the job.
+ log.error('Failed to use gsutil to copy {} to {}'
+ .format(local_path, dst_path))
diff --git a/vendor/lowrisc_ibex/vendor/lowrisc_ip/util/dvsim/testplanner.py b/vendor/lowrisc_ibex/vendor/lowrisc_ip/util/dvsim/testplanner.py
index ecfc9fed..707671a9 100755
--- a/vendor/lowrisc_ibex/vendor/lowrisc_ip/util/dvsim/testplanner.py
+++ b/vendor/lowrisc_ibex/vendor/lowrisc_ip/util/dvsim/testplanner.py
@@ -27,7 +27,7 @@ def main():
'-o',
type=argparse.FileType('w'),
default=sys.stdout,
- help='output HTML file (without CSS)')
+ help='output markdown file')
args = parser.parse_args()
outfile = args.outfile
@@ -36,9 +36,8 @@ def main():
if args.sim_results:
outfile.write(
testplan.get_sim_results(args.sim_results, fmt="html"))
-
else:
- outfile.write(testplan.get_testplan_table("html"))
+ testplan.write_testplan_doc(outfile)
outfile.write('\n')
diff --git a/vendor/lowrisc_ibex/vendor/patches/google_riscv-dv/0002-pin-bitstring.patch b/vendor/lowrisc_ibex/vendor/patches/google_riscv-dv/0002-pin-bitstring.patch
new file mode 100644
index 00000000..bc844d85
--- /dev/null
+++ b/vendor/lowrisc_ibex/vendor/patches/google_riscv-dv/0002-pin-bitstring.patch
@@ -0,0 +1,10 @@
+--- a/requirements.txt
++++ b/requirements.txt
+@@ -1,5 +1,5 @@
+ PyYAML
+-bitstring
++bitstring==3.1.9
+ Sphinx
+ Pallets-Sphinx-Themes
+ sphinxcontrib-log-cabinet
+
diff --git a/vendor/lowrisc_ibex/vendor/riscv-isa-sim/.github/workflows/apt-packages.txt b/vendor/lowrisc_ibex/vendor/riscv-isa-sim/.github/workflows/apt-packages.txt
deleted file mode 100644
index e153391f..00000000
--- a/vendor/lowrisc_ibex/vendor/riscv-isa-sim/.github/workflows/apt-packages.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-build-essential
-device-tree-compiler
diff --git a/vendor/lowrisc_ibex/vendor/riscv-isa-sim/.github/workflows/continuous-integration.yml b/vendor/lowrisc_ibex/vendor/riscv-isa-sim/.github/workflows/continuous-integration.yml
deleted file mode 100644
index aeaf460e..00000000
--- a/vendor/lowrisc_ibex/vendor/riscv-isa-sim/.github/workflows/continuous-integration.yml
+++ /dev/null
@@ -1,28 +0,0 @@
-# This file describes the GitHub Actions workflow for continuous integration of Spike.
-#
-# See
-# https://help.github.com/en/actions/reference/workflow-syntax-for-github-actions
-# for API reference documentation on this file format.
-
-name: Continuous Integration
-
-on:
- push:
- branches:
- - master
- pull_request:
- branches:
- - master
-
-
-jobs:
- test:
- name: Test Spike build
- runs-on: ubuntu-20.04
- steps:
- - uses: actions/checkout@v2
-
- - name: Install Dependencies
- run: sudo xargs apt-get install -y < .github/workflows/apt-packages.txt
-
- - run: ci-tests/test-spike
diff --git a/vendor/lowrisc_ibex/vendor/riscv-isa-sim/.gitignore b/vendor/lowrisc_ibex/vendor/riscv-isa-sim/.gitignore
deleted file mode 100644
index 14326e9c..00000000
--- a/vendor/lowrisc_ibex/vendor/riscv-isa-sim/.gitignore
+++ /dev/null
@@ -1,7 +0,0 @@
-build/
-*.gch
-autom4te.cache/
-.*.swp
-*.o
-*.d
-.gdb_history
diff --git a/vendor/lowrisc_ibex/vendor/riscv-isa-sim/tests/ebreak.py b/vendor/lowrisc_ibex/vendor/riscv-isa-sim/tests/ebreak.py
deleted file mode 100755
index dd7e6587..00000000
--- a/vendor/lowrisc_ibex/vendor/riscv-isa-sim/tests/ebreak.py
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/python
-
-import os
-import testlib
-import unittest
-import tempfile
-import time
-
-class EbreakTest(unittest.TestCase):
- def setUp(self):
- self.binary = testlib.compile("ebreak.s")
-
- def test_noport(self):
- """Make sure that we can run past ebreak when --gdb-port isn't used."""
- spike = testlib.Spike(self.binary, with_gdb=False, timeout=10)
- result = spike.wait()
- self.assertEqual(result, 0)
-
- def test_nogdb(self):
- """Make sure that we can run past ebreak when gdb isn't attached."""
- spike = testlib.Spike(self.binary, timeout=10)
- result = spike.wait()
- self.assertEqual(result, 0)
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/vendor/lowrisc_ibex/vendor/riscv-isa-sim/tests/ebreak.s b/vendor/lowrisc_ibex/vendor/riscv-isa-sim/tests/ebreak.s
deleted file mode 100644
index 99f3e07c..00000000
--- a/vendor/lowrisc_ibex/vendor/riscv-isa-sim/tests/ebreak.s
+++ /dev/null
@@ -1,5 +0,0 @@
- .global main
-main:
- li a0, 0
- ebreak
- ret
diff --git a/vendor/lowrisc_ibex/vendor/riscv-isa-sim/tests/mseccfg/Makefile b/vendor/lowrisc_ibex/vendor/riscv-isa-sim/tests/mseccfg/Makefile
index 2277410c..ac9f8254 100644
--- a/vendor/lowrisc_ibex/vendor/riscv-isa-sim/tests/mseccfg/Makefile
+++ b/vendor/lowrisc_ibex/vendor/riscv-isa-sim/tests/mseccfg/Makefile
@@ -2,8 +2,8 @@
XLEN ?= 32
VLEN ?= 1024
-RISCV_TOOL ?= /home/saad/Downloads/lowrisc-toolchain-gcc-rv32imcb-20220524-1/bin/
-SPIKE_PATH ?= /home/saad/work/riscv-isa-sim/build
+RISCV_TOOL ?= ~/lowrisc-toolchain-gcc-rv32imcb-20220524-1/bin/
+SPIKE_PATH ?= ~/riscv-isa-sim/build
SAIL_EMULATOR_PATH = /home/scratch.soberl_maxwell/arch1/sail_2021/sail-riscv/c_emulator
SSP_OPT ?=
@@ -13,7 +13,7 @@ LIB_PATH = .
# ../ctests/nvrvv_lib.c
COMMON_FILES = \
$(LIB_PATH)/crt.S \
- $(LIB_PATH)/syscalls.c
+ $(LIB_PATH)/syscalls.c
TEST_PATH = ./gengen_src/outputs
@@ -26,7 +26,7 @@ CFLAGS = -march=rv$(XLEN)imafd -O2 -I . -I ./$(LIB_PATH) -I ../softfloat -I ../r
-fno-builtin-printf -fdata-sections -fno-section-anchors $(SSP_OPT) -DPRINTF_SUPPORTED=1
LDFLAGS = -mcmodel=medany -static -nostdlib -nostartfiles -lm -lgcc \
-T $(LIB_PATH)/mseccfg_test.ld -Wl,-M -Wl,-Map=link.log
-
+
# must enable 'C', maybe used in pk
# 8M for TCM memories
# 16M for L2 memories
@@ -36,10 +36,10 @@ default:
@echo "make gen, to generate all test cases with gengen"
@echo "make run, to run all test cases"
@echo "set OBJECTS variant to select specified test case"
-
+
gen:
cd gengen_src; $(MAKE); $(MAKE) gen;
-
+
$(OBJECTS):
@$(RISCV_TOOL)/riscv$(XLEN)-unknown-elf-gcc $(CFLAGS) $(TEST_PATH)/$@.c $(COMMON_FILES) $(LDFLAGS) -o a.out
@echo Running $(TEST_PATH)/$@.c - command - $(RISCV_TOOL)/riscv$(XLEN)-unknown-elf-gcc $(CFLAGS) $(TEST_PATH)/$@.c $(COMMON_FILES) $(LDFLAGS) -o a.out
@@ -53,18 +53,18 @@ ifeq ($(PERF), 0)
# sed -i '0,/ nop/d' $@_pc.log
# sed -i '/ nop/q' $@_pc.log
endif
-
+
run: $(OBJECTS)
clean:
rm *.s *.o *.i *.ss *.out *.log *.bin
-
+
log:
$(SPIKE_PATH)/spike $(SIM_ISA) -m0x100000:0x200000 -l a.out > 1.log 2>&1
$(SAIL_EMULATOR_PATH)/riscv_sim_RV64 --enable-pmp a.out > 2.log 2>&1
-
+
env:
echo $(ALL_TEST)
-
+
.PHONY: gen $(OBJECTS) clean
diff --git a/vendor/lowrisc_ibex/vendor/riscv-isa-sim/tests/testlib.py b/vendor/lowrisc_ibex/vendor/riscv-isa-sim/tests/testlib.py
deleted file mode 100644
index d5e8d795..00000000
--- a/vendor/lowrisc_ibex/vendor/riscv-isa-sim/tests/testlib.py
+++ /dev/null
@@ -1,116 +0,0 @@
-import os.path
-import pexpect
-import subprocess
-import tempfile
-import testlib
-import unittest
-
-# Note that gdb comes with its own testsuite. I was unable to figure out how to
-# run that testsuite against the spike simulator.
-
-def find_file(path):
- for directory in (os.getcwd(), os.path.dirname(testlib.__file__)):
- fullpath = os.path.join(directory, path)
- if os.path.exists(fullpath):
- return fullpath
- return None
-
-def compile(*args):
- """Compile a single .c file into a binary."""
- dst = os.path.splitext(args[0])[0]
- cc = os.path.expandvars("$RISCV/bin/riscv64-unknown-elf-gcc")
- cmd = [cc, "-g", "-O", "-o", dst]
- for arg in args:
- found = find_file(arg)
- if found:
- cmd.append(found)
- else:
- cmd.append(arg)
- cmd = " ".join(cmd)
- result = os.system(cmd)
- assert result == 0, "%r failed" % cmd
- return dst
-
-def unused_port():
- # http://stackoverflow.com/questions/2838244/get-open-tcp-port-in-python/2838309#2838309
- import socket
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- s.bind(("",0))
- port = s.getsockname()[1]
- s.close()
- return port
-
-class Spike(object):
- def __init__(self, binary, halted=False, with_gdb=True, timeout=None):
- """Launch spike. Return tuple of its process and the port it's running on."""
- cmd = []
- if timeout:
- cmd += ["timeout", str(timeout)]
-
- cmd += [find_file("spike")]
- if halted:
- cmd.append('-H')
- if with_gdb:
- self.port = unused_port()
- cmd += ['--gdb-port', str(self.port)]
- cmd.append('pk')
- if binary:
- cmd.append(binary)
- logfile = open("spike.log", "w")
- self.process = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=logfile,
- stderr=logfile)
-
- def __del__(self):
- try:
- self.process.kill()
- self.process.wait()
- except OSError:
- pass
-
- def wait(self, *args, **kwargs):
- return self.process.wait(*args, **kwargs)
-
-class Gdb(object):
- def __init__(self):
- path = os.path.expandvars("$RISCV/bin/riscv64-unknown-elf-gdb")
- self.child = pexpect.spawn(path)
- self.child.logfile = file("gdb.log", "w")
- self.wait()
- self.command("set width 0")
- self.command("set height 0")
- # Force consistency.
- self.command("set print entry-values no")
-
- def wait(self):
- """Wait for prompt."""
- self.child.expect("\(gdb\)")
-
- def command(self, command, timeout=-1):
- self.child.sendline(command)
- self.child.expect("\n", timeout=timeout)
- self.child.expect("\(gdb\)", timeout=timeout)
- return self.child.before.strip()
-
- def c(self, wait=True):
- if wait:
- return self.command("c")
- else:
- self.child.sendline("c")
- self.child.expect("Continuing")
-
- def interrupt(self):
- self.child.send("\003");
- self.child.expect("\(gdb\)")
-
- def x(self, address, size='w'):
- output = self.command("x/%s %s" % (size, address))
- value = int(output.split(':')[1].strip(), 0)
- return value
-
- def p(self, obj):
- output = self.command("p %s" % obj)
- value = int(output.split('=')[-1].strip())
- return value
-
- def stepi(self):
- return self.command("stepi")
diff --git a/vendor/lowrisc_ibex/vendor/riscv_isa_sim.lock.hjson b/vendor/lowrisc_ibex/vendor/riscv_isa_sim.lock.hjson
index 0dc241b2..fb5d7880 100644
--- a/vendor/lowrisc_ibex/vendor/riscv_isa_sim.lock.hjson
+++ b/vendor/lowrisc_ibex/vendor/riscv_isa_sim.lock.hjson
@@ -8,7 +8,7 @@
{
upstream:
{
- url: https://github.com/Saad525/riscv-isa-sim
- rev: a7927883b66b9b92a5e1fd8c938dda23693132b2
+ url: https://github.com/lowrisc/riscv-isa-sim
+ rev: a4b823a1c7a260b532e1aa41b4d929e9634a7222
}
}
diff --git a/vendor/lowrisc_ibex/vendor/riscv_isa_sim.vendor.hjson b/vendor/lowrisc_ibex/vendor/riscv_isa_sim.vendor.hjson
index ac723126..002179fa 100644
--- a/vendor/lowrisc_ibex/vendor/riscv_isa_sim.vendor.hjson
+++ b/vendor/lowrisc_ibex/vendor/riscv_isa_sim.vendor.hjson
@@ -6,8 +6,8 @@
target_dir: "riscv-isa-sim",
upstream: {
- url: "https://github.com/Saad525/riscv-isa-sim",
- rev: "master",
+ url: "https://github.com/lowrisc/riscv-isa-sim",
+ rev: "mseccfg_tests",
},
mapping: [
{from: "tests/mseccfg", to: "tests/mseccfg"},