From 2617a359d12154eb41f541c381804b10bbace975 Mon Sep 17 00:00:00 2001 From: Moritz Scherer Date: Thu, 11 Jul 2024 09:41:16 +0200 Subject: [PATCH] Initial commit --- .gitignore | 29 + Bender.lock | 280 ++++ Bender.yml | 37 + LICENSE | 176 +++ Makefile | 37 + README.md | 70 + bender.mk | 29 + chimera.mk | 81 ++ hw/bootrom/snitch/snitch_bootrom.S | 70 + hw/bootrom/snitch/snitch_bootrom.bin | Bin 0 -> 216 bytes hw/bootrom/snitch/snitch_bootrom.ld | 17 + hw/bootrom/snitch/snitch_bootrom.sv | 100 ++ hw/bootrom/snitch/snitch_startup.c | 44 + hw/chimera_cluster_adapter.sv | 358 +++++ hw/chimera_pkg.sv | 113 ++ hw/chimera_top_wrapper.sv | 488 +++++++ hw/regs/chimera_reg_pkg.sv | 161 +++ hw/regs/chimera_reg_top.sv | 599 ++++++++ hw/regs/chimera_regs.hjson | 156 +++ hw/rv_plic.cfg.hjson | 15 + requirements.txt | 6 + sim.mk | 38 + sw/include/regs/soc_ctrl.h | 64 + sw/sw.mk | 41 + sw/tests/testCluster.c | 59 + sw/tests/testClusterGating.c | 44 + sw/tests/testClusterOffload.c | 62 + target/sim/src/fixture_chimera_soc.sv | 144 ++ target/sim/src/tb_chimera_pkg.sv | 45 + target/sim/src/tb_chimera_soc.sv | 67 + target/sim/src/vip_chimera_soc.sv | 932 +++++++++++++ target/sim/vsim/setup.chimera_soc.tcl | 27 + target/sim/vsim/start.chimera_soc.tcl | 30 + utils/reggen/reggen/README.md | 113 ++ utils/reggen/reggen/__init__.py | 0 utils/reggen/reggen/access.py | 121 ++ utils/reggen/reggen/alert.py | 54 + utils/reggen/reggen/bits.py | 87 ++ utils/reggen/reggen/bus_interfaces.py | 187 +++ utils/reggen/reggen/enum_entry.py | 35 + utils/reggen/reggen/field.py | 291 ++++ utils/reggen/reggen/fpv_csr.sv.tpl | 177 +++ utils/reggen/reggen/gen_cfg_html.py | 113 ++ utils/reggen/reggen/gen_cheader.py | 439 ++++++ utils/reggen/reggen/gen_dv.py | 108 ++ utils/reggen/reggen/gen_fpv.py | 81 ++ utils/reggen/reggen/gen_html.py | 325 +++++ utils/reggen/reggen/gen_json.py | 34 + utils/reggen/reggen/gen_rtl.py | 136 ++ utils/reggen/reggen/gen_selfdoc.py | 306 +++++ utils/reggen/reggen/html_helpers.py | 83 ++ utils/reggen/reggen/inter_signal.py | 81 ++ utils/reggen/reggen/ip_block.py | 365 +++++ utils/reggen/reggen/lib.py | 262 ++++ utils/reggen/reggen/multi_register.py | 142 ++ utils/reggen/reggen/params.py | 341 +++++ utils/reggen/reggen/reg_base.py | 45 + utils/reggen/reggen/reg_block.py | 431 ++++++ utils/reggen/reggen/reg_html.css | 74 + utils/reggen/reggen/reg_pkg.sv.tpl | 347 +++++ utils/reggen/reggen/reg_top.sv.tpl | 795 +++++++++++ utils/reggen/reggen/register.py | 375 +++++ utils/reggen/reggen/signal.py | 63 + utils/reggen/reggen/uvm_reg.sv.tpl | 14 + utils/reggen/reggen/uvm_reg_base.sv.tpl | 431 ++++++ utils/reggen/reggen/validate.py | 155 +++ utils/reggen/reggen/version.py | 24 + utils/reggen/reggen/window.py | 169 +++ utils/reggen/regtool.py | 238 ++++ utils/reggen/topgen/__init__.py | 8 + utils/reggen/topgen/c.py | 444 ++++++ utils/reggen/topgen/gen_dv.py | 46 + utils/reggen/topgen/intermodule.py | 1005 ++++++++++++++ utils/reggen/topgen/lib.py | 497 +++++++ utils/reggen/topgen/merge.py | 1081 +++++++++++++++ utils/reggen/topgen/templates/README.md | 4 + .../templates/chip_env_pkg__params.sv.tpl | 17 + .../reggen/topgen/templates/chiplevel.sv.tpl | 1218 +++++++++++++++++ utils/reggen/topgen/templates/clang-format | 4 + .../tb__alert_handler_connect.sv.tpl | 21 + .../topgen/templates/tb__xbar_connect.sv.tpl | 124 ++ utils/reggen/topgen/templates/toplevel.c.tpl | 21 + utils/reggen/topgen/templates/toplevel.h.tpl | 201 +++ utils/reggen/topgen/templates/toplevel.sv.tpl | 832 +++++++++++ 
.../topgen/templates/toplevel_memory.h.tpl | 62 + .../topgen/templates/toplevel_memory.ld.tpl | 30 + .../topgen/templates/toplevel_pkg.sv.tpl | 112 ++ .../templates/toplevel_rnd_cnst_pkg.sv.tpl | 44 + .../templates/xbar_env_pkg__params.sv.tpl | 88 ++ utils/reggen/topgen/top.py | 122 ++ utils/reggen/topgen/top_uvm_reg.sv.tpl | 151 ++ utils/reggen/topgen/validate.py | 878 ++++++++++++ 92 files changed, 18471 insertions(+) create mode 100644 .gitignore create mode 100644 Bender.lock create mode 100644 Bender.yml create mode 100644 LICENSE create mode 100644 Makefile create mode 100644 README.md create mode 100644 bender.mk create mode 100644 chimera.mk create mode 100644 hw/bootrom/snitch/snitch_bootrom.S create mode 100755 hw/bootrom/snitch/snitch_bootrom.bin create mode 100644 hw/bootrom/snitch/snitch_bootrom.ld create mode 100644 hw/bootrom/snitch/snitch_bootrom.sv create mode 100644 hw/bootrom/snitch/snitch_startup.c create mode 100644 hw/chimera_cluster_adapter.sv create mode 100644 hw/chimera_pkg.sv create mode 100644 hw/chimera_top_wrapper.sv create mode 100644 hw/regs/chimera_reg_pkg.sv create mode 100644 hw/regs/chimera_reg_top.sv create mode 100644 hw/regs/chimera_regs.hjson create mode 100644 hw/rv_plic.cfg.hjson create mode 100644 requirements.txt create mode 100644 sim.mk create mode 100644 sw/include/regs/soc_ctrl.h create mode 100644 sw/sw.mk create mode 100644 sw/tests/testCluster.c create mode 100644 sw/tests/testClusterGating.c create mode 100644 sw/tests/testClusterOffload.c create mode 100644 target/sim/src/fixture_chimera_soc.sv create mode 100644 target/sim/src/tb_chimera_pkg.sv create mode 100644 target/sim/src/tb_chimera_soc.sv create mode 100644 target/sim/src/vip_chimera_soc.sv create mode 100644 target/sim/vsim/setup.chimera_soc.tcl create mode 100644 target/sim/vsim/start.chimera_soc.tcl create mode 100644 utils/reggen/reggen/README.md create mode 100644 utils/reggen/reggen/__init__.py create mode 100644 utils/reggen/reggen/access.py create mode 100644 utils/reggen/reggen/alert.py create mode 100644 utils/reggen/reggen/bits.py create mode 100644 utils/reggen/reggen/bus_interfaces.py create mode 100644 utils/reggen/reggen/enum_entry.py create mode 100644 utils/reggen/reggen/field.py create mode 100644 utils/reggen/reggen/fpv_csr.sv.tpl create mode 100644 utils/reggen/reggen/gen_cfg_html.py create mode 100644 utils/reggen/reggen/gen_cheader.py create mode 100644 utils/reggen/reggen/gen_dv.py create mode 100644 utils/reggen/reggen/gen_fpv.py create mode 100644 utils/reggen/reggen/gen_html.py create mode 100644 utils/reggen/reggen/gen_json.py create mode 100644 utils/reggen/reggen/gen_rtl.py create mode 100644 utils/reggen/reggen/gen_selfdoc.py create mode 100644 utils/reggen/reggen/html_helpers.py create mode 100644 utils/reggen/reggen/inter_signal.py create mode 100644 utils/reggen/reggen/ip_block.py create mode 100644 utils/reggen/reggen/lib.py create mode 100644 utils/reggen/reggen/multi_register.py create mode 100644 utils/reggen/reggen/params.py create mode 100644 utils/reggen/reggen/reg_base.py create mode 100644 utils/reggen/reggen/reg_block.py create mode 100644 utils/reggen/reggen/reg_html.css create mode 100644 utils/reggen/reggen/reg_pkg.sv.tpl create mode 100644 utils/reggen/reggen/reg_top.sv.tpl create mode 100644 utils/reggen/reggen/register.py create mode 100644 utils/reggen/reggen/signal.py create mode 100644 utils/reggen/reggen/uvm_reg.sv.tpl create mode 100644 utils/reggen/reggen/uvm_reg_base.sv.tpl create mode 100644 utils/reggen/reggen/validate.py 
create mode 100644 utils/reggen/reggen/version.py create mode 100644 utils/reggen/reggen/window.py create mode 100755 utils/reggen/regtool.py create mode 100644 utils/reggen/topgen/__init__.py create mode 100644 utils/reggen/topgen/c.py create mode 100644 utils/reggen/topgen/gen_dv.py create mode 100644 utils/reggen/topgen/intermodule.py create mode 100644 utils/reggen/topgen/lib.py create mode 100644 utils/reggen/topgen/merge.py create mode 100644 utils/reggen/topgen/templates/README.md create mode 100644 utils/reggen/topgen/templates/chip_env_pkg__params.sv.tpl create mode 100644 utils/reggen/topgen/templates/chiplevel.sv.tpl create mode 100644 utils/reggen/topgen/templates/clang-format create mode 100644 utils/reggen/topgen/templates/tb__alert_handler_connect.sv.tpl create mode 100644 utils/reggen/topgen/templates/tb__xbar_connect.sv.tpl create mode 100644 utils/reggen/topgen/templates/toplevel.c.tpl create mode 100644 utils/reggen/topgen/templates/toplevel.h.tpl create mode 100644 utils/reggen/topgen/templates/toplevel.sv.tpl create mode 100644 utils/reggen/topgen/templates/toplevel_memory.h.tpl create mode 100644 utils/reggen/topgen/templates/toplevel_memory.ld.tpl create mode 100644 utils/reggen/topgen/templates/toplevel_pkg.sv.tpl create mode 100644 utils/reggen/topgen/templates/toplevel_rnd_cnst_pkg.sv.tpl create mode 100644 utils/reggen/topgen/templates/xbar_env_pkg__params.sv.tpl create mode 100644 utils/reggen/topgen/top.py create mode 100644 utils/reggen/topgen/top_uvm_reg.sv.tpl create mode 100644 utils/reggen/topgen/validate.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..595c8b9 --- /dev/null +++ b/.gitignore @@ -0,0 +1,29 @@ +.bender + +# EMACS + +*undo-tree~ +*~ +*# + +# HW + +hw/regs/pcr.md + +# SW + +*.elf +*.dump +*.memh +__pycache__ + + +# SIM + +target/sim/vsim/work +target/sim/vsim/transcript +target/sim/vsim/trace* +modelsim.ini +compile.tcl +logs +vsim.wlf \ No newline at end of file diff --git a/Bender.lock b/Bender.lock new file mode 100644 index 0000000..8bce7e5 --- /dev/null +++ b/Bender.lock @@ -0,0 +1,280 @@ +packages: + apb: + revision: 77ddf073f194d44b9119949d2421be59789e69ae + version: 0.2.4 + source: + Git: https://github.com/pulp-platform/apb.git + dependencies: + - common_cells + apb_uart: + revision: 6c7dde3d749ac8274377745c105da8c8b8cd27c6 + version: 0.2.1 + source: + Git: https://github.com/pulp-platform/apb_uart.git + dependencies: + - apb + - register_interface + axi: + revision: 9402c8a9ce0a7b5253c3c29e788612d771e8b5d6 + version: 0.39.3 + source: + Git: https://github.com/pulp-platform/axi.git + dependencies: + - common_cells + - common_verification + - tech_cells_generic + axi_llc: + revision: 559bcbd09a5a884dbe31e2d72fd95d024e357f39 + version: 0.2.1 + source: + Git: https://github.com/pulp-platform/axi_llc.git + dependencies: + - axi + - common_cells + - common_verification + - register_interface + - tech_cells_generic + axi_riscv_atomics: + revision: 0ac3a78fe342c5a5b9b10bff49d58897f773059e + version: 0.8.2 + source: + Git: https://github.com/pulp-platform/axi_riscv_atomics.git + dependencies: + - axi + - common_cells + - common_verification + axi_rt: + revision: d5f857e74d0a5db4e4a2cc3652ca4f40f29a1484 + version: 0.0.0-alpha.8 + source: + Git: https://github.com/pulp-platform/axi_rt.git + dependencies: + - axi + - common_cells + - register_interface + axi_stream: + revision: 54891ff40455ca94a37641b9da4604647878cc07 + version: 0.1.1 + source: + Git: https://github.com/pulp-platform/axi_stream.git + dependencies: + - 
common_cells + axi_vga: + revision: 3718b9930f94a9eaad8ee50b4bccc71df0403084 + version: 0.1.3 + source: + Git: https://github.com/pulp-platform/axi_vga.git + dependencies: + - axi + - common_cells + - register_interface + cheshire: + revision: 6c75eb70c56f1af1502668b2e587ab329e7c8677 + version: null + source: + Git: https://github.com/pulp-platform/cheshire.git + dependencies: + - apb_uart + - axi + - axi_llc + - axi_riscv_atomics + - axi_rt + - axi_vga + - clic + - clint + - common_cells + - common_verification + - cva6 + - idma + - irq_router + - memory_island + - opentitan_peripherals + - register_interface + - riscv-dbg + - serial_link + - unbent + clic: + revision: 8ed76ffc779a435d0ed034f3068e4c3334fe2ecf + version: 2.0.0 + source: + Git: https://github.com/pulp-platform/clic.git + dependencies: + - common_cells + - register_interface + clint: + revision: d5390a805c20f9226758a152ba1645f61da73349 + version: 0.2.0 + source: + Git: https://github.com/pulp-platform/clint.git + dependencies: + - common_cells + - register_interface + cluster_icache: + revision: 0e1fb6751d9684d968ba7fb40836e6118b448ecd + version: 0.1.1 + source: + Git: https://github.com/pulp-platform/cluster_icache.git + dependencies: + - axi + - common_cells + - scm + - tech_cells_generic + cluster_interconnect: + revision: 7d0a4f8acae71a583a6713cab5554e60b9bb8d27 + version: 1.2.1 + source: + Git: https://github.com/pulp-platform/cluster_interconnect.git + dependencies: + - common_cells + common_cells: + revision: be3866eb3ed5b8ac7bc9a9ec9f8ae86137cd0a45 + version: 1.36.0 + source: + Git: https://github.com/pulp-platform/common_cells.git + dependencies: + - common_verification + - tech_cells_generic + common_verification: + revision: 9c07fa860593b2caabd9b5681740c25fac04b878 + version: 0.2.3 + source: + Git: https://github.com/pulp-platform/common_verification.git + dependencies: [] + cva6: + revision: 9cb2112d5b7823a3b4ad42a1a2419b65e623ec14 + version: null + source: + Git: git@github.com:Scheremo/cva6.git + dependencies: + - axi + - common_cells + - fpnew + - tech_cells_generic + fpnew: + revision: a8e0cba6dd50f357ece73c2c955d96efc3c6c315 + version: null + source: + Git: https://github.com/pulp-platform/cvfpu.git + dependencies: + - common_cells + - fpu_div_sqrt_mvp + fpu_div_sqrt_mvp: + revision: 86e1f558b3c95e91577c41b2fc452c86b04e85ac + version: 1.0.4 + source: + Git: https://github.com/pulp-platform/fpu_div_sqrt_mvp.git + dependencies: + - common_cells + idma: + revision: c12caf59bb482fe44b27361f6924ad346b2d22fe + version: 0.6.3 + source: + Git: https://github.com/pulp-platform/iDMA.git + dependencies: + - axi + - axi_stream + - common_cells + - common_verification + - obi + - register_interface + irq_router: + revision: d1d31350b24f3965b3a51e1bc96c71eb34e94db3 + version: 0.0.1-beta.1 + source: + Git: https://github.com/pulp-platform/irq_router.git + dependencies: + - axi + - common_cells + - register_interface + memory_island: + revision: cfe27a946807713e93ed1be25c241588ffde407f + version: null + source: + Git: git@iis-git.ee.ethz.ch:michaero/memory_island.git + dependencies: + - axi + - cluster_interconnect + - common_cells + - common_verification + - tech_cells_generic + obi: + revision: 1aa411df145c4ebdd61f8fed4d003c33f7b20636 + version: 0.1.2 + source: + Git: https://github.com/pulp-platform/obi.git + dependencies: + - common_cells + - common_verification + opentitan_peripherals: + revision: cd3153de2783abd3d03d0595e6c4b32413c62f14 + version: 0.4.0 + source: + Git: 
https://github.com/pulp-platform/opentitan_peripherals.git + dependencies: + - common_cells + - register_interface + - tech_cells_generic + register_interface: + revision: ae616e5a1ec2b41e72d200e5ab09c65e94aebd3d + version: 0.4.4 + source: + Git: https://github.com/pulp-platform/register_interface.git + dependencies: + - apb + - axi + - common_cells + - common_verification + riscv-dbg: + revision: 358f90110220adf7a083f8b65d157e836d706236 + version: 0.8.1 + source: + Git: https://github.com/pulp-platform/riscv-dbg.git + dependencies: + - common_cells + - tech_cells_generic + scm: + revision: 998466d2a3c2d7d572e43d2666d93c4f767d8d60 + version: 1.1.1 + source: + Git: https://github.com/pulp-platform/scm.git + dependencies: [] + serial_link: + revision: 5a25f5a71074f1ebb6de7b5280f2b16924bcc666 + version: 1.1.1 + source: + Git: https://github.com/pulp-platform/serial_link.git + dependencies: + - axi + - common_cells + - register_interface + snitch_cluster: + revision: 70ff02fab7749fd6bb6cb0a2ec6c31a8f51e80d6 + version: null + source: + Git: https://github.com/pulp-platform/snitch_cluster.git + dependencies: + - axi + - axi_riscv_atomics + - cluster_icache + - common_cells + - fpnew + - idma + - register_interface + - riscv-dbg + - tech_cells_generic + tech_cells_generic: + revision: 7968dd6e6180df2c644636bc6d2908a49f2190cf + version: 0.2.13 + source: + Git: https://github.com/pulp-platform/tech_cells_generic.git + dependencies: + - common_verification + unbent: + revision: e9c9d5cfb635f2d4668c816ce9235798cfecb297 + version: 0.1.6 + source: + Git: https://github.com/pulp-platform/unbent.git + dependencies: + - axi + - common_cells + - register_interface diff --git a/Bender.yml b/Bender.yml new file mode 100644 index 0000000..aac8757 --- /dev/null +++ b/Bender.yml @@ -0,0 +1,37 @@ +# Copyright 2024 ETH Zurich and University of Bologna. +# Solderpad Hardware License, Version 0.51, see LICENSE for details. +# SPDX-License-Identifier: SHL-0.51 + +package: + name: chimera + authors: + - "Moritz Scherer " + +dependencies: + register_interface: { git: "https://github.com/pulp-platform/register_interface.git", version: 0.4.3 } + axi: { git: "https://github.com/pulp-platform/axi.git", version: 0.39.2 } + cheshire: { git: "https://github.com/pulp-platform/cheshire.git", rev: "scheremo/convolve"} + snitch_cluster: { git: "https://github.com/pulp-platform/snitch_cluster.git", rev: "main"} + common_cells: { git: "https://github.com/pulp-platform/common_cells.git", version: 1.31.1} + +sources: + - hw/chimera_pkg.sv + - hw/regs/chimera_reg_pkg.sv + - hw/regs/chimera_reg_top.sv + - hw/bootrom/snitch/snitch_bootrom.sv + - hw/chimera_cluster_adapter.sv + - hw/chimera_top_wrapper.sv + + - target: any(simulation, test) + files: + - target/sim/src/vip_chimera_soc.sv + - target/sim/src/fixture_chimera_soc.sv + - target/sim/src/tb_chimera_soc.sv + - target/sim/src/tb_chimera_pkg.sv + +vendor_package: + - name: reggen + target_dir: "utils" + upstream: { git: "https://github.com/pulp-platform/register_interface.git", rev: "19163bb5191d2669a8cbc267cdd4ce8e60f20746"} + mapping: + - { from: 'vendor/lowrisc_opentitan/util', to: 'reggen'} diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..18e4f67 --- /dev/null +++ b/LICENSE @@ -0,0 +1,176 @@ +SOLDERPAD HARDWARE LICENSE version 0.51 + +This license is based closely on the Apache License Version 2.0, but is not +approved or endorsed by the Apache Foundation. 
A copy of the non-modified +Apache License 2.0 can be found at http://www.apache.org/licenses/LICENSE-2.0. + +As this license is not currently OSI or FSF approved, the Licensor permits any +Work licensed under this License, at the option of the Licensee, to be treated +as licensed under the Apache License Version 2.0 (which is so approved). + +This License is licensed under the terms of this License and in particular +clause 7 below (Disclaimer of Warranties) applies in relation to its use. + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the Rights owner or entity authorized by the Rights owner +that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Rights" means copyright and any similar right including design right (whether +registered or unregistered), semiconductor topography (mask) rights and +database rights (but excluding Patents and Trademarks). + +"Source" form shall mean the preferred form for making modifications, including +but not limited to source code, net lists, board layouts, CAD files, +documentation source, and configuration files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object +code, generated documentation, the instantiation of a hardware design and +conversions to other media types, including intermediate forms such as +bytecodes, FPGA bitstreams, artwork and semiconductor topographies (mask +works). + +"Work" shall mean the work of authorship, whether in Source form or other +Object form, made available under the License, as indicated by a Rights notice +that is included in or attached to the work (an example is provided in the +Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) or physically connect to or interoperate with the interfaces of, the Work +and Derivative Works thereof. + +"Contribution" shall mean any design or work of authorship, including the +original version of the Work and any modifications or additions to that Work or +Derivative Works thereof, that is intentionally submitted to Licensor for +inclusion in the Work by the Rights owner or by an individual or Legal Entity +authorized to submit on behalf of the Rights owner. 
For the purposes of this +definition, "submitted" means any form of electronic, verbal, or written +communication sent to the Licensor or its representatives, including but not +limited to communication on electronic mailing lists, source code control +systems, and issue tracking systems that are managed by, or on behalf of, the +Licensor for the purpose of discussing and improving the Work, but excluding +communication that is conspicuously marked or otherwise designated in writing +by the Rights owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of License. Subject to the terms and conditions of this License, each +Contributor hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable license under the Rights to reproduce, +prepare Derivative Works of, publicly display, publicly perform, sublicense, +and distribute the Work and such Derivative Works in Source or Object form and +do anything in relation to the Work as if the Rights did not exist. + +3. Grant of Patent License. Subject to the terms and conditions of this +License, each Contributor hereby grants to You a perpetual, worldwide, +non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this +section) patent license to make, have made, use, offer to sell, sell, import, +and otherwise transfer the Work, where such license applies only to those +patent claims licensable by such Contributor that are necessarily infringed by +their Contribution(s) alone or by combination of their Contribution(s) with the +Work to which such Contribution(s) was submitted. If You institute patent +litigation against any entity (including a cross-claim or counterclaim in a +lawsuit) alleging that the Work or a Contribution incorporated within the Work +constitutes direct or contributory patent infringement, then any patent +licenses granted to You under this License for that Work shall terminate as of +the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the Work or +Derivative Works thereof in any medium, with or without modifications, and in +Source or Object form, provided that You meet the following conditions: + + You must give any other recipients of the Work or Derivative Works a copy + of this License; and + + You must cause any modified files to carry prominent notices stating that + You changed the files; and + + You must retain, in the Source form of any Derivative Works that You + distribute, all copyright, patent, trademark, and attribution notices from + the Source form of the Work, excluding those notices that do not pertain to + any part of the Derivative Works; and + + If the Work includes a "NOTICE" text file as part of its distribution, then + any Derivative Works that You distribute must include a readable copy of + the attribution notices contained within such NOTICE file, excluding those + notices that do not pertain to any part of the Derivative Works, in at + least one of the following places: within a NOTICE text file distributed as + part of the Derivative Works; within the Source form or documentation, if + provided along with the Derivative Works; or, within a display generated by + the Derivative Works, if and wherever such third-party notices normally + appear. 
The contents of the NOTICE file are for informational purposes only + and do not modify the License. You may add Your own attribution notices + within Derivative Works that You distribute, alongside or as an addendum to + the NOTICE text from the Work, provided that such additional attribution + notices cannot be construed as modifying the License. You may add Your own + copyright statement to Your modifications and may provide additional or + different license terms and conditions for use, reproduction, or + distribution of Your modifications, or for any such Derivative Works as a + whole, provided Your use, reproduction, and distribution of the Work + otherwise complies with the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, any +Contribution intentionally submitted for inclusion in the Work by You to the +Licensor shall be under the terms and conditions of this License, without any +additional terms or conditions. Notwithstanding the above, nothing herein shall +supersede or modify the terms of any separate license agreement you may have +executed with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade names, +trademarks, service marks, or product names of the Licensor, except as required +for reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or agreed to in +writing, Licensor provides the Work (and each Contributor provides its +Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, either express or implied, including, without limitation, any warranties +or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A +PARTICULAR PURPOSE. You are solely responsible for determining the +appropriateness of using or redistributing the Work and assume any risks +associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, whether in +tort (including negligence), contract, or otherwise, unless required by +applicable law (such as deliberate and grossly negligent acts) or agreed to in +writing, shall any Contributor be liable to You for damages, including any +direct, indirect, special, incidental, or consequential damages of any +character arising as a result of this License or out of the use or inability to +use the Work (including but not limited to damages for loss of goodwill, work +stoppage, computer failure or malfunction, or any and all other commercial +damages or losses), even if such Contributor has been advised of the +possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing the Work or +Derivative Works thereof, You may choose to offer, and charge a fee for, +acceptance of support, warranty, indemnity, or other liability obligations +and/or rights consistent with this License. However, in accepting such +obligations, You may act only on Your own behalf and on Your sole +responsibility, not on behalf of any other Contributor, and only if You agree +to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..8289e2b --- /dev/null +++ b/Makefile @@ -0,0 +1,37 @@ +# ---------------------------------------------------------------------- +# +# File: Makefile +# +# Created: 25.06.2024 +# +# Copyright (C) 2024, ETH Zurich and University of Bologna. +# +# Author: Moritz Scherer, ETH Zurich +# +# ---------------------------------------------------------------------- +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the License); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an AS IS BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +CHIM_ROOT ?= $(shell pwd) +BENDER ?= bender -d $(CHIM_ROOT) + +CHS_ROOT ?= $(shell $(BENDER) path cheshire) +SNITCH_ROOT ?= $(shell $(BENDER) path snitch_cluster) +CHS_XLEN ?= 32 + +CHIM_HW_DIR ?= $(CHIM_ROOT)/hw +CHIM_SW_DIR ?= $(CHIM_ROOT)/sw + +-include $(CHS_ROOT)/cheshire.mk +-include $(CHIM_ROOT)/chimera.mk diff --git a/README.md b/README.md new file mode 100644 index 0000000..b8cc2ad --- /dev/null +++ b/README.md @@ -0,0 +1,70 @@ +# Chimera + +Chimera is a microcontroller SoC template for multi-cluster, heterogeneous systems. Its goal is to provide an energy-efficient, easy-to-extend template to integrate and interact with accelerators. + +Chimera is developed as part of the PULP project, a joint effort between ETH Zurich and the University of Bologna. + +You need a working RISC-V GCC toolchain and a recent version of QuestaSim in your path. We recommend using GCC-9.2.0 and Questa-2022.3; on IIS systems, you may use the pre-installed SEPP packages for this. + +## License + +Unless specified otherwise in the respective file headers, all code checked into this repository is made available under a permissive license. All hardware sources and tool scripts are licensed under the Solderpad Hardware License 0.51 (see LICENSE) or compatible licenses. Register file code (e.g. hw/regs/*.sv) is generated by a fork of lowRISC's regtool and licensed under Apache 2.0. All software sources are licensed under Apache 2.0. + +## Getting started + +If you are working on an IIS system, you can use the pre-installed SEPP packages to add the correct versions of the RISC-V toolchain and QuestaSim to your path. If you use bash, you can run +``` +riscv bash +questa-2022.3 bash +``` + + +If you are not on an IIS system, please ensure that a RISC-V toolchain and a QuestaSim installation are available in your path by checking that the outputs of +``` +which riscv32-unknown-elf-gcc +which vsim +``` +are not empty.
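+ +If either check comes back empty, add the tools to your path first. A minimal sketch, assuming a local RISC-V GCC and QuestaSim installation (the paths are placeholders; adjust them to your setup): +``` +export PATH=/path/to/riscv-gcc/bin:/path/to/questasim/bin:$PATH +```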
+ + +To install the required Python packages into your environment and set up dependencies, you can run +``` +pip install -r requirements.txt + +bender checkout + +make chs-hw-init +make snitch-hw-init +make chs-sim-all +``` + +To build files for ModelSim: + +`make chim-sim` + +To regenerate software tests: + +`make chim-sw` + +## Making Register modifications + + +To regenerate SoC Regs: + +`make regenerate_soc_regs` + +## Making Bootrom modifications + +To rebuild the Snitch bootrom: + +`make snitch_bootrom` + +## Simulation + +``` +cd target/sim/vsim +vsim +source setup.chimera_soc.tcl +source compile.tcl +source start.chimera_soc.tcl +``` diff --git a/bender.mk b/bender.mk new file mode 100644 index 0000000..4da9e5b --- /dev/null +++ b/bender.mk @@ -0,0 +1,29 @@ +# ---------------------------------------------------------------------- +# +# File: bender.mk +# +# Created: 25.06.2024 +# +# Copyright (C) 2024, ETH Zurich and University of Bologna. +# +# Author: Moritz Scherer, ETH Zurich +# +# ---------------------------------------------------------------------- +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the License); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an AS IS BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +COMMON_TARGS ?= +COMMON_TARGS += -t snitch_cluster -t cv32a6_convolve -t cva6 -t rtl + +SIM_TARGS = -t test -t sim diff --git a/chimera.mk b/chimera.mk new file mode 100644 index 0000000..f7985f4 --- /dev/null +++ b/chimera.mk @@ -0,0 +1,81 @@ +# ---------------------------------------------------------------------- +# +# File: chimera.mk +# +# Created: 26.06.2024 +# +# Copyright (C) 2024, ETH Zurich and University of Bologna. +# +# Author: Moritz Scherer, ETH Zurich +# +# ---------------------------------------------------------------------- +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the License); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an AS IS BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# SCHEREMO: This is a test config; change to something reasonable!
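+# NOTE: these counts appear to follow the default configuration in hw/chimera_pkg.sv: 1 CVA6 host hart +# plus 5 clusters x 9 Snitch cores = 46 interrupt-capable harts for the CLINT, and (presumably) two +# PLIC contexts per hart, i.e. 2 x 46 = 92. Keep them in sync with chimera_pkg.sv when changing the cluster setup.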
+ +CLINTCORES = 46 +PLICCORES = 92 +PLIC_NUM_INTRS = 92 + +.PHONY: update_plic +update_plic: $(CHS_ROOT)/hw/rv_plic.cfg.hjson + sed -i 's/src: .*/src: $(PLIC_NUM_INTRS),/' $< + sed -i 's/target: .*/target: $(PLICCORES),/' $< + +.PHONY: chs-hw-init +chs-hw-init: update_plic + make -B chs-hw-all CHS_XLEN=$(CHS_XLEN) + +.PHONY: snitch-hw-init +snitch-hw-init: + make -C $(SNITCH_ROOT)/target/snitch_cluster bin/snitch_cluster.vsim + +.PHONY: $(CHIM_SW_DIR)/include/regs/soc_ctrl.h +$(CHIM_SW_DIR)/include/regs/soc_ctrl.h: $(CHIM_ROOT)/hw/regs/chimera_regs.hjson + python $(CHIM_ROOT)/utils/reggen/regtool.py -D $< > $@ + +.PHONY: $(CHIM_HW_DIR)/regs/pcr.md +$(CHIM_HW_DIR)/regs/pcr.md: $(CHIM_ROOT)/hw/regs/chimera_regs.hjson + python $(CHIM_ROOT)/utils/reggen/regtool.py -d $< > $@ + + +.PHONY: snitch_bootrom +CHIM_BROM_SRCS = $(wildcard $(CHIM_ROOT)/hw/bootrom/snitch/*.S $(CHIM_ROOT)/hw/bootrom/snitch/*.c) $(CHIM_SW_LIBS) +CHIM_BROM_FLAGS = $(CHS_SW_LDFLAGS) -Os -fno-zero-initialized-in-bss -flto -fwhole-program -march=rv32im + +CHIM_BOOTROM_ALL += $(CHIM_ROOT)/hw/bootrom/snitch/snitch_bootrom.sv $(CHIM_ROOT)/hw/bootrom/snitch/snitch_bootrom.dump + +snitch_bootrom: $(CHIM_BOOTROM_ALL) + +$(CHIM_ROOT)/hw/bootrom/snitch/snitch_bootrom.elf: $(CHIM_ROOT)/hw/bootrom/snitch/snitch_bootrom.ld $(CHIM_BROM_SRCS) + $(CHS_SW_CC) -I$(CHIM_SW_DIR)/include/regs $(CHS_SW_INCLUDES) -T$< $(CHIM_BROM_FLAGS) -o $@ $(CHIM_BROM_SRCS) + +$(CHIM_ROOT)/hw/bootrom/snitch/snitch_bootrom.bin: $(CHIM_ROOT)/hw/bootrom/snitch/snitch_bootrom.elf + $(CHS_SW_OBJCOPY) -O binary $< $@ + +$(CHIM_ROOT)/hw/bootrom/snitch/snitch_bootrom.sv: $(CHIM_ROOT)/hw/bootrom/snitch/snitch_bootrom.bin $(CHS_ROOT)/util/gen_bootrom.py + $(CHS_ROOT)/util/gen_bootrom.py --sv-module snitch_bootrom $< > $@ + +.PHONY: regenerate_soc_regs +regenerate_soc_regs: $(CHIM_ROOT)/hw/regs/chimera_reg_pkg.sv $(CHIM_ROOT)/hw/regs/chimera_reg_top.sv $(CHIM_SW_DIR)/include/regs/soc_ctrl.h $(CHIM_HW_DIR)/regs/pcr.md + +.PHONY: $(CHIM_ROOT)/hw/regs/chimera_reg_pkg.sv $(CHIM_ROOT)/hw/regs/chimera_reg_top.sv +$(CHIM_ROOT)/hw/regs/chimera_reg_pkg.sv $(CHIM_ROOT)/hw/regs/chimera_reg_top.sv: $(CHIM_ROOT)/hw/regs/chimera_regs.hjson + python $(CHIM_ROOT)/utils/reggen/regtool.py -r $< --outdir $(dir $@) + +-include $(CHIM_ROOT)/bender.mk + +-include $(CHIM_ROOT)/sim.mk +-include $(CHIM_ROOT)/sw/sw.mk diff --git a/hw/bootrom/snitch/snitch_bootrom.S b/hw/bootrom/snitch/snitch_bootrom.S new file mode 100644 index 0000000..1c1b297 --- /dev/null +++ b/hw/bootrom/snitch/snitch_bootrom.S @@ -0,0 +1,70 @@ +# Copyright 2020 ETH Zurich and University of Bologna. +# Solderpad Hardware License, Version 0.51, see LICENSE for details.
+# SPDX-License-Identifier: SHL-0.51 + +#include + +.global _start +_start: + + csrrc x0, mstatus, 10 + + li x5, 0 + li x6, 0 + li x7, 0 + li x8, 0 + li x9, 0 + li x10, 0 + li x11, 0 + li x12, 0 + li x13, 0 + li x14, 0 + li x15, 0 + li x16, 0 + li x17, 0 + li x18, 0 + li x19, 0 + li x20, 0 + li x21, 0 + li x22, 0 + li x23, 0 + li x24, 0 + li x25, 0 + li x26, 0 + li x27, 0 + li x28, 0 + li x29, 0 + li x30, 0 + li x31, 0 + + call cluster_startup + + // Set trap vector + la t0, _trap_handler_wrap + csrrw x0, mtvec, t0 + + wfi + +run_from_reg: + la t0, __chim_regs // CHIMERA REGS Base Addr, 0x3000_1000 + lw t0, 0(t0) // CHIMERA_SNITCH_BOOT_ADDR_REG_OFFSET + jalr t0 + +exit: + la t0, __chim_regs // CHIMERA REGS Base Addr + sw t0, 4(t0) // CHIMERA_SNITCH_CLUSTER_1_RETURN_REG_OFFSET // SCHEREMO: TODO: Demultiplex different clusters + j _start + +// This wraps the C trap handler to save the (integer-only) caller-save +// registers and perform a proper machine-mode exception return. +.align 4 +_trap_handler_wrap: + lui t6,0x2040 + li t5, 0 + sw t5, 40(t6) + mret + +.global trap_vector +.weak trap_vector +trap_vector: + ret diff --git a/hw/bootrom/snitch/snitch_bootrom.bin b/hw/bootrom/snitch/snitch_bootrom.bin new file mode 100755 index 0000000000000000000000000000000000000000..bb7a3c781b9e4f1e0a5c54de1c89990855ca7be6 GIT binary patch literal 216 zcmXZV%?ZLV7>DtfwDtQ7{{tv=fehgQ(j|IS$O1(d5PImnxPw=58IKhT_3C43;K#$8 zkR+TFUpg~IW+fC;D521ZLSqU|C^V(ej6!n?Ehx04(27E9ipHKv#ee1H6@^>ogB>^3 uwx7S(+S$8sHq2dGXLkDRIvMu|k9F++Z>9Gp3byxo!NIWF{NUR>?)QG5fhSr3 literal 0 HcmV?d00001 diff --git a/hw/bootrom/snitch/snitch_bootrom.ld b/hw/bootrom/snitch/snitch_bootrom.ld new file mode 100644 index 0000000..60ba665 --- /dev/null +++ b/hw/bootrom/snitch/snitch_bootrom.ld @@ -0,0 +1,17 @@ +/* Copyright 2020 ETH Zurich and University of Bologna. */ +/* Solderpad Hardware License, Version 0.51, see LICENSE for details. */ +/* SPDX-License-Identifier: SHL-0.51 */ +ENTRY(_start) + +MEMORY { + snitch_bootrom (rx) : ORIGIN = 0x30000000, LENGTH = 4K +} + +SECTIONS +{ + . = 0x30000000; + .text : { *(.text) } + + __snitch_bootrom = ORIGIN(snitch_bootrom); + __chim_regs = 0x30001000; +} diff --git a/hw/bootrom/snitch/snitch_bootrom.sv b/hw/bootrom/snitch/snitch_bootrom.sv new file mode 100644 index 0000000..687b559 --- /dev/null +++ b/hw/bootrom/snitch/snitch_bootrom.sv @@ -0,0 +1,100 @@ +// Copyright 2022 ETH Zurich and University of Bologna. +// Solderpad Hardware License, Version 0.51, see LICENSE for details. +// SPDX-License-Identifier: SHL-0.51 +// +// Fabian Schuiki +// Florian Zaruba +// Stefan Mach +// Thomas Benz +// Paul Scheffler +// Wolfgang Roenninger +// +// AUTOMATICALLY GENERATED by gen_bootrom.py; edit the script instead. 
+ +module snitch_bootrom #( + parameter int unsigned AddrWidth = 32, + parameter int unsigned DataWidth = 32 +)( + input logic clk_i, + input logic rst_ni, + input logic req_i, + input logic [AddrWidth-1:0] addr_i, + output logic [DataWidth-1:0] data_o +); + localparam unsigned NumWords = 64; + logic [$clog2(NumWords)-1:0] word; + + assign word = addr_i / (DataWidth / 8); + + always_comb begin + data_o = '0; + unique case (word) + 000: data_o = 32'h30057073 /* 0x0000 */; + 001: data_o = 32'h00000293 /* 0x0004 */; + 002: data_o = 32'h00000313 /* 0x0008 */; + 003: data_o = 32'h00000393 /* 0x000c */; + 004: data_o = 32'h00000413 /* 0x0010 */; + 005: data_o = 32'h00000493 /* 0x0014 */; + 006: data_o = 32'h00000513 /* 0x0018 */; + 007: data_o = 32'h00000593 /* 0x001c */; + 008: data_o = 32'h00000613 /* 0x0020 */; + 009: data_o = 32'h00000693 /* 0x0024 */; + 010: data_o = 32'h00000713 /* 0x0028 */; + 011: data_o = 32'h00000793 /* 0x002c */; + 012: data_o = 32'h00000813 /* 0x0030 */; + 013: data_o = 32'h00000893 /* 0x0034 */; + 014: data_o = 32'h00000913 /* 0x0038 */; + 015: data_o = 32'h00000993 /* 0x003c */; + 016: data_o = 32'h00000a13 /* 0x0040 */; + 017: data_o = 32'h00000a93 /* 0x0044 */; + 018: data_o = 32'h00000b13 /* 0x0048 */; + 019: data_o = 32'h00000b93 /* 0x004c */; + 020: data_o = 32'h00000c13 /* 0x0050 */; + 021: data_o = 32'h00000c93 /* 0x0054 */; + 022: data_o = 32'h00000d13 /* 0x0058 */; + 023: data_o = 32'h00000d93 /* 0x005c */; + 024: data_o = 32'h00000e13 /* 0x0060 */; + 025: data_o = 32'h00000e93 /* 0x0064 */; + 026: data_o = 32'h00000f13 /* 0x0068 */; + 027: data_o = 32'h00000f93 /* 0x006c */; + 028: data_o = 32'h05c000ef /* 0x0070 */; + 029: data_o = 32'h00000297 /* 0x0074 */; + 030: data_o = 32'h03c28293 /* 0x0078 */; + 031: data_o = 32'h30529073 /* 0x007c */; + 032: data_o = 32'h10500073 /* 0x0080 */; + 033: data_o = 32'h00001297 /* 0x0084 */; + 034: data_o = 32'hf7c28293 /* 0x0088 */; + 035: data_o = 32'h0002a283 /* 0x008c */; + 036: data_o = 32'h000280e7 /* 0x0090 */; + 037: data_o = 32'h00001297 /* 0x0094 */; + 038: data_o = 32'hf6c28293 /* 0x0098 */; + 039: data_o = 32'h0052a223 /* 0x009c */; + 040: data_o = 32'hf61ff06f /* 0x00a0 */; + 041: data_o = 32'h00000013 /* 0x00a4 */; + 042: data_o = 32'h00000013 /* 0x00a8 */; + 043: data_o = 32'h00000013 /* 0x00ac */; + 044: data_o = 32'h02040fb7 /* 0x00b0 */; + 045: data_o = 32'h00000f13 /* 0x00b4 */; + 046: data_o = 32'h03efa423 /* 0x00b8 */; + 047: data_o = 32'h30200073 /* 0x00bc */; + 048: data_o = 32'h00008067 /* 0x00c0 */; + 049: data_o = 32'h00000000 /* 0x00c4 */; + 050: data_o = 32'h00000000 /* 0x00c8 */; + 051: data_o = 32'h304467f3 /* 0x00cc */; + 052: data_o = 32'h300467f3 /* 0x00d0 */; + 053: data_o = 32'h00008067 /* 0x00d4 */; + 054: data_o = 32'h00000000 /* 0x00d8 */; + 055: data_o = 32'h00000000 /* 0x00dc */; + 056: data_o = 32'h00000000 /* 0x00e0 */; + 057: data_o = 32'h00000000 /* 0x00e4 */; + 058: data_o = 32'h00000000 /* 0x00e8 */; + 059: data_o = 32'h00000000 /* 0x00ec */; + 060: data_o = 32'h00000000 /* 0x00f0 */; + 061: data_o = 32'h00000000 /* 0x00f4 */; + 062: data_o = 32'h00000000 /* 0x00f8 */; + 063: data_o = 32'h00000000 /* 0x00fc */; + default: data_o = '0; + endcase + end + +endmodule diff --git a/hw/bootrom/snitch/snitch_startup.c b/hw/bootrom/snitch/snitch_startup.c new file mode 100644 index 0000000..478459c --- /dev/null +++ b/hw/bootrom/snitch/snitch_startup.c @@ -0,0 +1,44 @@ +/* ===================================================================== + * Title: snitch_startup.c + * 
Description: + * + * $Date: 28.06.2024 + * + * ===================================================================== */ +/* + * Copyright (C) 2020 ETH Zurich and University of Bologna. + * + * Author: Moritz Scherer, ETH Zurich + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define set_csr(reg, bit) ({ unsigned long __tmp; \ + if (__builtin_constant_p(bit) && (unsigned long)(bit) < 32) \ + asm volatile ("csrrs %0, " #reg ", %1" : "=r"(__tmp) : "i"(bit)); \ + else \ + asm volatile ("csrrs %0, " #reg ", %1" : "=r"(__tmp) : "r"(bit)); \ + __tmp; }) + +#define IRQ_M_SOFT 3 + +#define MSTATUS_MIE 0x00000008 +#define MIP_MSIP (1 << IRQ_M_SOFT) + +void cluster_startup(){ + set_csr(mie, MIP_MSIP); + set_csr(mstatus, MSTATUS_MIE); // set M global interrupt enable + return; +} diff --git a/hw/chimera_cluster_adapter.sv b/hw/chimera_cluster_adapter.sv new file mode 100644 index 0000000..c8d8fbd --- /dev/null +++ b/hw/chimera_cluster_adapter.sv @@ -0,0 +1,358 @@ +// ---------------------------------------------------------------------- +// +// File: chimera_cluster_adapter.sv +// +// Created: 26.06.2024 +// +// Copyright (C) 2024, ETH Zurich and University of Bologna. +// +// Author: Moritz Scherer, ETH Zurich +// +// SPDX-License-Identifier: SHL-0.51 +// +// Copyright and related rights are licensed under the Solderpad Hardware License, +// Version 0.51 (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at http://solderpad.org/licenses/SHL-0.51. +// Unless required by applicable law or agreed to in writing, software, hardware and materials +// distributed under this License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and limitations under the License.
+// +// ---------------------------------------------------------------------- + +// Wraps a cluster's AXI ports with AXI CDCs on all narrow and wide ports and converts AXI ID widths to match; also demultiplexes wide requests between the memory island and the narrow crossbar. + +module chimera_cluster_adapter + #( + parameter int WideSlaveIdWidth = 0, // Needs to be defined since there is no wide slave port + + parameter int WidePassThroughRegionStart = '0, // Start address of Memory Island + parameter int WidePassThroughRegionEnd = '0, // End address of Memory Island + + parameter type narrow_in_req_t = logic, + parameter type narrow_in_resp_t = logic, + parameter type narrow_out_req_t = logic, + parameter type narrow_out_resp_t = logic, + parameter type wide_out_req_t = logic, + parameter type wide_out_resp_t = logic, + parameter type wide_in_req_t = logic, + parameter type wide_in_resp_t = logic, + + parameter type clu_narrow_out_req_t = logic, + parameter type clu_narrow_out_resp_t = logic, + parameter type clu_wide_out_req_t = logic, + parameter type clu_wide_out_resp_t = logic + + )( + input logic soc_clk_i, + input logic clu_clk_i, + input logic rst_ni, + + // From SOC + input narrow_in_req_t narrow_in_req_i, + output narrow_in_resp_t narrow_in_resp_o, + output narrow_out_req_t[1:0] narrow_out_req_o, + input narrow_out_resp_t[1:0] narrow_out_resp_i, + output wide_out_req_t wide_out_req_o, + input wide_out_resp_t wide_out_resp_i, + + // To Cluster + output narrow_in_req_t clu_narrow_in_req_o, + input narrow_in_resp_t clu_narrow_in_resp_i, + input clu_narrow_out_req_t clu_narrow_out_req_i, + output clu_narrow_out_resp_t clu_narrow_out_resp_o, + input clu_wide_out_req_t clu_wide_out_req_i, + output clu_wide_out_resp_t clu_wide_out_resp_o + ); + +`include "axi/typedef.svh" + + // SCHEREMO: Define AXI helper types for downstream iw/dw conversion + + localparam int WideDataWidth = $bits(wide_out_req_o.w.data); + localparam int NarrowDataWidth = $bits(narrow_out_req_o[0].w.data); + localparam int AddrWidth = $bits(narrow_out_req_o[0].aw.addr); + localparam int UserWidth = $bits(narrow_out_req_o[0].aw.user); + + localparam int ClusterNarrowMasterIdWidth = $bits(clu_narrow_out_req_i.aw.id); + localparam int ClusterWideMasterIdWidth = $bits(clu_wide_out_req_i.aw.id); + + localparam int SocNarrowMasterIdWidth = $bits(narrow_out_req_o[0].aw.id); + localparam int SocNarrowSlaveIdWidth = $bits(narrow_in_req_i.aw.id); + localparam int SocWideMasterIdWidth = $bits(wide_out_req_o.aw.id); + + typedef logic [UserWidth-1:0] axi_user_width_t; + typedef logic [AddrWidth-1:0] axi_addr_width_t; + + typedef logic [NarrowDataWidth-1:0] axi_narrow_data_width_t; + typedef logic [NarrowDataWidth/8-1:0] axi_narrow_strb_width_t; + + typedef logic [WideDataWidth-1:0] axi_wide_data_width_t; + typedef logic [WideDataWidth/8-1:0] axi_wide_strb_width_t; + + typedef logic [SocNarrowMasterIdWidth-1:0] axi_soc_narrow_mst_id_width_t; + typedef logic [SocNarrowSlaveIdWidth-1:0] axi_soc_narrow_slv_id_width_t; + typedef logic [SocWideMasterIdWidth-1:0] axi_soc_wide_mst_id_width_y; + + `AXI_TYPEDEF_ALL(axi_clu_wide_out, axi_addr_width_t, axi_soc_wide_mst_id_width_y, axi_wide_data_width_t, axi_wide_strb_width_t, axi_user_width_t) + `AXI_TYPEDEF_ALL(axi_narrow_in, axi_addr_width_t, axi_soc_narrow_slv_id_width_t, axi_narrow_data_width_t, axi_narrow_strb_width_t, axi_user_width_t) + `AXI_TYPEDEF_ALL(axi_narrow_out, axi_addr_width_t, axi_soc_narrow_mst_id_width_t, axi_narrow_data_width_t, axi_narrow_strb_width_t, axi_user_width_t) + 
`AXI_TYPEDEF_ALL(axi_chimera_cluster_wrapper_out_wide_to_narrow, axi_addr_width_t, axi_soc_narrow_mst_id_width_t, axi_wide_data_width_t, axi_wide_strb_width_t, axi_user_width_t) + + // Direct mst outputs of cluster -> has extra id bits on mst, gets iw converted + + clu_narrow_out_req_t axi_from_cluster_iwc_req; + clu_narrow_out_resp_t axi_from_cluster_iwc_resp; + clu_wide_out_req_t axi_from_cluster_wide_iwc_req; + clu_wide_out_resp_t axi_from_cluster_wide_iwc_resp; + + // Id width adapted mst outputs of cluster + + narrow_out_req_t axi_from_cluster_req; + narrow_out_resp_t axi_from_cluster_resp; + wide_out_req_t axi_from_cluster_wide_req; + wide_out_resp_t axi_from_cluster_wide_resp; + + // Wide mst is demuxed to memory island and rest of SoC + + wide_out_req_t axi_from_cluster_wide_premux_req, axi_from_cluster_wide_memisl_req, axi_from_cluster_wide_to_narrow_req; + wide_out_resp_t axi_from_cluster_wide_premux_resp, axi_from_cluster_wide_memisl_resp, axi_from_cluster_wide_to_narrow_resp; + + // Rest of SoC is width converted from wide to narrow + + axi_chimera_cluster_wrapper_out_wide_to_narrow_req_t axi_from_cluster_wide_to_narrow_iwc_req; + axi_chimera_cluster_wrapper_out_wide_to_narrow_resp_t axi_from_cluster_wide_to_narrow_iwc_resp; + + // Direct slv ports from SoC crossbar + + narrow_in_resp_t axi_to_cluster_resp; + narrow_in_req_t axi_to_cluster_req; + + assign clu_narrow_in_req_o = axi_to_cluster_req; + assign axi_to_cluster_resp = clu_narrow_in_resp_i; + assign axi_from_cluster_iwc_req = clu_narrow_out_req_i; + assign clu_narrow_out_resp_o = axi_from_cluster_iwc_resp; + + assign axi_from_cluster_wide_iwc_req = clu_wide_out_req_i; + assign clu_wide_out_resp_o = axi_from_cluster_wide_iwc_resp; + + // WIDE-TO-NARROW CONVERSION + // Catch requests over the wide port which do not go to the memory island; reroute them over the narrow AXI bus. 
+ + logic ar_wide_sel, aw_wide_sel; + + // assign ar_wide_sel = (axi_from_cluster_wide_premux_req.ar.addr >= WidePassThroughRegionStart) && (axi_from_cluster_wide_premux_req.ar.addr < WidePassThroughRegionEnd); + // assign aw_wide_sel = (axi_from_cluster_wide_premux_req.aw.addr >= WidePassThroughRegionStart) && (axi_from_cluster_wide_premux_req.aw.addr < WidePassThroughRegionEnd); + + assign ar_wide_sel = '0; + assign aw_wide_sel = '0; + + axi_demux_simple #( + .AxiIdWidth(WideSlaveIdWidth), + .AtopSupport(0), + .axi_req_t(wide_out_req_t), + .axi_resp_t(wide_out_resp_t), + .NoMstPorts(2), + .MaxTrans(2), + .AxiLookBits(WideSlaveIdWidth), + .UniqueIds('1) + ) + i_wide_demux ( + .clk_i(soc_clk_i), + .rst_ni, + .test_i('0), + .slv_req_i(axi_from_cluster_wide_premux_req), + .slv_aw_select_i(aw_wide_sel), + .slv_ar_select_i(ar_wide_sel), + .slv_resp_o(axi_from_cluster_wide_premux_resp), + .mst_reqs_o({axi_from_cluster_wide_memisl_req, axi_from_cluster_wide_to_narrow_req}), + .mst_resps_i({axi_from_cluster_wide_memisl_resp, axi_from_cluster_wide_to_narrow_resp}) + ); + + assign wide_out_req_o = axi_from_cluster_wide_memisl_req; + assign axi_from_cluster_wide_memisl_resp = wide_out_resp_i; + + axi_iw_converter #( + .AxiSlvPortIdWidth ( WideSlaveIdWidth ), + .AxiMstPortIdWidth ( SocNarrowMasterIdWidth ), + .AxiSlvPortMaxUniqIds ( 1 ), + .AxiSlvPortMaxTxnsPerId ( 1 ), + .AxiSlvPortMaxTxns ( 2 ), + .AxiMstPortMaxUniqIds ( 2 ), + .AxiMstPortMaxTxnsPerId ( 2 ), + .AxiAddrWidth ( AddrWidth ), + .AxiDataWidth ( WideDataWidth ), + .AxiUserWidth ( UserWidth ), + .slv_req_t ( wide_out_req_t ), + .slv_resp_t ( wide_out_resp_t ), + .mst_req_t ( axi_chimera_cluster_wrapper_out_wide_to_narrow_req_t ), + .mst_resp_t ( axi_chimera_cluster_wrapper_out_wide_to_narrow_resp_t ) + ) + wide_to_narrow_mst_iw_converter( + .clk_i ( soc_clk_i ), + .rst_ni ( rst_ni ), + .slv_req_i ( axi_from_cluster_wide_to_narrow_req ), + .slv_resp_o ( axi_from_cluster_wide_to_narrow_resp ), + .mst_req_o ( axi_from_cluster_wide_to_narrow_iwc_req ), + .mst_resp_i ( axi_from_cluster_wide_to_narrow_iwc_resp ) + ); + + axi_dw_converter #( + .AxiMaxReads(2), + + .AxiSlvPortDataWidth( WideDataWidth ), + .AxiMstPortDataWidth( NarrowDataWidth ), + .AxiAddrWidth( AddrWidth ), + .AxiIdWidth( SocNarrowMasterIdWidth ), + + .aw_chan_t(axi_narrow_out_aw_chan_t), + .b_chan_t(axi_narrow_out_b_chan_t), + .ar_chan_t(axi_narrow_out_ar_chan_t), + + .slv_r_chan_t(axi_chimera_cluster_wrapper_out_wide_to_narrow_r_chan_t), + .slv_w_chan_t(axi_chimera_cluster_wrapper_out_wide_to_narrow_w_chan_t), + .mst_r_chan_t(axi_narrow_out_r_chan_t), + .mst_w_chan_t(axi_narrow_out_w_chan_t), + + .axi_mst_req_t ( narrow_out_req_t ), + .axi_mst_resp_t ( narrow_out_resp_t ), + .axi_slv_req_t ( axi_chimera_cluster_wrapper_out_wide_to_narrow_req_t ), + .axi_slv_resp_t ( axi_chimera_cluster_wrapper_out_wide_to_narrow_resp_t ) + ) + i_wide_to_narrow_dw_converter + ( + .clk_i(soc_clk_i), + .rst_ni, + .slv_req_i(axi_from_cluster_wide_to_narrow_iwc_req), + .slv_resp_o(axi_from_cluster_wide_to_narrow_iwc_resp), + .mst_req_o(narrow_out_req_o[1]), + .mst_resp_i(narrow_out_resp_i[1]) + ); + + // NARROW MASTER PORT ID WIDTH CONVERSION + + axi_iw_converter #( + .AxiSlvPortIdWidth ( ClusterNarrowMasterIdWidth), + .AxiMstPortIdWidth ( SocNarrowMasterIdWidth ), + + .AxiSlvPortMaxUniqIds ( 2 ), + .AxiSlvPortMaxTxnsPerId ( 2 ), + .AxiSlvPortMaxTxns ( 4 ), + + .AxiMstPortMaxUniqIds ( 2 ), + .AxiMstPortMaxTxnsPerId ( 4 ), + + .AxiAddrWidth ( AddrWidth ), + .AxiDataWidth ( NarrowDataWidth ), + 
.AxiUserWidth ( UserWidth ), + .slv_req_t ( clu_narrow_out_req_t ), + .slv_resp_t ( clu_narrow_out_resp_t ), + .mst_req_t ( narrow_out_req_t ), + .mst_resp_t ( narrow_out_resp_t ) + ) + narrow_mst_iw_converter ( + .clk_i ( clu_clk_i ), + .rst_ni ( rst_ni ), + .slv_req_i ( axi_from_cluster_iwc_req ), + .slv_resp_o ( axi_from_cluster_iwc_resp ), + .mst_req_o ( axi_from_cluster_req ), + .mst_resp_i ( axi_from_cluster_resp ) + ); + + // WIDE MASTER PORT ID WIDTH CONVERSION + + axi_iw_converter #( + .AxiSlvPortIdWidth ( ClusterWideMasterIdWidth), + .AxiMstPortIdWidth ( WideSlaveIdWidth), + + .AxiSlvPortMaxUniqIds ( 2 ), + .AxiSlvPortMaxTxnsPerId ( 2 ), + .AxiSlvPortMaxTxns ( 4 ), + + .AxiMstPortMaxUniqIds ( 2 ), + .AxiMstPortMaxTxnsPerId ( 4 ), + + .AxiAddrWidth ( AddrWidth ), + .AxiDataWidth ( WideDataWidth ), + .AxiUserWidth ( UserWidth ), + .slv_req_t ( clu_wide_out_req_t ), + .slv_resp_t ( clu_wide_out_resp_t ), + .mst_req_t ( wide_out_req_t ), + .mst_resp_t ( wide_out_resp_t ) + ) + wide_mst_iw_converter ( + .clk_i ( clu_clk_i ), + .rst_ni ( rst_ni ), + .slv_req_i ( axi_from_cluster_wide_iwc_req ), + .slv_resp_o ( axi_from_cluster_wide_iwc_resp ), + .mst_req_o ( axi_from_cluster_wide_req ), + .mst_resp_i ( axi_from_cluster_wide_resp ) + ); + + // AXI CDCS + + axi_cdc #( + .aw_chan_t(axi_narrow_in_aw_chan_t), + .w_chan_t(axi_narrow_in_w_chan_t), + .b_chan_t(axi_narrow_in_b_chan_t), + .ar_chan_t(axi_narrow_in_ar_chan_t), + .r_chan_t(axi_narrow_in_r_chan_t), + .axi_req_t(narrow_in_req_t), + .axi_resp_t(narrow_in_resp_t) + ) + narrow_slv_cdc ( + .src_clk_i(soc_clk_i), + .src_rst_ni(rst_ni), + .src_req_i(narrow_in_req_i), + .src_resp_o(narrow_in_resp_o), + + .dst_clk_i(clu_clk_i), + .dst_rst_ni(rst_ni), + .dst_req_o(axi_to_cluster_req), + .dst_resp_i(axi_to_cluster_resp) + ); + + + axi_cdc #( + .aw_chan_t(axi_narrow_out_aw_chan_t), + .w_chan_t(axi_narrow_out_w_chan_t), + .b_chan_t(axi_narrow_out_b_chan_t), + .ar_chan_t(axi_narrow_out_ar_chan_t), + .r_chan_t(axi_narrow_out_r_chan_t), + .axi_req_t(narrow_out_req_t), + .axi_resp_t(narrow_out_resp_t) + ) + narrow_mst_cdc ( + .src_clk_i(clu_clk_i), + .src_rst_ni(rst_ni), + .src_req_i(axi_from_cluster_req), + .src_resp_o(axi_from_cluster_resp), + + .dst_clk_i(soc_clk_i), + .dst_rst_ni(rst_ni), + .dst_req_o(narrow_out_req_o[0]), + .dst_resp_i(narrow_out_resp_i[0]) + ); + + axi_cdc #( + .aw_chan_t(axi_clu_wide_out_aw_chan_t), + .w_chan_t(axi_clu_wide_out_w_chan_t), + .b_chan_t(axi_clu_wide_out_b_chan_t), + .ar_chan_t(axi_clu_wide_out_ar_chan_t), + .r_chan_t(axi_clu_wide_out_r_chan_t), + .axi_req_t(wide_out_req_t), + .axi_resp_t(wide_out_resp_t) + ) + wide_mst_cdc ( + .src_clk_i(clu_clk_i), + .src_rst_ni(rst_ni), + .src_req_i(axi_from_cluster_wide_req), + .src_resp_o(axi_from_cluster_wide_resp), + + .dst_clk_i(soc_clk_i), + .dst_rst_ni(rst_ni), + .dst_req_o(axi_from_cluster_wide_premux_req), + .dst_resp_i(axi_from_cluster_wide_premux_resp) + ); + +endmodule: chimera_cluster_adapter diff --git a/hw/chimera_pkg.sv b/hw/chimera_pkg.sv new file mode 100644 index 0000000..29684e0 --- /dev/null +++ b/hw/chimera_pkg.sv @@ -0,0 +1,113 @@ +// ---------------------------------------------------------------------- +// +// File: chimera_pkg.sv +// +// Created: 24.06.2024 +// +// Copyright (C) 2024, ETH Zurich and University of Bologna. 
+// +// Author: Moritz Scherer, ETH Zurich +// +// SPDX-License-Identifier: SHL-0.51 +// +// Copyright and related rights are licensed under the Solderpad Hardware License, +// Version 0.51 (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at http://solderpad.org/licenses/SHL-0.51. +// Unless required by applicable law or agreed to in writing, software, hardware and materials +// distributed under this License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and limitations under the License. +// +// ---------------------------------------------------------------------- + +package chimera_pkg; + + import cheshire_pkg::*; + + // ACCEL CFG + localparam int ExtClusters = 5; + + typedef struct packed { + logic [iomsb(ExtClusters):0] hasWideMasterPort; + byte_bt [iomsb(ExtClusters):0] NrCores; + } ClusterConfig; + + localparam ClusterConfig ChimeraClusterCfg = { + hasWideMasterPort: {1'b1, 1'b1, 1'b1, 1'b1, 1'b1}, + NrCores: {8'h9, 8'h9, 8'h9, 8'h9, 8'h9} + }; + + function automatic int _sumVector(byte_bt [iomsb(ExtClusters):0] vector, int vectorLen); + int sum = 0; + for(int i=0; i 5 clusters, 1 host core, 1 DMA, 1 DBG Unit + cfg.AxiMstIdWidth = 4; + + cfg.MemIslWidePorts = $countones(ChimeraClusterCfg.hasWideMasterPort); + cfg.AxiExtNumWideMst = $countones(ChimeraClusterCfg.hasWideMasterPort); + // SCHEREMO: Two ports for each cluster: one to convert stray wides, one for the original narrow + cfg.AxiExtNumMst = ExtClusters + $countones(ChimeraClusterCfg.hasWideMasterPort); + cfg.AxiExtNumSlv = ExtClusters; + cfg.AxiExtNumRules = ExtClusters; + cfg.AxiExtRegionIdx = {8'h4, 8'h3, 8'h2, 8'h1, 8'h0}; + cfg.AxiExtRegionStart = {64'h4080_0000, 64'h4060_0000, 64'h4040_0000, 64'h4020_0000, 64'h4000_0000}; + cfg.AxiExtRegionEnd = {64'h40A0_0000, 64'h4080_0000, 64'h4060_0000, 64'h4040_0000, 64'h4020_0000}; + + // REG CFG + cfg.RegExtNumSlv = ExtRegNum; + cfg.RegExtNumRules = ExtRegNum; + cfg.RegExtRegionIdx = {8'h1, 8'h0}; // SnitchBootROM + cfg.RegExtRegionStart = {TopLevelRegionStart, SnitchBootROMRegionStart}; + cfg.RegExtRegionEnd = {TopLevelRegionEnd, SnitchBootROMRegionEnd}; + + // ACCEL HART/IRQ CFG + cfg.NumExtIrqHarts = ExtCores; + cfg.NumExtDbgHarts = ExtCores; + cfg.NumExtOutIntrTgts = ExtCores; + + return cfg; + endfunction // gen_chimera_cfg + + localparam int numCfgs = 1; + + localparam cheshire_cfg_t [numCfgs-1:0] ChimeraCfg = + {gen_chimera_cfg() + }; +endpackage diff --git a/hw/chimera_top_wrapper.sv b/hw/chimera_top_wrapper.sv new file mode 100644 index 0000000..34d9291 --- /dev/null +++ b/hw/chimera_top_wrapper.sv @@ -0,0 +1,488 @@ +// ---------------------------------------------------------------------- +// +// File: chimera_top_wrapper.sv +// +// Created: 24.06.2024 +// +// Copyright (C) 2024, ETH Zurich and University of Bologna. +// +// Author: Moritz Scherer, ETH Zurich +// +// SPDX-License-Identifier: SHL-0.51 +// +// Copyright and related rights are licensed under the Solderpad Hardware License, +// Version 0.51 (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at http://solderpad.org/licenses/SHL-0.51. 
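As a quick cross-check of the chimera_pkg.sv configuration above, the following host-side C sketch (illustrative only, not part of the patch) recomputes the per-cluster AXI regions and the external hart count from the same constants: five clusters of nine Snitch cores each, mapped in 2 MiB windows starting at 0x4000_0000, matching cfg.AxiExtRegionStart/End and cfg.NumExtIrqHarts.

/* Illustrative sketch: recompute the cluster address map and hart count
 * from the constants in chimera_pkg.sv. Not part of the RTL patch. */
#include <stdint.h>
#include <stdio.h>

#define NUM_CLUSTERS    5            /* ExtClusters                  */
#define CORES_PER_CLU   9            /* NrCores = 8'h9 per cluster   */
#define CLU_REGION_BASE 0x40000000u  /* cfg.AxiExtRegionStart[0]     */
#define CLU_REGION_SIZE 0x00200000u  /* 2 MiB window per cluster     */

int main(void) {
  unsigned ext_cores = 0;
  for (int i = 0; i < NUM_CLUSTERS; i++) {
    uint64_t start = CLU_REGION_BASE + (uint64_t)i * CLU_REGION_SIZE;
    uint64_t end   = start + CLU_REGION_SIZE;
    printf("cluster %d: [0x%08llx, 0x%08llx)\n", i + 1,
           (unsigned long long)start, (unsigned long long)end);
    ext_cores += CORES_PER_CLU;
  }
  printf("external harts: %u\n", ext_cores); /* = cfg.NumExtIrqHarts = 45 */
  return 0;
}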
+// Unless required by applicable law or agreed to in writing, software, hardware and materials +// distributed under this License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and limitations under the License. +// +// ---------------------------------------------------------------------- + +module chimera_top_wrapper + import cheshire_pkg::*; + import chimera_pkg::*; + import chimera_reg_pkg::*; + #( + parameter int unsigned SelectedCfg = 0 + ) + ( + input logic soc_clk_i, + input logic clu_clk_i, + input logic rst_ni, + input logic test_mode_i, + input logic [1:0] boot_mode_i, + input logic rtc_i, + // JTAG interface + input logic jtag_tck_i, + input logic jtag_trst_ni, + input logic jtag_tms_i, + input logic jtag_tdi_i, + output logic jtag_tdo_o, + output logic jtag_tdo_oe_o, + // UART interface + output logic uart_tx_o, + input logic uart_rx_i, + // UART modem flow control + output logic uart_rts_no, + output logic uart_dtr_no, + input logic uart_cts_ni, + input logic uart_dsr_ni, + input logic uart_dcd_ni, + input logic uart_rin_ni, + // I2C interface + output logic i2c_sda_o, + input logic i2c_sda_i, + output logic i2c_sda_en_o, + output logic i2c_scl_o, + input logic i2c_scl_i, + output logic i2c_scl_en_o, + // SPI host interface + output logic spih_sck_o, + output logic spih_sck_en_o, + output logic [SpihNumCs-1:0] spih_csb_o, + output logic [SpihNumCs-1:0] spih_csb_en_o, + output logic [ 3:0] spih_sd_o, + output logic [ 3:0] spih_sd_en_o, + input logic [ 3:0] spih_sd_i, + // GPIO interface + input logic [31:0] gpio_i, + output logic [31:0] gpio_o, + output logic [31:0] gpio_en_o, + // Serial link interface + input logic [SlinkNumChan-1:0] slink_rcv_clk_i, + output logic [SlinkNumChan-1:0] slink_rcv_clk_o, + input logic [SlinkNumChan-1:0][SlinkNumLanes-1:0] slink_i, + output logic [SlinkNumChan-1:0][SlinkNumLanes-1:0] slink_o, + // VGA interface + output logic vga_hsync_o, + output logic vga_vsync_o, + output logic [Cfg.VgaRedWidth -1:0] vga_red_o, + output logic [Cfg.VgaGreenWidth-1:0] vga_green_o, + output logic [Cfg.VgaBlueWidth -1:0] vga_blue_o + ); + +`include "axi/typedef.svh" +`include "common_cells/registers.svh" +`include "common_cells/assertions.svh" +`include "cheshire/typedef.svh" + + // Cheshire config + localparam cheshire_cfg_t Cfg = ChimeraCfg[SelectedCfg]; + `CHESHIRE_TYPEDEF_ALL(, Cfg) + + localparam type axi_wide_mst_req_t = mem_isl_wide_axi_mst_req_t; + localparam type axi_wide_mst_rsp_t = mem_isl_wide_axi_mst_rsp_t; + localparam type axi_wide_slv_req_t = mem_isl_wide_axi_slv_req_t; + localparam type axi_wide_slv_rsp_t = mem_isl_wide_axi_slv_rsp_t; + + chimera_reg2hw_t reg2hw; + + // External AXI crossbar ports + axi_mst_req_t [iomsb(Cfg.AxiExtNumMst):0] axi_mst_req; + axi_mst_rsp_t [iomsb(Cfg.AxiExtNumMst):0] axi_mst_rsp; + axi_wide_mst_req_t [iomsb(Cfg.AxiExtNumWideMst):0] axi_wide_mst_req; + axi_wide_mst_rsp_t [iomsb(Cfg.AxiExtNumWideMst):0] axi_wide_mst_rsp; + axi_slv_req_t [iomsb(Cfg.AxiExtNumSlv):0] axi_slv_req; + axi_slv_rsp_t [iomsb(Cfg.AxiExtNumSlv):0] axi_slv_rsp; + + // External reg demux slaves + reg_req_t [iomsb(Cfg.RegExtNumSlv):0] reg_slv_req; + reg_rsp_t [iomsb(Cfg.RegExtNumSlv):0] reg_slv_rsp; + + // Interrupts from and to clusters + logic [iomsb(Cfg.NumExtInIntrs):0] intr_ext_in; + logic [iomsb(Cfg.NumExtOutIntrTgts):0][iomsb(Cfg.NumExtOutIntrs):0] intr_ext_out; + + // Interrupt requests to cluster cores + 
logic [iomsb(NumIrqCtxts*Cfg.NumExtIrqHarts):0] xeip_ext; + logic [iomsb(Cfg.NumExtIrqHarts):0] mtip_ext; + logic [iomsb(Cfg.NumExtIrqHarts):0] msip_ext; + + // Debug interface to cluster cores + logic dbg_active; + logic [iomsb(Cfg.NumExtDbgHarts):0] dbg_ext_req; + logic [iomsb(Cfg.NumExtDbgHarts):0] dbg_ext_unavail; + + cheshire_soc #( + .Cfg(Cfg), + .ExtHartinfo('0), + .axi_ext_llc_req_t(axi_mst_req_t), + .axi_ext_llc_rsp_t(axi_mst_rsp_t), + .axi_ext_mst_req_t(axi_mst_req_t), + .axi_ext_mst_rsp_t(axi_mst_rsp_t), + .axi_ext_wide_mst_req_t(axi_wide_mst_req_t), + .axi_ext_wide_mst_rsp_t(axi_wide_mst_rsp_t), + .axi_ext_slv_req_t(axi_slv_req_t), + .axi_ext_slv_rsp_t(axi_slv_rsp_t), + .reg_ext_req_t(reg_req_t), + .reg_ext_rsp_t(reg_rsp_t) + ) i_cheshire + ( + .clk_i(soc_clk_i), + .rst_ni, + .test_mode_i, + .boot_mode_i, + .rtc_i, + // External AXI LLC (DRAM) port + .axi_llc_mst_req_o(), + .axi_llc_mst_rsp_i('0), + // External AXI crossbar ports + .axi_ext_mst_req_i(axi_mst_req), + .axi_ext_mst_rsp_o(axi_mst_rsp), + .axi_ext_wide_mst_req_i(axi_wide_mst_req), + .axi_ext_wide_mst_rsp_o(axi_wide_mst_rsp), + .axi_ext_slv_req_o(axi_slv_req), + .axi_ext_slv_rsp_i(axi_slv_rsp), + // External reg demux slaves + .reg_ext_slv_req_o(reg_slv_req), + .reg_ext_slv_rsp_i(reg_slv_rsp), + // Interrupts from and to external targets + .intr_ext_i(intr_ext_in), + .intr_ext_o(intr_ext_out), + // Interrupt requests to external harts + .xeip_ext_o(xeip_ext), + .mtip_ext_o(mtip_ext), + .msip_ext_o(msip_ext), + // Debug interface to external harts + .dbg_active_o(dbg_active), + .dbg_ext_req_o(dbg_ext_req), + .dbg_ext_unavail_i(dbg_ext_unavail), + // JTAG interface + .jtag_tck_i, + .jtag_trst_ni, + .jtag_tms_i, + .jtag_tdi_i, + .jtag_tdo_o, + .jtag_tdo_oe_o, + // UART interface + .uart_tx_o, + .uart_rx_i, + // UART modem flow control + .uart_rts_no, + .uart_dtr_no, + .uart_cts_ni, + .uart_dsr_ni, + .uart_dcd_ni, + .uart_rin_ni, + // I2C interface + .i2c_sda_o, + .i2c_sda_i, + .i2c_sda_en_o, + .i2c_scl_o, + .i2c_scl_i, + .i2c_scl_en_o, + // SPI host interface + .spih_sck_o, + .spih_sck_en_o, + .spih_csb_o, + .spih_csb_en_o, + .spih_sd_o, + .spih_sd_en_o, + .spih_sd_i, + // GPIO interface + .gpio_i, + .gpio_o, + .gpio_en_o, + // Serial link interface + .slink_rcv_clk_i, + .slink_rcv_clk_o, + .slink_i, + .slink_o, + // VGA interface + .vga_hsync_o, + .vga_vsync_o, + .vga_red_o, + .vga_green_o, + .vga_blue_o + ); + + // TOP-LEVEL REG + + chimera_reg_top #( + .reg_req_t(reg_req_t), + .reg_rsp_t(reg_rsp_t) + ) + i_reg_top ( + .clk_i(soc_clk_i), + .rst_ni, + .reg_req_i(reg_slv_req[TopLevelIdx]), + .reg_rsp_o(reg_slv_rsp[TopLevelIdx]), + .reg2hw(reg2hw), + .devmode_i('1) + ); + + + // SNITCH BOOTROM + + logic [31:0] snitch_bootrom_addr; + logic [31:0] snitch_bootrom_data, snitch_bootrom_data_q; + logic snitch_bootrom_req, snitch_bootrom_req_q; + logic snitch_bootrom_we, snitch_bootrom_we_q; + + // Delay response by one cycle to fulfill mem protocol + + `FF(snitch_bootrom_data_q, snitch_bootrom_data, '0, soc_clk_i, rst_ni) + `FF(snitch_bootrom_req_q, snitch_bootrom_req, '0, soc_clk_i, rst_ni) + `FF(snitch_bootrom_we_q, snitch_bootrom_we, '0, soc_clk_i, rst_ni) + + reg_to_mem #( + .AW ( 32 ), + .DW ( 32 ), + .req_t ( reg_req_t ), + .rsp_t ( reg_rsp_t ) + ) i_reg_to_snitch_bootrom ( + .clk_i(soc_clk_i), + .rst_ni, + .reg_req_i ( reg_slv_req[SnitchBootROMIdx] ), + .reg_rsp_o ( reg_slv_rsp[SnitchBootROMIdx] ), + .req_o ( snitch_bootrom_req ), + .gnt_i ( snitch_bootrom_req ), + .we_o ( snitch_bootrom_we ), + .addr_o ( 
snitch_bootrom_addr ), + .wdata_o ( ), + .wstrb_o ( ), + .rdata_i ( snitch_bootrom_data_q ), + .rvalid_i ( snitch_bootrom_req_q ), + .rerror_i ( snitch_bootrom_we_q ) + ); + + snitch_bootrom #( + .AddrWidth ( 32 ), + .DataWidth ( 32 ) + ) i_snitch_bootrom ( + .clk_i(soc_clk_i), + .rst_ni, + .req_i ( snitch_bootrom_req ), + .addr_i ( snitch_bootrom_addr ), + .data_o ( snitch_bootrom_data ) + ); + + // Cluster clock gates + + logic [ExtClusters-1:0] cluster_clock_gate_en; + logic [ExtClusters-1:0] clu_clk_gated; + assign cluster_clock_gate_en = {reg2hw.cluster_5_clk_gate_en, reg2hw.cluster_4_clk_gate_en, reg2hw.cluster_3_clk_gate_en, reg2hw.cluster_2_clk_gate_en, reg2hw.cluster_1_clk_gate_en}; + + genvar extClusterIdx; + generate + for(extClusterIdx=0; extClusterIdx HW type + typedef struct packed { + chimera_reg2hw_snitch_boot_addr_reg_t snitch_boot_addr; // [228:197] + chimera_reg2hw_snitch_intr_handler_addr_reg_t snitch_intr_handler_addr; // [196:165] + chimera_reg2hw_snitch_cluster_1_return_reg_t snitch_cluster_1_return; // [164:133] + chimera_reg2hw_snitch_cluster_2_return_reg_t snitch_cluster_2_return; // [132:101] + chimera_reg2hw_snitch_cluster_3_return_reg_t snitch_cluster_3_return; // [100:69] + chimera_reg2hw_snitch_cluster_4_return_reg_t snitch_cluster_4_return; // [68:37] + chimera_reg2hw_snitch_cluster_5_return_reg_t snitch_cluster_5_return; // [36:5] + chimera_reg2hw_cluster_1_clk_gate_en_reg_t cluster_1_clk_gate_en; // [4:4] + chimera_reg2hw_cluster_2_clk_gate_en_reg_t cluster_2_clk_gate_en; // [3:3] + chimera_reg2hw_cluster_3_clk_gate_en_reg_t cluster_3_clk_gate_en; // [2:2] + chimera_reg2hw_cluster_4_clk_gate_en_reg_t cluster_4_clk_gate_en; // [1:1] + chimera_reg2hw_cluster_5_clk_gate_en_reg_t cluster_5_clk_gate_en; // [0:0] + } chimera_reg2hw_t; + + // HW -> register type + typedef struct packed { + chimera_hw2reg_snitch_cluster_1_return_reg_t snitch_cluster_1_return; // [164:132] + chimera_hw2reg_snitch_cluster_2_return_reg_t snitch_cluster_2_return; // [131:99] + chimera_hw2reg_snitch_cluster_3_return_reg_t snitch_cluster_3_return; // [98:66] + chimera_hw2reg_snitch_cluster_4_return_reg_t snitch_cluster_4_return; // [65:33] + chimera_hw2reg_snitch_cluster_5_return_reg_t snitch_cluster_5_return; // [32:0] + } chimera_hw2reg_t; + + // Register offsets + parameter logic [BlockAw-1:0] CHIMERA_SNITCH_BOOT_ADDR_OFFSET = 6'h 0; + parameter logic [BlockAw-1:0] CHIMERA_SNITCH_INTR_HANDLER_ADDR_OFFSET = 6'h 4; + parameter logic [BlockAw-1:0] CHIMERA_SNITCH_CLUSTER_1_RETURN_OFFSET = 6'h 8; + parameter logic [BlockAw-1:0] CHIMERA_SNITCH_CLUSTER_2_RETURN_OFFSET = 6'h c; + parameter logic [BlockAw-1:0] CHIMERA_SNITCH_CLUSTER_3_RETURN_OFFSET = 6'h 10; + parameter logic [BlockAw-1:0] CHIMERA_SNITCH_CLUSTER_4_RETURN_OFFSET = 6'h 14; + parameter logic [BlockAw-1:0] CHIMERA_SNITCH_CLUSTER_5_RETURN_OFFSET = 6'h 18; + parameter logic [BlockAw-1:0] CHIMERA_CLUSTER_1_CLK_GATE_EN_OFFSET = 6'h 1c; + parameter logic [BlockAw-1:0] CHIMERA_CLUSTER_2_CLK_GATE_EN_OFFSET = 6'h 20; + parameter logic [BlockAw-1:0] CHIMERA_CLUSTER_3_CLK_GATE_EN_OFFSET = 6'h 24; + parameter logic [BlockAw-1:0] CHIMERA_CLUSTER_4_CLK_GATE_EN_OFFSET = 6'h 28; + parameter logic [BlockAw-1:0] CHIMERA_CLUSTER_5_CLK_GATE_EN_OFFSET = 6'h 2c; + + // Register index + typedef enum int { + CHIMERA_SNITCH_BOOT_ADDR, + CHIMERA_SNITCH_INTR_HANDLER_ADDR, + CHIMERA_SNITCH_CLUSTER_1_RETURN, + CHIMERA_SNITCH_CLUSTER_2_RETURN, + CHIMERA_SNITCH_CLUSTER_3_RETURN, + CHIMERA_SNITCH_CLUSTER_4_RETURN, + CHIMERA_SNITCH_CLUSTER_5_RETURN, + 
CHIMERA_CLUSTER_1_CLK_GATE_EN, + CHIMERA_CLUSTER_2_CLK_GATE_EN, + CHIMERA_CLUSTER_3_CLK_GATE_EN, + CHIMERA_CLUSTER_4_CLK_GATE_EN, + CHIMERA_CLUSTER_5_CLK_GATE_EN + } chimera_id_e; + + // Register width information to check illegal writes + parameter logic [3:0] CHIMERA_PERMIT [12] = '{ + 4'b 1111, // index[ 0] CHIMERA_SNITCH_BOOT_ADDR + 4'b 1111, // index[ 1] CHIMERA_SNITCH_INTR_HANDLER_ADDR + 4'b 1111, // index[ 2] CHIMERA_SNITCH_CLUSTER_1_RETURN + 4'b 1111, // index[ 3] CHIMERA_SNITCH_CLUSTER_2_RETURN + 4'b 1111, // index[ 4] CHIMERA_SNITCH_CLUSTER_3_RETURN + 4'b 1111, // index[ 5] CHIMERA_SNITCH_CLUSTER_4_RETURN + 4'b 1111, // index[ 6] CHIMERA_SNITCH_CLUSTER_5_RETURN + 4'b 0001, // index[ 7] CHIMERA_CLUSTER_1_CLK_GATE_EN + 4'b 0001, // index[ 8] CHIMERA_CLUSTER_2_CLK_GATE_EN + 4'b 0001, // index[ 9] CHIMERA_CLUSTER_3_CLK_GATE_EN + 4'b 0001, // index[10] CHIMERA_CLUSTER_4_CLK_GATE_EN + 4'b 0001 // index[11] CHIMERA_CLUSTER_5_CLK_GATE_EN + }; + +endpackage + diff --git a/hw/regs/chimera_reg_top.sv b/hw/regs/chimera_reg_top.sv new file mode 100644 index 0000000..8c111e0 --- /dev/null +++ b/hw/regs/chimera_reg_top.sv @@ -0,0 +1,599 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 +// +// Register Top module auto-generated by `reggen` + + +`include "common_cells/assertions.svh" + +module chimera_reg_top #( + parameter type reg_req_t = logic, + parameter type reg_rsp_t = logic, + parameter int AW = 6 +) ( + input logic clk_i, + input logic rst_ni, + input reg_req_t reg_req_i, + output reg_rsp_t reg_rsp_o, + // To HW + output chimera_reg_pkg::chimera_reg2hw_t reg2hw, // Write + input chimera_reg_pkg::chimera_hw2reg_t hw2reg, // Read + + + // Config + input devmode_i // If 1, explicit error return for unmapped register access +); + + import chimera_reg_pkg::* ; + + localparam int DW = 32; + localparam int DBW = DW/8; // Byte Width + + // register signals + logic reg_we; + logic reg_re; + logic [BlockAw-1:0] reg_addr; + logic [DW-1:0] reg_wdata; + logic [DBW-1:0] reg_be; + logic [DW-1:0] reg_rdata; + logic reg_error; + + logic addrmiss, wr_err; + + logic [DW-1:0] reg_rdata_next; + + // Below register interface can be changed + reg_req_t reg_intf_req; + reg_rsp_t reg_intf_rsp; + + + assign reg_intf_req = reg_req_i; + assign reg_rsp_o = reg_intf_rsp; + + + assign reg_we = reg_intf_req.valid & reg_intf_req.write; + assign reg_re = reg_intf_req.valid & ~reg_intf_req.write; + assign reg_addr = reg_intf_req.addr[BlockAw-1:0]; + assign reg_wdata = reg_intf_req.wdata; + assign reg_be = reg_intf_req.wstrb; + assign reg_intf_rsp.rdata = reg_rdata; + assign reg_intf_rsp.error = reg_error; + assign reg_intf_rsp.ready = 1'b1; + + assign reg_rdata = reg_rdata_next ; + assign reg_error = (devmode_i & addrmiss) | wr_err; + + + // Define SW related signals + // Format: __{wd|we|qs} + // or _{wd|we|qs} if field == 1 or 0 + logic [31:0] snitch_boot_addr_qs; + logic [31:0] snitch_boot_addr_wd; + logic snitch_boot_addr_we; + logic [31:0] snitch_intr_handler_addr_qs; + logic [31:0] snitch_intr_handler_addr_wd; + logic snitch_intr_handler_addr_we; + logic [31:0] snitch_cluster_1_return_qs; + logic [31:0] snitch_cluster_2_return_qs; + logic [31:0] snitch_cluster_3_return_qs; + logic [31:0] snitch_cluster_4_return_qs; + logic [31:0] snitch_cluster_5_return_qs; + logic cluster_1_clk_gate_en_qs; + logic cluster_1_clk_gate_en_wd; + logic cluster_1_clk_gate_en_we; + logic cluster_2_clk_gate_en_qs; + logic 
cluster_2_clk_gate_en_wd; + logic cluster_2_clk_gate_en_we; + logic cluster_3_clk_gate_en_qs; + logic cluster_3_clk_gate_en_wd; + logic cluster_3_clk_gate_en_we; + logic cluster_4_clk_gate_en_qs; + logic cluster_4_clk_gate_en_wd; + logic cluster_4_clk_gate_en_we; + logic cluster_5_clk_gate_en_qs; + logic cluster_5_clk_gate_en_wd; + logic cluster_5_clk_gate_en_we; + + // Register instances + // R[snitch_boot_addr]: V(False) + + prim_subreg #( + .DW (32), + .SWACCESS("RW"), + .RESVAL (32'hbadcab1e) + ) u_snitch_boot_addr ( + .clk_i (clk_i ), + .rst_ni (rst_ni ), + + // from register interface + .we (snitch_boot_addr_we), + .wd (snitch_boot_addr_wd), + + // from internal hardware + .de (1'b0), + .d ('0 ), + + // to internal hardware + .qe (), + .q (reg2hw.snitch_boot_addr.q ), + + // to register interface (read) + .qs (snitch_boot_addr_qs) + ); + + + // R[snitch_intr_handler_addr]: V(False) + + prim_subreg #( + .DW (32), + .SWACCESS("RW"), + .RESVAL (32'hbadcab1e) + ) u_snitch_intr_handler_addr ( + .clk_i (clk_i ), + .rst_ni (rst_ni ), + + // from register interface + .we (snitch_intr_handler_addr_we), + .wd (snitch_intr_handler_addr_wd), + + // from internal hardware + .de (1'b0), + .d ('0 ), + + // to internal hardware + .qe (), + .q (reg2hw.snitch_intr_handler_addr.q ), + + // to register interface (read) + .qs (snitch_intr_handler_addr_qs) + ); + + + // R[snitch_cluster_1_return]: V(False) + + prim_subreg #( + .DW (32), + .SWACCESS("RO"), + .RESVAL (32'h0) + ) u_snitch_cluster_1_return ( + .clk_i (clk_i ), + .rst_ni (rst_ni ), + + .we (1'b0), + .wd ('0 ), + + // from internal hardware + .de (hw2reg.snitch_cluster_1_return.de), + .d (hw2reg.snitch_cluster_1_return.d ), + + // to internal hardware + .qe (), + .q (reg2hw.snitch_cluster_1_return.q ), + + // to register interface (read) + .qs (snitch_cluster_1_return_qs) + ); + + + // R[snitch_cluster_2_return]: V(False) + + prim_subreg #( + .DW (32), + .SWACCESS("RO"), + .RESVAL (32'h0) + ) u_snitch_cluster_2_return ( + .clk_i (clk_i ), + .rst_ni (rst_ni ), + + .we (1'b0), + .wd ('0 ), + + // from internal hardware + .de (hw2reg.snitch_cluster_2_return.de), + .d (hw2reg.snitch_cluster_2_return.d ), + + // to internal hardware + .qe (), + .q (reg2hw.snitch_cluster_2_return.q ), + + // to register interface (read) + .qs (snitch_cluster_2_return_qs) + ); + + + // R[snitch_cluster_3_return]: V(False) + + prim_subreg #( + .DW (32), + .SWACCESS("RO"), + .RESVAL (32'h0) + ) u_snitch_cluster_3_return ( + .clk_i (clk_i ), + .rst_ni (rst_ni ), + + .we (1'b0), + .wd ('0 ), + + // from internal hardware + .de (hw2reg.snitch_cluster_3_return.de), + .d (hw2reg.snitch_cluster_3_return.d ), + + // to internal hardware + .qe (), + .q (reg2hw.snitch_cluster_3_return.q ), + + // to register interface (read) + .qs (snitch_cluster_3_return_qs) + ); + + + // R[snitch_cluster_4_return]: V(False) + + prim_subreg #( + .DW (32), + .SWACCESS("RO"), + .RESVAL (32'h0) + ) u_snitch_cluster_4_return ( + .clk_i (clk_i ), + .rst_ni (rst_ni ), + + .we (1'b0), + .wd ('0 ), + + // from internal hardware + .de (hw2reg.snitch_cluster_4_return.de), + .d (hw2reg.snitch_cluster_4_return.d ), + + // to internal hardware + .qe (), + .q (reg2hw.snitch_cluster_4_return.q ), + + // to register interface (read) + .qs (snitch_cluster_4_return_qs) + ); + + + // R[snitch_cluster_5_return]: V(False) + + prim_subreg #( + .DW (32), + .SWACCESS("RO"), + .RESVAL (32'h0) + ) u_snitch_cluster_5_return ( + .clk_i (clk_i ), + .rst_ni (rst_ni ), + + .we (1'b0), + .wd ('0 ), + + // from internal 
hardware + .de (hw2reg.snitch_cluster_5_return.de), + .d (hw2reg.snitch_cluster_5_return.d ), + + // to internal hardware + .qe (), + .q (reg2hw.snitch_cluster_5_return.q ), + + // to register interface (read) + .qs (snitch_cluster_5_return_qs) + ); + + + // R[cluster_1_clk_gate_en]: V(False) + + prim_subreg #( + .DW (1), + .SWACCESS("RW"), + .RESVAL (1'h0) + ) u_cluster_1_clk_gate_en ( + .clk_i (clk_i ), + .rst_ni (rst_ni ), + + // from register interface + .we (cluster_1_clk_gate_en_we), + .wd (cluster_1_clk_gate_en_wd), + + // from internal hardware + .de (1'b0), + .d ('0 ), + + // to internal hardware + .qe (), + .q (reg2hw.cluster_1_clk_gate_en.q ), + + // to register interface (read) + .qs (cluster_1_clk_gate_en_qs) + ); + + + // R[cluster_2_clk_gate_en]: V(False) + + prim_subreg #( + .DW (1), + .SWACCESS("RW"), + .RESVAL (1'h0) + ) u_cluster_2_clk_gate_en ( + .clk_i (clk_i ), + .rst_ni (rst_ni ), + + // from register interface + .we (cluster_2_clk_gate_en_we), + .wd (cluster_2_clk_gate_en_wd), + + // from internal hardware + .de (1'b0), + .d ('0 ), + + // to internal hardware + .qe (), + .q (reg2hw.cluster_2_clk_gate_en.q ), + + // to register interface (read) + .qs (cluster_2_clk_gate_en_qs) + ); + + + // R[cluster_3_clk_gate_en]: V(False) + + prim_subreg #( + .DW (1), + .SWACCESS("RW"), + .RESVAL (1'h0) + ) u_cluster_3_clk_gate_en ( + .clk_i (clk_i ), + .rst_ni (rst_ni ), + + // from register interface + .we (cluster_3_clk_gate_en_we), + .wd (cluster_3_clk_gate_en_wd), + + // from internal hardware + .de (1'b0), + .d ('0 ), + + // to internal hardware + .qe (), + .q (reg2hw.cluster_3_clk_gate_en.q ), + + // to register interface (read) + .qs (cluster_3_clk_gate_en_qs) + ); + + + // R[cluster_4_clk_gate_en]: V(False) + + prim_subreg #( + .DW (1), + .SWACCESS("RW"), + .RESVAL (1'h0) + ) u_cluster_4_clk_gate_en ( + .clk_i (clk_i ), + .rst_ni (rst_ni ), + + // from register interface + .we (cluster_4_clk_gate_en_we), + .wd (cluster_4_clk_gate_en_wd), + + // from internal hardware + .de (1'b0), + .d ('0 ), + + // to internal hardware + .qe (), + .q (reg2hw.cluster_4_clk_gate_en.q ), + + // to register interface (read) + .qs (cluster_4_clk_gate_en_qs) + ); + + + // R[cluster_5_clk_gate_en]: V(False) + + prim_subreg #( + .DW (1), + .SWACCESS("RW"), + .RESVAL (1'h0) + ) u_cluster_5_clk_gate_en ( + .clk_i (clk_i ), + .rst_ni (rst_ni ), + + // from register interface + .we (cluster_5_clk_gate_en_we), + .wd (cluster_5_clk_gate_en_wd), + + // from internal hardware + .de (1'b0), + .d ('0 ), + + // to internal hardware + .qe (), + .q (reg2hw.cluster_5_clk_gate_en.q ), + + // to register interface (read) + .qs (cluster_5_clk_gate_en_qs) + ); + + + + + logic [11:0] addr_hit; + always_comb begin + addr_hit = '0; + addr_hit[ 0] = (reg_addr == CHIMERA_SNITCH_BOOT_ADDR_OFFSET); + addr_hit[ 1] = (reg_addr == CHIMERA_SNITCH_INTR_HANDLER_ADDR_OFFSET); + addr_hit[ 2] = (reg_addr == CHIMERA_SNITCH_CLUSTER_1_RETURN_OFFSET); + addr_hit[ 3] = (reg_addr == CHIMERA_SNITCH_CLUSTER_2_RETURN_OFFSET); + addr_hit[ 4] = (reg_addr == CHIMERA_SNITCH_CLUSTER_3_RETURN_OFFSET); + addr_hit[ 5] = (reg_addr == CHIMERA_SNITCH_CLUSTER_4_RETURN_OFFSET); + addr_hit[ 6] = (reg_addr == CHIMERA_SNITCH_CLUSTER_5_RETURN_OFFSET); + addr_hit[ 7] = (reg_addr == CHIMERA_CLUSTER_1_CLK_GATE_EN_OFFSET); + addr_hit[ 8] = (reg_addr == CHIMERA_CLUSTER_2_CLK_GATE_EN_OFFSET); + addr_hit[ 9] = (reg_addr == CHIMERA_CLUSTER_3_CLK_GATE_EN_OFFSET); + addr_hit[10] = (reg_addr == CHIMERA_CLUSTER_4_CLK_GATE_EN_OFFSET); + addr_hit[11] = (reg_addr == 
CHIMERA_CLUSTER_5_CLK_GATE_EN_OFFSET); + end + + assign addrmiss = (reg_re || reg_we) ? ~|addr_hit : 1'b0 ; + + // Check sub-word write is permitted + always_comb begin + wr_err = (reg_we & + ((addr_hit[ 0] & (|(CHIMERA_PERMIT[ 0] & ~reg_be))) | + (addr_hit[ 1] & (|(CHIMERA_PERMIT[ 1] & ~reg_be))) | + (addr_hit[ 2] & (|(CHIMERA_PERMIT[ 2] & ~reg_be))) | + (addr_hit[ 3] & (|(CHIMERA_PERMIT[ 3] & ~reg_be))) | + (addr_hit[ 4] & (|(CHIMERA_PERMIT[ 4] & ~reg_be))) | + (addr_hit[ 5] & (|(CHIMERA_PERMIT[ 5] & ~reg_be))) | + (addr_hit[ 6] & (|(CHIMERA_PERMIT[ 6] & ~reg_be))) | + (addr_hit[ 7] & (|(CHIMERA_PERMIT[ 7] & ~reg_be))) | + (addr_hit[ 8] & (|(CHIMERA_PERMIT[ 8] & ~reg_be))) | + (addr_hit[ 9] & (|(CHIMERA_PERMIT[ 9] & ~reg_be))) | + (addr_hit[10] & (|(CHIMERA_PERMIT[10] & ~reg_be))) | + (addr_hit[11] & (|(CHIMERA_PERMIT[11] & ~reg_be))))); + end + + assign snitch_boot_addr_we = addr_hit[0] & reg_we & !reg_error; + assign snitch_boot_addr_wd = reg_wdata[31:0]; + + assign snitch_intr_handler_addr_we = addr_hit[1] & reg_we & !reg_error; + assign snitch_intr_handler_addr_wd = reg_wdata[31:0]; + + assign cluster_1_clk_gate_en_we = addr_hit[7] & reg_we & !reg_error; + assign cluster_1_clk_gate_en_wd = reg_wdata[0]; + + assign cluster_2_clk_gate_en_we = addr_hit[8] & reg_we & !reg_error; + assign cluster_2_clk_gate_en_wd = reg_wdata[0]; + + assign cluster_3_clk_gate_en_we = addr_hit[9] & reg_we & !reg_error; + assign cluster_3_clk_gate_en_wd = reg_wdata[0]; + + assign cluster_4_clk_gate_en_we = addr_hit[10] & reg_we & !reg_error; + assign cluster_4_clk_gate_en_wd = reg_wdata[0]; + + assign cluster_5_clk_gate_en_we = addr_hit[11] & reg_we & !reg_error; + assign cluster_5_clk_gate_en_wd = reg_wdata[0]; + + // Read data return + always_comb begin + reg_rdata_next = '0; + unique case (1'b1) + addr_hit[0]: begin + reg_rdata_next[31:0] = snitch_boot_addr_qs; + end + + addr_hit[1]: begin + reg_rdata_next[31:0] = snitch_intr_handler_addr_qs; + end + + addr_hit[2]: begin + reg_rdata_next[31:0] = snitch_cluster_1_return_qs; + end + + addr_hit[3]: begin + reg_rdata_next[31:0] = snitch_cluster_2_return_qs; + end + + addr_hit[4]: begin + reg_rdata_next[31:0] = snitch_cluster_3_return_qs; + end + + addr_hit[5]: begin + reg_rdata_next[31:0] = snitch_cluster_4_return_qs; + end + + addr_hit[6]: begin + reg_rdata_next[31:0] = snitch_cluster_5_return_qs; + end + + addr_hit[7]: begin + reg_rdata_next[0] = cluster_1_clk_gate_en_qs; + end + + addr_hit[8]: begin + reg_rdata_next[0] = cluster_2_clk_gate_en_qs; + end + + addr_hit[9]: begin + reg_rdata_next[0] = cluster_3_clk_gate_en_qs; + end + + addr_hit[10]: begin + reg_rdata_next[0] = cluster_4_clk_gate_en_qs; + end + + addr_hit[11]: begin + reg_rdata_next[0] = cluster_5_clk_gate_en_qs; + end + + default: begin + reg_rdata_next = '1; + end + endcase + end + + // Unused signal tieoff + + // wdata / byte enable are not always fully used + // add a blanket unused statement to handle lint waivers + logic unused_wdata; + logic unused_be; + assign unused_wdata = ^reg_wdata; + assign unused_be = ^reg_be; + + // Assertions for Register Interface + `ASSERT(en2addrHit, (reg_we || reg_re) |-> $onehot0(addr_hit)) + +endmodule + +module chimera_reg_top_intf +#( + parameter int AW = 6, + localparam int DW = 32 +) ( + input logic clk_i, + input logic rst_ni, + REG_BUS.in regbus_slave, + // To HW + output chimera_reg_pkg::chimera_reg2hw_t reg2hw, // Write + input chimera_reg_pkg::chimera_hw2reg_t hw2reg, // Read + // Config + input devmode_i // If 1, explicit error return for 
unmapped register access +); + localparam int unsigned STRB_WIDTH = DW/8; + +`include "register_interface/typedef.svh" +`include "register_interface/assign.svh" + + // Define structs for reg_bus + typedef logic [AW-1:0] addr_t; + typedef logic [DW-1:0] data_t; + typedef logic [STRB_WIDTH-1:0] strb_t; + `REG_BUS_TYPEDEF_ALL(reg_bus, addr_t, data_t, strb_t) + + reg_bus_req_t s_reg_req; + reg_bus_rsp_t s_reg_rsp; + + // Assign SV interface to structs + `REG_BUS_ASSIGN_TO_REQ(s_reg_req, regbus_slave) + `REG_BUS_ASSIGN_FROM_RSP(regbus_slave, s_reg_rsp) + + + + chimera_reg_top #( + .reg_req_t(reg_bus_req_t), + .reg_rsp_t(reg_bus_rsp_t), + .AW(AW) + ) i_regs ( + .clk_i, + .rst_ni, + .reg_req_i(s_reg_req), + .reg_rsp_o(s_reg_rsp), + .reg2hw, // Write + .hw2reg, // Read + .devmode_i + ); + +endmodule + + diff --git a/hw/regs/chimera_regs.hjson b/hw/regs/chimera_regs.hjson new file mode 100644 index 0000000..0562e81 --- /dev/null +++ b/hw/regs/chimera_regs.hjson @@ -0,0 +1,156 @@ +// Copyright 2024 ETH Zurich and University of Bologna. +// Solderpad Hardware License, Version 0.51, see LICENSE for details. +// SPDX-License-Identifier: SHL-0.51 +// Moritz Scherer + +{ + name: "chimera", + clock_primary: "clk_i", + bus_interfaces: [ + { protocol: "reg_iface", direction: "device" } + ], + regwidth: "32", + registers :[ + { + name: "SNITCH_BOOT_ADDR", + desc: "Set boot address for all snitch cores", + swaccess: "rw", + hwaccess: "hro", + resval: "0xBADCAB1E", + hwqe: "0", + fields: [ + { bits: "31:0" } + ], + } + { + name: "SNITCH_INTR_HANDLER_ADDR", + desc: "Set interrupt handler address for all snitch cores", + swaccess: "rw", + hwaccess: "hro", + resval: "0xBADCAB1E", + hwqe: "0", + fields: [ + { bits: "31:0" } + ], + } + { + name: "SNITCH_CLUSTER_1_RETURN", + desc: "Register to store return value of Snitch cluster 1", + swaccess: "ro", + hwaccess: "hrw", + resval: "0", + hwqe: "0", + fields: [ + { bits: "31:0" } + ], + } + + { + name: "SNITCH_CLUSTER_2_RETURN", + desc: "Register to store return value of Snitch cluster 2", + swaccess: "ro", + hwaccess: "hrw", + resval: "0", + hwqe: "0", + fields: [ + { bits: "31:0" } + ], + } + + { + name: "SNITCH_CLUSTER_3_RETURN", + desc: "Register to store return value of Snitch cluster 3", + swaccess: "ro", + hwaccess: "hrw", + resval: "0", + hwqe: "0", + fields: [ + { bits: "31:0" } + ], + } + + { + name: "SNITCH_CLUSTER_4_RETURN", + desc: "Register to store return value of Snitch cluster 4", + swaccess: "ro", + hwaccess: "hrw", + resval: "0", + hwqe: "0", + fields: [ + { bits: "31:0" } + ], + } + + { + name: "SNITCH_CLUSTER_5_RETURN", + desc: "Register to store return value of Snitch cluster 5", + swaccess: "ro", + hwaccess: "hrw", + resval: "0", + hwqe: "0", + fields: [ + { bits: "31:0" } + ], + } + + { + name: "CLUSTER_1_CLK_GATE_EN", + desc: "Enable clock gate for cluster 1", + swaccess: "rw", + hwaccess: "hro", + resval: "0", + hwqe: "0", + fields: [ + { bits: "0:0" } + ], + } + + { + name: "CLUSTER_2_CLK_GATE_EN", + desc: "Enable clock gate for cluster 2", + swaccess: "rw", + hwaccess: "hro", + resval: "0", + hwqe: "0", + fields: [ + { bits: "0:0" } + ], + } + + { + name: "CLUSTER_3_CLK_GATE_EN", + desc: "Enable clock gate for cluster 3", + swaccess: "rw", + hwaccess: "hro", + resval: "0", + hwqe: "0", + fields: [ + { bits: "0:0" } + ], + } + + { + name: "CLUSTER_4_CLK_GATE_EN", + desc: "Enable clock gate for cluster 4", + swaccess: "rw", + hwaccess: "hro", + resval: "0", + hwqe: "0", + fields: [ + { bits: "0:0" } + ], + } + + { + name: 
"CLUSTER_5_CLK_GATE_EN", + desc: "Enable clock gate for cluster 5", + swaccess: "rw", + hwaccess: "hro", + resval: "0", + hwqe: "0", + fields: [ + { bits: "0:0" } + ], + } + ] +} diff --git a/hw/rv_plic.cfg.hjson b/hw/rv_plic.cfg.hjson new file mode 100644 index 0000000..402bb1a --- /dev/null +++ b/hw/rv_plic.cfg.hjson @@ -0,0 +1,15 @@ +// Copyright 2022 ETH Zurich and University of Bologna. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 +// +// Moritz Scherer + +{ + instance_name: "rv_plic", + param_values: { + src: 92, + target: 92, + prio: 7, + nonstd_regs: 0 // Do *not* include these: MSIPs are not used and we use a 64 MiB address space + }, +} diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..bbed9bc --- /dev/null +++ b/requirements.txt @@ -0,0 +1,6 @@ +hjson +tabulate +pyyaml +mako +jsonref +jsonschema diff --git a/sim.mk b/sim.mk new file mode 100644 index 0000000..8ca3fe3 --- /dev/null +++ b/sim.mk @@ -0,0 +1,38 @@ +# ---------------------------------------------------------------------- +# +# File: sim.mk +# +# Created: 25.06.2024 +# +# Copyright (C) 2024, ETH Zurich and University of Bologna. +# +# Author: Moritz Scherer, ETH Zurich +# +# ---------------------------------------------------------------------- +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the License); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an AS IS BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +.PHONY: sim sim-clean + +chim-sim-clean: + @rm -rf target/sim/vsim/work + @rm -rf target/sim/vsim/transcript + @rm -f $(CHIM_ROOT)/target/sim/vsim/compile.tcl + +chim-sim: $(CHIM_ROOT)/target/sim/vsim/compile.tcl + +$(CHIM_ROOT)/target/sim/vsim/compile.tcl: chs-hw-init snitch-hw-init + @bender script vsim $(COMMON_TARGS) $(SIM_TARGS) --vlog-arg="$(VLOG_ARGS)"> $@ + echo 'vlog "$(realpath $(CHS_ROOT))/target/sim/src/elfloader.cpp" -ccflags "-std=c++11"' >> $@ + diff --git a/sw/include/regs/soc_ctrl.h b/sw/include/regs/soc_ctrl.h new file mode 100644 index 0000000..af175c7 --- /dev/null +++ b/sw/include/regs/soc_ctrl.h @@ -0,0 +1,64 @@ +// Generated register defines for chimera + +// Copyright information found in source file: +// Copyright 2024 ETH Zurich and University of Bologna. 
+ +// Licensing information found in source file: +// +// SPDX-License-Identifier: SHL-0.51 + +#ifndef _CHIMERA_REG_DEFS_ +#define _CHIMERA_REG_DEFS_ + +#ifdef __cplusplus +extern "C" { +#endif +// Register width +#define CHIMERA_PARAM_REG_WIDTH 32 + +// Set boot address for all snitch cores +#define CHIMERA_SNITCH_BOOT_ADDR_REG_OFFSET 0x0 + +// Set interrupt handler address for all snitch cores +#define CHIMERA_SNITCH_INTR_HANDLER_ADDR_REG_OFFSET 0x4 + +// Register to store return value of Snitch cluster 1 +#define CHIMERA_SNITCH_CLUSTER_1_RETURN_REG_OFFSET 0x8 + +// Register to store return value of Snitch cluster 2 +#define CHIMERA_SNITCH_CLUSTER_2_RETURN_REG_OFFSET 0xc + +// Register to store return value of Snitch cluster 3 +#define CHIMERA_SNITCH_CLUSTER_3_RETURN_REG_OFFSET 0x10 + +// Register to store return value of Snitch cluster 4 +#define CHIMERA_SNITCH_CLUSTER_4_RETURN_REG_OFFSET 0x14 + +// Register to store return value of Snitch cluster 5 +#define CHIMERA_SNITCH_CLUSTER_5_RETURN_REG_OFFSET 0x18 + +// Enable clock gate for cluster 1 +#define CHIMERA_CLUSTER_1_CLK_GATE_EN_REG_OFFSET 0x1c +#define CHIMERA_CLUSTER_1_CLK_GATE_EN_CLUSTER_1_CLK_GATE_EN_BIT 0 + +// Enable clock gate for cluster 2 +#define CHIMERA_CLUSTER_2_CLK_GATE_EN_REG_OFFSET 0x20 +#define CHIMERA_CLUSTER_2_CLK_GATE_EN_CLUSTER_2_CLK_GATE_EN_BIT 0 + +// Enable clock gate for cluster 3 +#define CHIMERA_CLUSTER_3_CLK_GATE_EN_REG_OFFSET 0x24 +#define CHIMERA_CLUSTER_3_CLK_GATE_EN_CLUSTER_3_CLK_GATE_EN_BIT 0 + +// Enable clock gate for cluster 4 +#define CHIMERA_CLUSTER_4_CLK_GATE_EN_REG_OFFSET 0x28 +#define CHIMERA_CLUSTER_4_CLK_GATE_EN_CLUSTER_4_CLK_GATE_EN_BIT 0 + +// Enable clock gate for cluster 5 +#define CHIMERA_CLUSTER_5_CLK_GATE_EN_REG_OFFSET 0x2c +#define CHIMERA_CLUSTER_5_CLK_GATE_EN_CLUSTER_5_CLK_GATE_EN_BIT 0 + +#ifdef __cplusplus +} // extern "C" +#endif +#endif // _CHIMERA_REG_DEFS_ +// End generated register defines for chimera \ No newline at end of file diff --git a/sw/sw.mk b/sw/sw.mk new file mode 100644 index 0000000..0f9f567 --- /dev/null +++ b/sw/sw.mk @@ -0,0 +1,41 @@ +# ---------------------------------------------------------------------- +# +# File: sw.mk +# +# Created: 26.06.2024 +# +# Copyright (C) 2024, ETH Zurich and University of Bologna. +# +# Author: Moritz Scherer, ETH Zurich +# +# ---------------------------------------------------------------------- +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the License); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an AS IS BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
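The generated soc_ctrl.h above is meant to be used exactly like in the tests further down. As a minimal usage sketch (not part of the patch, and assuming the 0x30001000 register block base used by testClusterGating.c and testClusterOffload.c), a host program can gate a cluster clock and poll a cluster return register as follows:

/* Illustrative sketch: access the Chimera top-level registers through the
 * generated soc_ctrl.h defines. Assumes the 0x30001000 base address used by
 * the tests in sw/tests; not part of the patch. */
#include <stdint.h>
#include "regs/soc_ctrl.h"

#define SOC_CTRL_BASE 0x30001000u
#define SOC_CTRL ((volatile uint8_t *)SOC_CTRL_BASE)

/* Enable the clock gate of cluster 1 (1-bit RW register, byte 0 writable) */
static void cluster1_clk_gate(uint8_t enable) {
  *(SOC_CTRL + CHIMERA_CLUSTER_1_CLK_GATE_EN_REG_OFFSET) = enable & 1;
}

/* Read the return value written back by Snitch cluster 1 (read-only) */
static uint32_t cluster1_return(void) {
  return *(volatile uint32_t *)(SOC_CTRL +
                                CHIMERA_SNITCH_CLUSTER_1_RETURN_REG_OFFSET);
}

int main(void) {
  uint32_t ret;
  while ((ret = cluster1_return()) == 0) { } /* wait for cluster 1 to finish */
  cluster1_clk_gate(1);                      /* then stop its clock          */
  return (int)ret;
}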
+ +CHS_SW_INCLUDES += -I$(CHIM_SW_DIR)/include +CHS_SW_FLAGS += -falign-functions=64 -march=rv32im + +CHIM_SW_TEST_SRCS_S = $(wildcard $(CHIM_SW_DIR)/tests/*.S) +CHIM_SW_TEST_SRCS_C = $(wildcard $(CHIM_SW_DIR)/tests/*.c) + +CHIM_SW_TEST_DRAM_DUMP = $(CHIM_SW_TEST_SRCS_S:.S=.dram.dump) $(CHIM_SW_TEST_SRCS_C:.c=.dram.dump) +CHIM_SW_TEST_SPM_DUMP = $(CHIM_SW_TEST_SRCS_S:.S=.spm.dump) $(CHIM_SW_TEST_SRCS_C:.c=.spm.dump) +CHIM_SW_TEST_MEMISL_DUMP = $(CHIM_SW_TEST_SRCS_S:.S=.memisl.dump) $(CHIM_SW_TEST_SRCS_C:.c=.memisl.dump) +CHIM_SW_TEST_SPM_ROMH = $(CHIM_SW_TEST_SRCS_S:.S=.rom.memh) $(CHIM_SW_TEST_SRCS_C:.c=.rom.memh) +CHIM_SW_TEST_SPM_GPTH = $(CHIM_SW_TEST_SRCS_S:.S=.gpt.memh) $(CHIM_SW_TEST_SRCS_C:.c=.gpt.memh) + +CHIM_SW_TESTS += $(CHIM_SW_TEST_DRAM_DUMP) $(CHIM_SW_TEST_SPM_DUMP) $(CHIM_SW_TEST_MEMISL_DUMP) $(CHIM_SW_TEST_SPM_ROMH) $(CHIM_SW_TEST_SPM_GPTH) + +chim-sw: $(CHIM_SW_TESTS) + diff --git a/sw/tests/testCluster.c b/sw/tests/testCluster.c new file mode 100644 index 0000000..17be31c --- /dev/null +++ b/sw/tests/testCluster.c @@ -0,0 +1,59 @@ +/* ===================================================================== + * Title: testCluster.c + * Description: + * + * $Date: 26.06.2024 + * + * ===================================================================== */ +/* + * Copyright (C) 2020 ETH Zurich and University of Bologna. + * + * Author: Moritz Scherer, ETH Zurich + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#define CLUSTERMEMORYSTART 0x40000008 +#define CLUSTERDISTANCE 0x200000 +#define NUMCLUSTERS 5 + +#define TESTVAL 0x00E0D0C0 + +int main(){ + volatile int32_t* clusterMemPtr = (volatile int32_t*)CLUSTERMEMORYSTART; + volatile int32_t result; + + uint8_t ret = 0; + for (int i=0; i +#include "regs/soc_ctrl.h" + +#define TOPLEVELREGREGION 0x30001000 + +int main(){ + volatile uint8_t* regPtr = (volatile uint8_t*) TOPLEVELREGREGION; + + *(regPtr + CHIMERA_CLUSTER_1_CLK_GATE_EN_REG_OFFSET) = 1; + *(regPtr + CHIMERA_CLUSTER_4_CLK_GATE_EN_REG_OFFSET) = 1; + *(regPtr + CHIMERA_CLUSTER_5_CLK_GATE_EN_REG_OFFSET) = 1; + + while(1){} + + return 0; + +} diff --git a/sw/tests/testClusterOffload.c b/sw/tests/testClusterOffload.c new file mode 100644 index 0000000..724fc2d --- /dev/null +++ b/sw/tests/testClusterOffload.c @@ -0,0 +1,62 @@ +/* ===================================================================== + * Title: testClusterOffload.c + * Description: + * + * $Date: 28.06.2024 + * + * ===================================================================== */ +/* + * Copyright (C) 2020 ETH Zurich and University of Bologna. + * + * Author: Moritz Scherer, ETH Zurich + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include + +#define SOC_CTRL_BASEADDR 0x30001000 +#define TESTVAL 0x50CCE55 +#define FAILVAL 0xBADCAB1E + +#define TARGETHARTID 1 +#define IRQID 1 + +#define CLINTADDR 0x02040000 +#define CLINTMSIP1OFFSET 0x28 + +static int32_t* clintPointer = (int32_t*) CLINTADDR; + +int32_t testReturn(int32_t hartid){ + + return TESTVAL; +} + +int main(){ + + volatile int32_t* snitchBootAddr = (volatile int32_t*) (SOC_CTRL_BASEADDR + CHIMERA_SNITCH_BOOT_ADDR_REG_OFFSET); + volatile int32_t* snitchReturnAddr = (volatile int32_t*) (SOC_CTRL_BASEADDR + CHIMERA_SNITCH_CLUSTER_1_RETURN_REG_OFFSET); + + *snitchBootAddr = testReturn; + + *(clintPointer + CLINTMSIP1OFFSET/4) = 1; + + while(!*snitchReturnAddr){ + + } + + return *snitchReturnAddr; +} diff --git a/target/sim/src/fixture_chimera_soc.sv b/target/sim/src/fixture_chimera_soc.sv new file mode 100644 index 0000000..d1fe1eb --- /dev/null +++ b/target/sim/src/fixture_chimera_soc.sv @@ -0,0 +1,144 @@ +// Copyright 2022 ETH Zurich and University of Bologna. +// Solderpad Hardware License, Version 0.51, see LICENSE for details. +// SPDX-License-Identifier: SHL-0.51 +// +// Nicole Narr +// Christopher Reinwardt +// Paul Scheffler + +module fixture_chimera_soc #( + /// The selected simulation configuration from the `tb_cheshire_pkg`. + parameter int unsigned SelectedCfg = 32'd0 + + ); + +`include "cheshire/typedef.svh" + + import cheshire_pkg::*; + import tb_cheshire_pkg::*; + import chimera_pkg::*; + + localparam cheshire_cfg_t DutCfg = ChimeraCfg[SelectedCfg]; + + `CHESHIRE_TYPEDEF_ALL(, DutCfg) + + + /////////// + // DUT // + /////////// + + logic soc_clk; + logic clu_clk; + logic rst_n; + logic test_mode; + logic [1:0] boot_mode; + logic rtc; + + logic jtag_tck; + logic jtag_trst_n; + logic jtag_tms; + logic jtag_tdi; + logic jtag_tdo; + + logic uart_tx; + logic uart_rx; + + logic i2c_sda_o; + logic i2c_sda_i; + logic i2c_sda_en; + logic i2c_scl_o; + logic i2c_scl_i; + logic i2c_scl_en; + + logic spih_sck_o; + logic spih_sck_en; + logic [SpihNumCs-1:0] spih_csb_o; + logic [SpihNumCs-1:0] spih_csb_en; + logic [ 3:0] spih_sd_o; + logic [ 3:0] spih_sd_i; + logic [ 3:0] spih_sd_en; + + logic [SlinkNumChan-1:0] slink_rcv_clk_i; + logic [SlinkNumChan-1:0] slink_rcv_clk_o; + logic [SlinkNumChan-1:0][SlinkNumLanes-1:0] slink_i; + logic [SlinkNumChan-1:0][SlinkNumLanes-1:0] slink_o; + + chimera_top_wrapper #( + .SelectedCfg(SelectedCfg) + ) dut ( + .soc_clk_i ( soc_clk ), + .clu_clk_i ( clu_clk ), + .rst_ni ( rst_n ), + .test_mode_i ( test_mode ), + .boot_mode_i ( boot_mode ), + .rtc_i ( rtc ), + .jtag_tck_i ( jtag_tck ), + .jtag_trst_ni ( jtag_trst_n ), + .jtag_tms_i ( jtag_tms ), + .jtag_tdi_i ( jtag_tdi ), + .jtag_tdo_o ( jtag_tdo ), + .jtag_tdo_oe_o ( ), + .uart_tx_o ( uart_tx ), + .uart_rx_i ( uart_rx ), + .uart_rts_no ( ), + .uart_dtr_no ( ), + .uart_cts_ni ( 1'b0 ), + .uart_dsr_ni ( 1'b0 ), + .uart_dcd_ni ( 1'b0 ), + .uart_rin_ni ( 1'b0 ), + .i2c_sda_o ( i2c_sda_o ), + .i2c_sda_i ( i2c_sda_i ), + .i2c_sda_en_o ( i2c_sda_en ), + .i2c_scl_o ( i2c_scl_o ), + .i2c_scl_i ( i2c_scl_i ), + .i2c_scl_en_o ( i2c_scl_en ), + .spih_sck_o ( 
spih_sck_o ), + .spih_sck_en_o ( spih_sck_en ), + .spih_csb_o ( spih_csb_o ), + .spih_csb_en_o ( spih_csb_en ), + .spih_sd_o ( spih_sd_o ), + .spih_sd_en_o ( spih_sd_en ), + .spih_sd_i ( spih_sd_i ), + .gpio_i ( '0 ), + .gpio_o ( ), + .gpio_en_o ( ), + .slink_rcv_clk_i ( slink_rcv_clk_i ), + .slink_rcv_clk_o ( slink_rcv_clk_o ), + .slink_i ( slink_i ), + .slink_o ( slink_o ), + .vga_hsync_o ( ), + .vga_vsync_o ( ), + .vga_red_o ( ), + .vga_green_o ( ), + .vga_blue_o ( ) + ); + + //////////////////////// + // Tristate Adapter // + //////////////////////// + + wire i2c_sda; + wire i2c_scl; + + wire spih_sck; + wire [SpihNumCs-1:0] spih_csb; + wire [ 3:0] spih_sd; + + vip_cheshire_soc_tristate vip_tristate (.*); + + /////////// + // VIP // + /////////// + + axi_mst_req_t axi_slink_mst_req; + axi_mst_rsp_t axi_slink_mst_rsp; + + assign axi_slink_mst_req = '0; + + vip_chimera_soc #( + .DutCfg ( DutCfg ), + .axi_ext_mst_req_t(axi_mst_req_t), + .axi_ext_mst_rsp_t(axi_mst_rsp_t) + ) vip (.*); + +endmodule diff --git a/target/sim/src/tb_chimera_pkg.sv b/target/sim/src/tb_chimera_pkg.sv new file mode 100644 index 0000000..d2256e3 --- /dev/null +++ b/target/sim/src/tb_chimera_pkg.sv @@ -0,0 +1,45 @@ +// Copyright 2022 ETH Zurich and University of Bologna. +// Solderpad Hardware License, Version 0.51, see LICENSE for details. +// SPDX-License-Identifier: SHL-0.51 +// +// Thomas Benz + +/// This package contains parameters used in the simulation environment +package tb_chimera_pkg; + + import cheshire_pkg::*; + + // A dedicated RT config + function automatic cheshire_cfg_t gen_cheshire_rt_cfg(); + cheshire_cfg_t ret = DefaultCfg; + ret.AxiRt = 1; + return ret; + endfunction + + // An embedded 32 bit config + function automatic cheshire_cfg_t gen_cheshire_emb_cfg(); + cheshire_cfg_t ret = DefaultCfg; + ret.Vga = 0; + ret.SerialLink = 0; + ret.AxiUserWidth = 64; + return ret; + endfunction // gen_cheshire_emb_cfg + + function automatic cheshire_cfg_t gen_cheshire_memisl_cfg(); + cheshire_cfg_t ret = gen_cheshire_emb_cfg(); + ret.MemoryIsland = 1; + return ret; + endfunction // gen_cheshire_memisl_cfg + + // Number of Cheshire configurations + localparam int unsigned NumCheshireConfigs = 32'd4; + + // Assemble a configuration array indexed by a numeric parameter + localparam cheshire_cfg_t [NumCheshireConfigs-1:0] TbCheshireConfigs = { + gen_cheshire_memisl_cfg(), // 3: Embedded + Memory Island configuration + gen_cheshire_emb_cfg(), // 2: Embedded configuration + gen_cheshire_rt_cfg(), // 1: RT-enabled configuration + DefaultCfg // 0: Default configuration + }; + +endpackage diff --git a/target/sim/src/tb_chimera_soc.sv b/target/sim/src/tb_chimera_soc.sv new file mode 100644 index 0000000..fc255a8 --- /dev/null +++ b/target/sim/src/tb_chimera_soc.sv @@ -0,0 +1,67 @@ +// Copyright 2022 ETH Zurich and University of Bologna. +// Solderpad Hardware License, Version 0.51, see LICENSE for details. +// SPDX-License-Identifier: SHL-0.51 +// +// Nicole Narr +// Christopher Reinwardt + +module tb_chimera_soc #( + /// The selected simulation configuration from the `tb_chimera_pkg`. 
+ parameter int unsigned SelectedCfg = 32'd0 +); + + fixture_chimera_soc #(.SelectedCfg(SelectedCfg)) fix(); + + string preload_elf; + string boot_hex; + logic [1:0] boot_mode; + logic [1:0] preload_mode; + bit [31:0] exit_code; + + initial begin + // Fetch plusargs or use safe (fail-fast) defaults + if (!$value$plusargs("BOOTMODE=%d", boot_mode)) boot_mode = 0; + if (!$value$plusargs("PRELMODE=%d", preload_mode)) preload_mode = 0; + if (!$value$plusargs("BINARY=%s", preload_elf)) preload_elf = ""; + if (!$value$plusargs("IMAGE=%s", boot_hex)) boot_hex = ""; + + // Set boot mode and preload boot image if there is one + fix.vip.set_boot_mode(boot_mode); + fix.vip.i2c_eeprom_preload(boot_hex); + fix.vip.spih_norflash_preload(boot_hex); + + // Wait for reset + fix.vip.wait_for_reset(); + + // Preload in idle mode or wait for completion in autonomous boot + if (boot_mode == 0) begin + // Idle boot: preload with the specified mode + case (preload_mode) + 0: begin // JTAG + fix.vip.jtag_init(); + fix.vip.jtag_elf_run(preload_elf); + fix.vip.jtag_wait_for_eoc(exit_code); + end 1: begin // Serial Link + fix.vip.slink_elf_run(preload_elf); + fix.vip.slink_wait_for_eoc(exit_code); + end 2: begin // UART + fix.vip.uart_debug_elf_run_and_wait(preload_elf, exit_code); + end default: begin + $fatal(1, "Unsupported preload mode %d (reserved)!", boot_mode); + end + endcase + end else if (boot_mode == 1) begin + $fatal(1, "Unsupported boot mode %d (SD Card)!", boot_mode); + end else begin + // Autonomous boot: Only poll return code + fix.vip.jtag_init(); + fix.vip.jtag_wait_for_eoc(exit_code); + end + + // Wait for the UART to finish reading the current byte + wait (fix.vip.uart_reading_byte == 0); + + $finish; + end + +endmodule diff --git a/target/sim/src/vip_chimera_soc.sv b/target/sim/src/vip_chimera_soc.sv new file mode 100644 index 0000000..022712a --- /dev/null +++ b/target/sim/src/vip_chimera_soc.sv @@ -0,0 +1,932 @@ +// Copyright 2022 ETH Zurich and University of Bologna. +// Solderpad Hardware License, Version 0.51, see LICENSE for details. +// SPDX-License-Identifier: SHL-0.51 +// +// Nicole Narr +// Christopher Reinwardt +// Paul Scheffler + +// Collects all existing verification IP (VIP) in one module for use in testbenches of +// Cheshire-based SoCs and Chips. IOs are of inout direction where applicable. 
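To summarize the plusarg handling above in one place: BOOTMODE selects the boot flow and PRELMODE selects how a binary is preloaded when booting idle. The following C sketch is a documentation aid only (not part of the testbench) and mirrors the dispatch in tb_chimera_soc.sv:

/* Documentation sketch of the tb_chimera_soc.sv plusarg dispatch; not part
 * of the patch. */
#include <stdio.h>

static const char *describe(int boot_mode, int preload_mode) {
  if (boot_mode == 0) {             /* idle boot: preload a binary first */
    switch (preload_mode) {
      case 0:  return "idle boot, preload over JTAG";
      case 1:  return "idle boot, preload over the serial link";
      case 2:  return "idle boot, preload over the UART debug protocol";
      default: return "unsupported preload mode (reserved)";
    }
  }
  if (boot_mode == 1) return "SD-card boot (unsupported by this testbench)";
  return "autonomous boot; the testbench only polls the exit code over JTAG";
}

int main(void) {
  printf("%s\n", describe(0, 1)); /* e.g. +BOOTMODE=0 +PRELMODE=1 */
  return 0;
}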
+ +module vip_chimera_soc import cheshire_pkg::*; #( + // DUT (must be set) + parameter cheshire_cfg_t DutCfg = '0, + // Timing + parameter type axi_ext_mst_req_t = logic, + parameter type axi_ext_mst_rsp_t = logic, + + parameter time ClkPeriodClu = 2ns, + parameter time ClkPeriodSys = 5ns, + parameter time ClkPeriodJtag = 20ns, + parameter time ClkPeriodRtc = 30518ns, + parameter int unsigned RstCycles = 5, + parameter real TAppl = 0.1, + parameter real TTest = 0.9, + // UART + parameter int unsigned UartBaudRate = 115200, + parameter int unsigned UartParityEna = 0, + parameter int unsigned UartBurstBytes = 256, + parameter int unsigned UartWaitCycles = 60, + // Serial Link + parameter int unsigned SlinkMaxWaitAx = 100, + parameter int unsigned SlinkMaxWaitR = 5, + parameter int unsigned SlinkMaxWaitResp = 20, + parameter int unsigned SlinkBurstBytes = 1024, + parameter int unsigned SlinkMaxTxns = 32, + parameter int unsigned SlinkMaxTxnsPerId = 16, + parameter bit SlinkAxiDebug = 0, + // Derived Parameters; *do not override* + parameter int unsigned AxiStrbWidth = DutCfg.AxiDataWidth/8, + parameter int unsigned AxiStrbBits = $clog2(DutCfg.AxiDataWidth/8) +) ( + output logic soc_clk, + output logic clu_clk, + output logic rst_n, + output logic test_mode, + output logic [1:0] boot_mode, + output logic rtc, + input axi_ext_mst_req_t axi_slink_mst_req, + output axi_ext_mst_rsp_t axi_slink_mst_rsp, + // JTAG interface + output logic jtag_tck, + output logic jtag_trst_n, + output logic jtag_tms, + output logic jtag_tdi, + input logic jtag_tdo, + // UART interface + input logic uart_tx, + output logic uart_rx, + // I2C interface + inout wire i2c_sda, + inout wire i2c_scl, + // SPI host interface + inout wire spih_sck, + inout wire [SpihNumCs-1:0] spih_csb, + inout wire [ 3:0] spih_sd, + // Serial link interface + output logic [SlinkNumChan-1:0] slink_rcv_clk_i, + input logic [SlinkNumChan-1:0] slink_rcv_clk_o, + output logic [SlinkNumChan-1:0][SlinkNumLanes-1:0] slink_i, + input logic [SlinkNumChan-1:0][SlinkNumLanes-1:0] slink_o +); + + `include "cheshire/typedef.svh" + `include "axi/assign.svh" + + `CHESHIRE_TYPEDEF_ALL(, DutCfg) + + /////////// + // DPI // + /////////// + + import "DPI-C" function byte read_elf(input string filename); + import "DPI-C" function byte get_entry(output longint entry); + import "DPI-C" function byte get_section(output longint address, output longint len); + import "DPI-C" context function byte read_section(input longint address, inout byte buffer[], input longint len); + + + // CLU Clock Gen + + clk_rst_gen #( + .ClkPeriod ( ClkPeriodClu ), + .RstClkCycles ( RstCycles ) + ) i_clk_rst_clu ( + .clk_o ( clu_clk ), + .rst_no ( ) + ); + + /////////////////////////////// + // SoC Clock, Reset, Modes // + /////////////////////////////// + + clk_rst_gen #( + .ClkPeriod ( ClkPeriodSys ), + .RstClkCycles ( RstCycles ) + ) i_clk_rst_sys ( + .clk_o ( soc_clk ), + .rst_no ( rst_n ) + ); + + clk_rst_gen #( + .ClkPeriod ( ClkPeriodRtc ), + .RstClkCycles ( RstCycles ) + ) i_clk_rst_rtc ( + .clk_o ( rtc ), + .rst_no ( ) + ); + + initial begin + test_mode = '0; + boot_mode = '0; + end + + task wait_for_reset; + @(posedge rst_n); + @(posedge soc_clk); + endtask + + task set_test_mode(input logic mode); + test_mode = mode; + endtask + + task set_boot_mode(input logic [1:0] mode); + boot_mode = mode; + endtask + + //////////// + // JTAG // + //////////// + + localparam dm::sbcs_t JtagInitSbcs = dm::sbcs_t'{ + sbautoincrement: 1'b1, sbreadondata: 1'b1, sbaccess: 3, default: '0}; + + 
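As a small sanity check on the timing parameters above, the C sketch below (illustrative only, not part of the patch) converts the ClkPeriod* values into clock frequencies and derives the UART bit time the same way the UartBaudPeriod localparam further down does (10^9 ns divided by the baud rate):

/* Illustrative arithmetic for the VIP timing parameters; not part of the
 * patch. Periods in ns: Sys = 5, Clu = 2, Jtag = 20, Rtc = 30518. */
#include <stdio.h>

int main(void) {
  const double period_ns[] = {5.0, 2.0, 20.0, 30518.0};
  const char  *name[]      = {"soc", "clu", "jtag", "rtc"};
  for (int i = 0; i < 4; i++)
    printf("%-4s clock: %8.3f MHz\n", name[i], 1.0e3 / period_ns[i]);
  /* UartBaudPeriod = 1000ns * 1000 * 1000 / UartBaudRate */
  printf("uart bit time: %.1f ns (at %d baud)\n", 1.0e9 / 115200.0, 115200);
  return 0;
}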
// Generate clock + clk_rst_gen #( + .ClkPeriod ( ClkPeriodJtag ), + .RstClkCycles ( RstCycles ) + ) i_clk_jtag ( + .clk_o ( jtag_tck ), + .rst_no ( ) + ); + + // Define test bus and driver + JTAG_DV jtag(jtag_tck); + + typedef jtag_test::riscv_dbg #( + .IrLength ( 5 ), + .TA ( ClkPeriodJtag * TAppl ), + .TT ( ClkPeriodJtag * TTest ) + ) riscv_dbg_t; + + riscv_dbg_t::jtag_driver_t jtag_dv = new (jtag); + riscv_dbg_t jtag_dbg = new (jtag_dv); + + // Connect DUT to test bus + assign jtag_trst_n = jtag.trst_n; + assign jtag_tms = jtag.tms; + assign jtag_tdi = jtag.tdi; + assign jtag.tdo = jtag_tdo; + + initial begin + @(negedge rst_n); + jtag_dbg.reset_master(); + end + + task automatic jtag_write( + input dm::dm_csr_e addr, + input word_bt data, + input bit wait_cmd = 0, + input bit wait_sba = 0 + ); + jtag_dbg.write_dmi(addr, data); + if (wait_cmd) begin + dm::abstractcs_t acs; + do begin + jtag_dbg.read_dmi_exp_backoff(dm::AbstractCS, acs); + if (acs.cmderr) $fatal(1, "[JTAG] Abstract command error!"); + end while (acs.busy); + end + if (wait_sba) begin + dm::sbcs_t sbcs; + do begin + jtag_dbg.read_dmi_exp_backoff(dm::SBCS, sbcs); + if (sbcs.sberror | sbcs.sbbusyerror) $fatal(1, "[JTAG] System bus error!"); + end while (sbcs.sbbusy); + end + endtask + + task automatic jtag_poll_bit0( + input doub_bt addr, + output word_bt data, + input int unsigned idle_cycles + ); + automatic dm::sbcs_t sbcs = dm::sbcs_t'{sbreadonaddr: 1'b1, sbaccess: 2, default: '0}; + jtag_write(dm::SBCS, sbcs, 0, 1); + jtag_write(dm::SBAddress1, addr[63:32]); + do begin + jtag_write(dm::SBAddress0, addr[31:0]); + jtag_dbg.wait_idle(idle_cycles); + jtag_dbg.read_dmi_exp_backoff(dm::SBData0, data); + end while (~data[0]); + endtask + + // Initialize the debug module + task automatic jtag_init; + jtag_idcode_t idcode; + dm::dmcontrol_t dmcontrol = '{dmactive: 1, default: '0}; + // Check ID code + repeat(100) @(posedge jtag_tck); + jtag_dbg.get_idcode(idcode); + if (idcode != DutCfg.DbgIdCode) + $fatal(1, "[JTAG] Unexpected ID code: expected 0x%h, got 0x%h!", DutCfg.DbgIdCode, idcode); + // Activate, wait for debug module + jtag_write(dm::DMControl, dmcontrol); + do jtag_dbg.read_dmi_exp_backoff(dm::DMControl, dmcontrol); + while (~dmcontrol.dmactive); + // Activate, wait for system bus + jtag_write(dm::SBCS, JtagInitSbcs, 0, 1); + $display("[JTAG] Initialization success"); + endtask + + task automatic jtag_read_reg32( + input doub_bt addr, + output word_bt data, + input int unsigned idle_cycles = 20 + ); + automatic dm::sbcs_t sbcs = dm::sbcs_t'{sbreadonaddr: 1'b1, sbaccess: 2, default: '0}; + jtag_write(dm::SBCS, sbcs, 0, 1); + jtag_write(dm::SBAddress1, addr[63:32]); + jtag_write(dm::SBAddress0, addr[31:0]); + jtag_dbg.wait_idle(idle_cycles); + jtag_dbg.read_dmi_exp_backoff(dm::SBData0, data); + $display("[JTAG] Read 0x%h from 0x%h", data, addr); + endtask + + task automatic jtag_write_reg32( + input doub_bt addr, + input word_bt data, + input bit check_write, + input int unsigned check_write_wait_cycles = 20 + ); + automatic dm::sbcs_t sbcs = dm::sbcs_t'{sbaccess: 2, default: '0}; + $display("[JTAG] Writing 0x%h to 0x%h", data, addr); + jtag_write(dm::SBCS, sbcs, 0, 1); + jtag_write(dm::SBAddress1, addr[63:32]); + jtag_write(dm::SBAddress0, addr[31:0]); + jtag_write(dm::SBData0, data); + jtag_dbg.wait_idle(check_write_wait_cycles); + if (check_write) begin + word_bt rdata; + jtag_read_reg32(addr, rdata); + if (rdata != data) $fatal(1,"[JTAG] - Read back incorrect data 0x%h!", rdata); + else $display("[JTAG] - Read back 
correct data"); + end + endtask + + // Load a binary + task automatic jtag_elf_preload(input string binary, output doub_bt entry); + longint sec_addr, sec_len; + $display("[JTAG] Preloading ELF binary: %s", binary); + if (read_elf(binary)) + $fatal(1, "[JTAG] Failed to load ELF!"); + while (get_section(sec_addr, sec_len)) begin + byte bf[] = new [sec_len]; + $display("[JTAG] Preloading section at 0x%h (%0d bytes)", sec_addr, sec_len); + if (read_section(sec_addr, bf, sec_len)) $fatal(1, "[JTAG] Failed to read ELF section!"); + jtag_write(dm::SBCS, JtagInitSbcs, 1, 1); + // Write address as 64-bit double + jtag_write(dm::SBAddress1, sec_addr[63:32]); + jtag_write(dm::SBAddress0, sec_addr[31:0]); + for (longint i = 0; i <= sec_len ; i += 8) begin + bit checkpoint = (i != 0 && i % 512 == 0); + if (checkpoint) + $display("[JTAG] - %0d/%0d bytes (%0d%%)", i, sec_len, i*100/(sec_len>1 ? sec_len-1 : 1)); + jtag_write(dm::SBData1, {bf[i+7], bf[i+6], bf[i+5], bf[i+4]}); + jtag_write(dm::SBData0, {bf[i+3], bf[i+2], bf[i+1], bf[i]}, 1, 1); + end + end + void'(get_entry(entry)); + $display("[JTAG] Preload complete"); + endtask + + // Halt the core and preload a binary + task automatic jtag_elf_halt_load(input string binary, output doub_bt entry); + dm::dmstatus_t status; + // Halt hart 0 + jtag_write(dm::DMControl, dm::dmcontrol_t'{haltreq: 1, dmactive: 1, default: '0}); + do jtag_dbg.read_dmi_exp_backoff(dm::DMStatus, status); + while (~status.allhalted); + $display("[JTAG] Halted hart 0"); + // Preload binary + jtag_elf_preload(binary, entry); + endtask + + // Run a binary + task automatic jtag_elf_run(input string binary); + doub_bt entry; + jtag_elf_halt_load(binary, entry); + // Repoint execution + jtag_write(dm::Data1, entry[63:32]); + jtag_write(dm::Data0, entry[31:0]); + jtag_write(dm::Command, 32'h0023_07b1, 0, 1); + // Resume hart 0 + jtag_write(dm::DMControl, dm::dmcontrol_t'{resumereq: 1, dmactive: 1, default: '0}); + $display("[JTAG] Resumed hart 0 from 0x%h", entry); + endtask + + // Wait for termination signal and get return code + task automatic jtag_wait_for_eoc(output word_bt exit_code); + jtag_poll_bit0(AmRegs + cheshire_reg_pkg::CHESHIRE_SCRATCH_2_OFFSET, exit_code, 800); + exit_code >>= 1; + if (exit_code) $error("[JTAG] FAILED: return code %0d", exit_code); + else $display("[JTAG] SUCCESS"); + endtask + + //////////// + // UART // + //////////// + + localparam time UartBaudPeriod = 1000ns*1000*1000/UartBaudRate; + + localparam byte_bt UartDebugCmdRead = 'h11; + localparam byte_bt UartDebugCmdWrite = 'h12; + localparam byte_bt UartDebugCmdExec = 'h13; + localparam byte_bt UartDebugAck = 'h06; + localparam byte_bt UartDebugEot = 'h04; + localparam byte_bt UartDebugEoc = 'h14; + + byte_bt uart_boot_byte; + logic uart_boot_ena; + logic uart_boot_eoc; + logic uart_reading_byte; + + initial begin + uart_rx = 1; + uart_boot_eoc = 0; + uart_boot_ena = 0; + uart_reading_byte = 0; + end + + task automatic uart_read_byte(output byte_bt bite); + // Start bit + @(negedge uart_tx); + uart_reading_byte = 1; + #(UartBaudPeriod/2); + // 8-bit byte + for (int i = 0; i < 8; i++) begin + #UartBaudPeriod bite[i] = uart_tx; + end + // Parity bit + if(UartParityEna) begin + bit parity; + #UartBaudPeriod parity = uart_tx; + if(parity ^ (^bite)) + $error("[UART] - Parity error detected!"); + end + // Stop bit + #UartBaudPeriod; + uart_reading_byte=0; + endtask + + task automatic uart_write_byte(input byte_bt bite); + // Start bit + uart_rx = 1'b0; + // 8-bit byte + for (int i = 0; i < 8; i++) + 
#UartBaudPeriod uart_rx = bite[i]; + // Parity bit + if (UartParityEna) + #UartBaudPeriod uart_rx = (^bite); + // Stop bit + #UartBaudPeriod uart_rx = 1'b1; + #UartBaudPeriod; + endtask + + task automatic uart_boot_scoop(output byte_bt bite); + // Assert our intention to scoop the next received byte + uart_boot_ena = 1; + // Wait until read task notifies us a scooped byte is available + @(negedge uart_boot_ena); + // Grab scooped byte + bite = uart_boot_byte; + endtask + + task automatic uart_boot_scoop_expect(input string name, input byte_bt exp); + byte_bt bite; + uart_boot_scoop(bite); + if (bite != exp) + $fatal(1, "[UART] Expected %s (%0x) after read command, received %0x", name, exp, bite); + endtask + + // Continually read characters and print lines + // TODO: we should be able to support CR properly, but buffers are hard to deal with... + initial begin + static byte_bt uart_read_buf [$]; + byte_bt bite; + wait_for_reset(); + forever begin + uart_read_byte(bite); + if (uart_boot_ena) begin + uart_boot_byte = bite; + uart_boot_ena = 0; + end else if (bite == "\n") begin + $display("[UART] %s", {>>8{uart_read_buf}}); + uart_read_buf.delete(); + end else if (bite == UartDebugEoc) begin + uart_boot_eoc = 1; + end else begin + uart_read_buf.push_back(bite); + end + end + end + + // A length of zero indicates a write (write lengths are inferred from their queue) + task automatic uart_debug_rw(doub_bt addr, doub_bt len_or_w, ref byte_bt data [$]); + byte_bt bite; + doub_bt len = len_or_w ? len_or_w : data.size(); + // Send command, address, and length + uart_write_byte(len_or_w ? UartDebugCmdRead : UartDebugCmdWrite); + for (int i = 0; i < 8; ++i) + uart_write_byte(addr[8*i +: 8]); + for (int i = 0; i < 8; ++i) + uart_write_byte(len[8*i +: 8]); + // Receive and check ACK + uart_boot_scoop_expect("ACK", UartDebugAck); + // Send or receive requested data + for (int i = 0; i < len; ++i) begin + if (len_or_w) begin + uart_boot_scoop(bite); + data.push_back(bite); + end else begin + uart_write_byte(data[i]); + end + end + // Receive and check EOT + uart_boot_scoop_expect("EOT", UartDebugEot); + endtask + + // Load a binary + task automatic uart_debug_elf_preload(input string binary, output doub_bt entry); + longint sec_addr, sec_len; + $display("[UART] Preloading ELF binary: %s", binary); + if (read_elf(binary)) + $fatal(1, "[UART] Failed to load ELF!"); + while (get_section(sec_addr, sec_len)) begin + byte bf[] = new [sec_len]; + $display("[UART] Preloading section at 0x%h (%0d bytes)", sec_addr, sec_len); + if (read_section(sec_addr, bf, sec_len)) $fatal(1, "[UART] Failed to read ELF section!"); + // Write section in blocks + for (longint i = 0; i <= sec_len ; i += UartBurstBytes) begin + byte_bt bytes [$]; + if (i != 0) + $display("[UART] - %0d/%0d bytes (%0d%%)", i, sec_len, i*100/(sec_len>1 ?
sec_len-1 : 1)); + for (int b = 0; b < UartBurstBytes; b++) begin + if (i+b >= sec_len) break; + bytes.push_back(bf [i+b]); + end + uart_debug_rw(sec_addr + i, 0, bytes); + end + end + void'(get_entry(entry)); + $display("[UART] Preload complete"); + endtask + + task automatic uart_debug_elf_run_and_wait(input string binary, output word_bt exit_code); + byte_bt bite; + doub_bt entry; + // Wait some time for boot ROM to settle (No way to query this using only UART) + $display("[UART] Waiting for debug loop to start"); + #(UartWaitCycles*UartBaudPeriod); + // We send an ACK challenge to the debug server and wait for an ACK response + $display("[UART] Sending ACK challenge"); + uart_write_byte(UartDebugAck); + uart_boot_scoop_expect("ACK", UartDebugAck); + // Preload + uart_debug_elf_preload(binary, entry); + $display("[UART] Sending EXEC command for address %0x", entry); + // Send exec command and receive ACK + uart_write_byte(UartDebugCmdExec); + for (int i = 0; i < 8; ++i) + uart_write_byte(entry[8*i +: 8]); + uart_boot_scoop_expect("ACK", UartDebugAck); + // Wait for EOC and read return code + wait (uart_boot_eoc == 1); + $display("[UART] Received EOC signal"); + uart_boot_eoc = 0; + for (int i = 0; i < 4; ++i) + uart_boot_scoop(exit_code[8*i +: 8]); + // Report exit code + if (exit_code) $error("[UART] FAILED: return code %0d", exit_code); + else $display("[UART] SUCCESS"); + endtask + + /////////// + // I2C // + /////////// + + // Write-protect only chip 0 + bit [3:0] i2c_wp = 4'b0001; + + // We connect 2 chips available at different addresses; + // however, the boot ROM will always boot from chip 0. + for (genvar i = 0; i < 2; i++) begin : gen_i2c_eeproms + M24FC1025 i_i2c_eeprom ( + .RESET ( rst_n ), + .A0 ( i[0] ), + .A1 ( 1'b0 ), + .A2 ( 1'b1 ), + .WP ( i2c_wp[i] ), + .SDA ( i2c_sda ), + .SCL ( i2c_scl ) + ); + end + + // Preload function called by testbench + task automatic i2c_eeprom_preload(string image); + // We overlay the entire memory with an alternating pattern + for (int k = 0; k < $size(gen_i2c_eeproms[0].i_i2c_eeprom.MemoryBlock); ++k) + gen_i2c_eeproms[0].i_i2c_eeprom.MemoryBlock[k] = 'h9a; + // We load an image into chip 0 only if it exists + if (image != "") + $readmemh(image, gen_i2c_eeproms[0].i_i2c_eeprom.MemoryBlock); + endtask + + //////////////// + // SPI Host // + //////////////// + + // We connect one chip at CS1, from which we can boot.
+ s25fs512s #( + .UserPreload ( 0 ) + ) i_spi_norflash ( + .SI ( spih_sd[0] ), + .SO ( spih_sd[1] ), + .WPNeg ( spih_sd[2] ), + .RESETNeg ( spih_sd[3] ), + .SCK ( spih_sck ), + .CSNeg ( spih_csb[1] ) + ); + + // Preload function called by testbench + task automatic spih_norflash_preload(string image); + // We overlay the entire memory with an alternating pattern + for (int k = 0; k < $size(i_spi_norflash.Mem); ++k) + i_spi_norflash.Mem[k] = 'h9a; + // We load an image into chip 0 only if it exists + if (image != "") + $readmemh(image, i_spi_norflash.Mem); + endtask + + /////////////////// + // Serial Link // + /////////////////// + + axi_mst_req_t slink_axi_mst_req, slink_axi_slv_req; + axi_mst_rsp_t slink_axi_mst_rsp, slink_axi_slv_rsp; + + AXI_BUS_DV #( + .AXI_ADDR_WIDTH ( DutCfg.AddrWidth ), + .AXI_DATA_WIDTH ( DutCfg.AxiDataWidth ), + .AXI_ID_WIDTH ( DutCfg.AxiMstIdWidth ), + .AXI_USER_WIDTH ( DutCfg.AxiUserWidth ) + ) slink_mst_vip_dv ( + .clk_i ( soc_clk ) + ); + + AXI_BUS #( + .AXI_ADDR_WIDTH ( DutCfg.AddrWidth ), + .AXI_DATA_WIDTH ( DutCfg.AxiDataWidth ), + .AXI_ID_WIDTH ( DutCfg.AxiMstIdWidth ), + .AXI_USER_WIDTH ( DutCfg.AxiUserWidth ) + ) slink_mst_ext(), slink_mst_vip(), slink_mst(); + + AXI_BUS #( + .AXI_ADDR_WIDTH ( DutCfg.AddrWidth ), + .AXI_DATA_WIDTH ( DutCfg.AxiDataWidth ), + .AXI_ID_WIDTH ( DutCfg.AxiMstIdWidth+1 ), + .AXI_USER_WIDTH ( DutCfg.AxiUserWidth ) + ) slink_mst_mux(); + + AXI_BUS_DV #( + .AXI_ADDR_WIDTH ( DutCfg.AddrWidth ), + .AXI_DATA_WIDTH ( DutCfg.AxiDataWidth ), + .AXI_ID_WIDTH ( DutCfg.AxiMstIdWidth ), + .AXI_USER_WIDTH ( DutCfg.AxiUserWidth ) + ) slink_slv ( + .clk_i ( soc_clk ) + ); + + // Multiplex internal and external AXI requests + axi_mux_intf #( + .SLV_AXI_ID_WIDTH ( DutCfg.AxiMstIdWidth ), + .MST_AXI_ID_WIDTH ( DutCfg.AxiMstIdWidth+1 ), + .AXI_ADDR_WIDTH ( DutCfg.AddrWidth ), + .AXI_DATA_WIDTH ( DutCfg.AxiDataWidth ), + .AXI_USER_WIDTH ( DutCfg.AxiUserWidth ), + .NO_SLV_PORTS ( 2 ) + ) i_axi_mux_slink ( + .clk_i ( soc_clk ), + .rst_ni ( rst_n ), + .test_i ( test_mode ), + .slv ( '{slink_mst_vip, slink_mst_ext} ), + .mst ( slink_mst_mux ) + ); + + // Serialize away added AXI index bits + axi_id_serialize_intf #( + .AXI_SLV_PORT_ID_WIDTH ( DutCfg.AxiMstIdWidth+1 ), + .AXI_SLV_PORT_MAX_TXNS ( SlinkMaxTxns ), + .AXI_MST_PORT_ID_WIDTH ( DutCfg.AxiMstIdWidth ), + .AXI_MST_PORT_MAX_UNIQ_IDS ( 2**DutCfg.AxiMstIdWidth ), + .AXI_MST_PORT_MAX_TXNS_PER_ID ( SlinkMaxTxnsPerId ), + .AXI_ADDR_WIDTH ( DutCfg.AddrWidth ), + .AXI_DATA_WIDTH ( DutCfg.AxiDataWidth ), + .AXI_USER_WIDTH ( DutCfg.AxiUserWidth ) + ) i_axi_id_serialize_slink ( + .clk_i ( soc_clk ), + .rst_ni ( rst_n ), + .slv ( slink_mst_mux ), + .mst ( slink_mst ) + ); + + `AXI_ASSIGN (slink_mst_vip, slink_mst_vip_dv) + + `AXI_ASSIGN_FROM_REQ(slink_mst_ext, axi_slink_mst_req) + `AXI_ASSIGN_TO_RESP(axi_slink_mst_rsp, slink_mst_ext) + + `AXI_ASSIGN_TO_REQ(slink_axi_mst_req, slink_mst) + `AXI_ASSIGN_FROM_RESP(slink_mst, slink_axi_mst_rsp) + + `AXI_ASSIGN_FROM_REQ(slink_slv, slink_axi_slv_req) + `AXI_ASSIGN_TO_RESP(slink_axi_slv_rsp, slink_slv) + + // Mirror instance of serial link, reflecting another chip + serial_link #( + .axi_req_t ( axi_mst_req_t ), + .axi_rsp_t ( axi_mst_rsp_t ), + .cfg_req_t ( reg_req_t ), + .cfg_rsp_t ( reg_rsp_t ), + .aw_chan_t ( axi_mst_aw_chan_t ), + .ar_chan_t ( axi_mst_ar_chan_t ), + .r_chan_t ( axi_mst_r_chan_t ), + .w_chan_t ( axi_mst_w_chan_t ), + .b_chan_t ( axi_mst_b_chan_t ), + .hw2reg_t ( serial_link_single_channel_reg_pkg::serial_link_single_channel_hw2reg_t ), + 
.reg2hw_t ( serial_link_single_channel_reg_pkg::serial_link_single_channel_reg2hw_t ), + .NumChannels ( SlinkNumChan ), + .NumLanes ( SlinkNumLanes ), + .MaxClkDiv ( SlinkMaxClkDiv ) + ) i_serial_link ( + .clk_i ( soc_clk ), + .rst_ni ( rst_n ), + .clk_sl_i ( clk ), + .rst_sl_ni ( rst_n ), + .clk_reg_i ( clk ), + .rst_reg_ni ( rst_n ), + .testmode_i ( test_mode ), + .axi_in_req_i ( slink_axi_mst_req ), + .axi_in_rsp_o ( slink_axi_mst_rsp ), + .axi_out_req_o ( slink_axi_slv_req ), + .axi_out_rsp_i ( slink_axi_slv_rsp ), + .cfg_req_i ( '0 ), + .cfg_rsp_o ( ), + .ddr_rcv_clk_i ( slink_rcv_clk_o ), + .ddr_rcv_clk_o ( slink_rcv_clk_i ), + .ddr_i ( slink_o ), + .ddr_o ( slink_i ), + .isolated_i ( '0 ), + .isolate_o ( ), + .clk_ena_o ( ), + .reset_no ( ) + ); + + // We terminate the slave interface with a random agent + axi_test::axi_rand_slave #( + .AW ( DutCfg.AddrWidth ), + .DW ( DutCfg.AxiDataWidth ), + .IW ( DutCfg.AxiMstIdWidth ), + .UW ( DutCfg.AxiUserWidth ), + .MAPPED ( 1'b1 ), + .TA ( ClkPeriodSys * TAppl ), + .TT ( ClkPeriodSys * TTest ), + .RAND_RESP ( 0 ), + .AX_MIN_WAIT_CYCLES ( 0 ), + .AX_MAX_WAIT_CYCLES ( SlinkMaxWaitAx ), + .R_MIN_WAIT_CYCLES ( 0 ), + .R_MAX_WAIT_CYCLES ( SlinkMaxWaitR ), + .RESP_MIN_WAIT_CYCLES ( 0 ), + .RESP_MAX_WAIT_CYCLES ( SlinkMaxWaitResp ) + ) i_slink_rand_slv = new (slink_slv); + + initial begin + i_slink_rand_slv.run(); + end + + // We use an AXI driver to inject serial link transfers + typedef axi_test::axi_driver #( + .AW ( DutCfg.AddrWidth ), + .DW ( DutCfg.AxiDataWidth ), + .IW ( DutCfg.AxiMstIdWidth ), + .UW ( DutCfg.AxiUserWidth ), + .TA ( ClkPeriodSys * TAppl ), + .TT ( ClkPeriodSys * TTest ) + ) slink_axi_driver_t; + + slink_axi_driver_t slink_axi_driver = new (slink_mst_vip_dv); + + initial begin + @(negedge rst_n); + slink_axi_driver.reset_master(); + end + + task automatic slink_write_beats( + input addr_t addr, + input axi_pkg::size_t size, + ref axi_data_t beats [$] + ); + slink_axi_driver_t::ax_beat_t ax = new(); + slink_axi_driver_t::w_beat_t w = new(); + slink_axi_driver_t::b_beat_t b; + int i = 0; + int size_bytes = (1 << size); + if (beats.size() == 0) + $fatal(1, "[SLINK] Zero-length write requested!"); + @(posedge clk); + if (SlinkAxiDebug) $display("[SLINK] Write to address: %h, len: %0d", addr, beats.size()-1); + ax.ax_addr = addr; + ax.ax_id = '0; + ax.ax_len = beats.size() - 1; + ax.ax_size = size; + ax.ax_burst = axi_pkg::BURST_INCR; + if (SlinkAxiDebug) $display("[SLINK] - Sending AW "); + slink_axi_driver.send_aw(ax); + do begin + w.w_strb = i == 0 ? 
(~('1 << size_bytes)) << addr[AxiStrbBits-1:0] : '1; + w.w_data = beats[i]; + w.w_last = (i == ax.ax_len); + if (SlinkAxiDebug) $display("[SLINK] - Sending W (%0d)", i); + slink_axi_driver.send_w(w); + addr += size_bytes; + addr &= size_bytes - 1; + i++; + end while (i <= ax.ax_len); + if (SlinkAxiDebug) $display("[SLINK] - Receiving B"); + slink_axi_driver.recv_b(b); + if (b.b_resp != axi_pkg::RESP_OKAY) + $error("[SLINK] - Write error response: %d!", b.b_resp); + if (SlinkAxiDebug) $display("[SLINK] - Done"); + endtask + + task automatic slink_read_beats( + input addr_t addr, + input axi_pkg::size_t size, + input axi_pkg::len_t len, + ref axi_data_t beats [$] + ); + slink_axi_driver_t::ax_beat_t ax = new(); + slink_axi_driver_t::r_beat_t r; + int i = 0; + @(posedge clk) + if (SlinkAxiDebug) $display("[SLINK] Read from address: %h, len: %0d", addr, len); + ax.ax_addr = addr; + ax.ax_id = '0; + ax.ax_len = len; + ax.ax_size = size; + ax.ax_burst = axi_pkg::BURST_INCR; + if (SlinkAxiDebug) $display("[SLINK] - Sending AR"); + slink_axi_driver.send_ar(ax); + do begin + if (SlinkAxiDebug) $display("[SLINK] - Receiving R (%0d)", i); + slink_axi_driver.recv_r(r); + beats.push_back(r.r_data); + addr += (1 << size); + addr &= (1 << size) - 1; + i++; + if (r.r_resp != axi_pkg::RESP_OKAY) + $error("[SLINK] - Read error response: %d!", r.r_resp); + end while (!r.r_last); + if (SlinkAxiDebug) $display("[SLINK] - Done"); + endtask + + task automatic slink_write_32(input addr_t addr, input word_bt data); + axi_data_t beats [$]; + beats.push_back(data << (8 * addr[AxiStrbBits-1:0])); + slink_write_beats(addr, 2, beats); + endtask + + task automatic slink_poll_bit0( + input doub_bt addr, + output word_bt data, + input int unsigned idle_cycles + ); + do begin + axi_data_t beats [$]; + #(ClkPeriodSys * idle_cycles); + slink_read_beats(addr, 2, 0, beats); + data = beats[0] >> addr[AxiStrbBits-1:0]; + end while (~data[0]); + endtask + + // Load a binary + task automatic slink_elf_preload(input string binary, output doub_bt entry); + longint sec_addr, sec_len, bus_offset, write_addr; + $display("[SLINK] Preloading ELF binary: %s", binary); + if (read_elf(binary)) + $fatal(1, "[SLINK] Failed to load ELF!"); + while (get_section(sec_addr, sec_len)) begin + byte bf[] = new [sec_len]; + $display("[SLINK] Preloading section at 0x%h (%0d bytes)", sec_addr, sec_len); + if (read_section(sec_addr, bf, sec_len)) $fatal(1, "[SLINK] Failed to read ELF section!"); + // Write section as fixed-size bursts + bus_offset = sec_addr[AxiStrbBits-1:0]; + for (longint i = 0; i <= sec_len ; i += SlinkBurstBytes) begin + axi_data_t beats [$]; + if (i != 0) + $display("[SLINK] - %0d/%0d bytes (%0d%%)", i, sec_len, i*100/(sec_len>1 ? sec_len-1 : 1)); + // Assemble beats for current burst from section buffer + for (int b = 0; b < SlinkBurstBytes; b += AxiStrbWidth) begin + axi_data_t beat; + // We handle incomplete bursts + if (i+b-bus_offset >= sec_len) break; + for (int e = 0; e < AxiStrbWidth; ++e) + if (i+b+e < bus_offset) begin + beat[8*e +: 8] = '0; + end else if (i+b+e-bus_offset >= sec_len) begin + beat[8*e +: 8] = '0; + end else begin + beat[8*e +: 8] = bf [i+b+e-bus_offset]; + end + + beats.push_back(beat); + end + write_addr = sec_addr + (i==0 ? 
0 : i - sec_addr%AxiStrbWidth); + // Write this burst + slink_write_beats(write_addr, AxiStrbBits, beats); + end + end + void'(get_entry(entry)); + $display("[SLINK] Preload complete"); + endtask + + // Run a binary + task automatic slink_elf_run(input string binary); + doub_bt entry; + // Wait for bootrom to ungate Serial Link + if (DutCfg.LlcNotBypass) begin + word_bt regval; + $display("[SLINK] Wait for LLC configuration"); + slink_poll_bit0(AmLlc + axi_llc_reg_pkg::AXI_LLC_CFG_SPM_LOW_OFFSET, regval, 20); + end + // Preload + slink_elf_preload(binary, entry); + // Write entry point + slink_write_32(AmRegs + cheshire_reg_pkg::CHESHIRE_SCRATCH_1_OFFSET, entry[63:32]); + slink_write_32(AmRegs + cheshire_reg_pkg::CHESHIRE_SCRATCH_0_OFFSET, entry[32:0]); + // Resume hart 0 + slink_write_32(AmRegs + cheshire_reg_pkg::CHESHIRE_SCRATCH_2_OFFSET, 2); + $display("[SLINK] Wrote launch signal and entry point 0x%h", entry); + endtask + + // Wait for termination signal and get return code + task automatic slink_wait_for_eoc(output word_bt exit_code); + slink_poll_bit0(AmRegs + cheshire_reg_pkg::CHESHIRE_SCRATCH_2_OFFSET, exit_code, 800); + exit_code >>= 1; + if (exit_code) $error("[SLINK] FAILED: return code %0d", exit_code); + else $display("[SLINK] SUCCESS"); + endtask + + +endmodule + +// Map pad IO to tristate wires to adapt from SoC IO (not needed for chip instances). + +module vip_cheshire_soc_tristate import cheshire_pkg::*; ( + // I2C pad IO + output logic i2c_sda_i, + input logic i2c_sda_o, + input logic i2c_sda_en, + output logic i2c_scl_i, + input logic i2c_scl_o, + input logic i2c_scl_en, + // SPI host pad IO + input logic spih_sck_o, + input logic spih_sck_en, + input logic [SpihNumCs-1:0] spih_csb_o, + input logic [SpihNumCs-1:0] spih_csb_en, + output logic [ 3:0] spih_sd_i, + input logic [ 3:0] spih_sd_o, + input logic [ 3:0] spih_sd_en, + // I2C wires + inout wire i2c_sda, + inout wire i2c_scl, + // SPI host wires + inout wire spih_sck, + inout wire [SpihNumCs-1:0] spih_csb, + inout wire [ 3:0] spih_sd +); + + // I2C + bufif1 (i2c_sda_i, i2c_sda, ~i2c_sda_en); + bufif1 (i2c_sda, i2c_sda_o, i2c_sda_en); + bufif1 (i2c_scl_i, i2c_scl, ~i2c_scl_en); + bufif1 (i2c_scl, i2c_scl_o, i2c_scl_en); + pullup (i2c_sda); + pullup (i2c_scl); + + // SPI + bufif1 (spih_sck, spih_sck_o, spih_sck_en); + pullup (spih_sck); + + for (genvar i = 0; i < 4; ++i) begin : gen_spih_sd_io + bufif1 (spih_sd_i[i], spih_sd[i], ~spih_sd_en[i]); + bufif1 (spih_sd[i], spih_sd_o[i], spih_sd_en[i]); + pullup (spih_sd[i]); + end + + for (genvar i = 0; i < SpihNumCs; ++i) begin : gen_spih_cs_io + bufif1 (spih_csb[i], spih_csb_o[i], spih_csb_en[i]); + pullup (spih_csb[i]); + end + +endmodule diff --git a/target/sim/vsim/setup.chimera_soc.tcl b/target/sim/vsim/setup.chimera_soc.tcl new file mode 100644 index 0000000..103c912 --- /dev/null +++ b/target/sim/vsim/setup.chimera_soc.tcl @@ -0,0 +1,27 @@ +# ---------------------------------------------------------------------- +# +# File: setup.chimera_soc.tcl +# +# Created: 25.06.2024 +# +# Copyright (C) 2024, ETH Zurich and University of Bologna. +# +# Author: Moritz Scherer, ETH Zurich +# +# ---------------------------------------------------------------------- +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the License); you may +# not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an AS IS BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set BINARY ../../../sw/tests/testCluster.memisl.elf +set SELCFG 0 diff --git a/target/sim/vsim/start.chimera_soc.tcl b/target/sim/vsim/start.chimera_soc.tcl new file mode 100644 index 0000000..c2fda08 --- /dev/null +++ b/target/sim/vsim/start.chimera_soc.tcl @@ -0,0 +1,30 @@ +# Copyright 2022 ETH Zurich and University of Bologna. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 +# +# Nicole Narr +# Christopher Reinwardt +# Alessandro Ottaviano +# Paul Scheffler + +set TESTBENCH tb_chimera_soc + +# Set voptargs only if not already set to make overridable. +# Default on fast simulation flags. +if {![info exists VOPTARGS]} { + set VOPTARGS "+acc" +} + +set flags "-permissive -suppress 3009 -suppress 8386 -error 7 " +if {[info exists SELCFG]} { append flags "-GSelectedCfg=${SELCFG} " } + +set pargs "" +if {[info exists BOOTMODE]} { append pargs "+BOOTMODE=${BOOTMODE} " } +if {[info exists PRELMODE]} { append pargs "+PRELMODE=${PRELMODE} " } +if {[info exists BINARY]} { append pargs "+BINARY=${BINARY} " } +if {[info exists IMAGE]} { append pargs "+IMAGE=${IMAGE} " } + +eval "vsim -c ${TESTBENCH} -t 1ps -vopt -voptargs=\"${VOPTARGS}\"" ${pargs} ${flags} + +set StdArithNoWarnings 1 +set NumericStdNoWarnings 1 diff --git a/utils/reggen/reggen/README.md b/utils/reggen/reggen/README.md new file mode 100644 index 0000000..ea244dc --- /dev/null +++ b/utils/reggen/reggen/README.md @@ -0,0 +1,113 @@ +# Register generator `reggen` and `regtool` + +The utility script `regtool.py` and collateral under `reggen` are Python +tools to read register descriptions in Hjson and generate various output +formats. The tool can output HTML documentation, standard JSON, compact +standard JSON (whitespace removed) and Hjson. The example commands assume +`$REPO_TOP` is set to the toplevel directory of the repository. + +### Setup + +If packages have not previously been installed, you will need to set a +few things up. First use `pip3` to install some required packages: + +```console +$ pip3 install --user hjson +$ pip3 install --user mistletoe +$ pip3 install --user mako +``` + +### Register JSON Format + +For details on the register JSON format, see the +[register tool documentation]({{< relref "doc/rm/register_tool/index.md" >}}). +To ensure things stay up to date, the register JSON format information +is documented by the tool itself. +The documentation can be generated by running the following commands: + +```console +$ cd $REPO_TOP/util +$ ./build_docs.py +``` +Under the hood, the `build_docs.py` tool will automatically use the `reggen` +tool to produce Markdown and process it into HTML. + +### Examples using standalone regtool + +Normally for documentation the `build_docs.py` tool will automatically +use `reggen`. The script `regtool.py` provides a standalone way to run +`reggen`. See the +[register tool documentation]({{< relref "doc/rm/register_tool/index.md" >}}) +for details about how to invoke the tool.
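+
+As a rough illustration, a register description passed to the tool has the
+following general shape (a made-up minimal example, not a file from this
+repository; key names such as `clock_primary` and the exact set of required
+fields should be checked against the self-generated documentation above):
+
+```hjson
+{
+  name: "demo",
+  clock_primary: "clk_i",
+  bus_interfaces: [
+    { protocol: "reg_iface", direction: "device" }
+  ],
+  registers: [
+    { name: "CTRL",
+      desc: "Example control register",
+      swaccess: "rw",
+      hwaccess: "hro",
+      fields: [
+        { bits: "0", name: "ENABLE", desc: "Enable the example function" }
+      ]
+    }
+  ]
+}
+```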
+ +The following shows an example of how to generate RTL from a register +description: + +```console +$ cd $REPO_TOP/util +$ mkdir /tmp/rtl +$ ./regtool.py -r -t /tmp/rtl ../hw/ip/uart/data/uart.hjson +$ ls /tmp/rtl + uart_reg_pkg.sv uart_reg_top.sv +``` + +The following shows an example of how to generate a DV UVM class from +a register description: + +```console +$ cd $REPO_TOP/util +$ mkdir /tmp/dv +$ ./regtool.py -s -t /tmp/dv ../hw/ip/uart/data/uart.hjson +$ ls /tmp/dv + uart_ral_pkg.sv +``` + +By default, the generated block, register and field models are derived from +`dv_base_reg` classes provided at `hw/dv/sv/dv_base_reg`. If required, the user +can supply the `--dv-base-prefix my_base` switch to have the models derive from +custom, user-defined RAL classes instead: + +```console +$ cd $REPO_TOP/util +$ mkdir /tmp/dv +$ ./regtool.py -s -t /tmp/dv ../hw/ip/uart/data/uart.hjson \ + --dv-base-prefix my_base +$ ls /tmp/dv + uart_ral_pkg.sv +``` + +This makes the following assumptions: +- A FuseSoC core file aggregating the `my_base` RAL classes with the VLNV + name `lowrisc:dv:my_base_reg` is provided in the cores search path. +- These custom classes are derived from the corresponding `dv_base_reg` classes + and have the following names: + - `my_base_reg_pkg.sv`: The RAL package that includes the below sources + - `my_base_reg_block.sv`: The register block abstraction + - `my_base_reg.sv`: The register abstraction + - `my_base_reg_field.sv`: The register field abstraction + - `my_base_mem.sv`: The memory abstraction +- If any of the above class specializations is not needed, it can be + `typedef`'ed in `my_base_reg_pkg`: + ```systemverilog + package my_base_reg_pkg; + import dv_base_reg_pkg::*; + typedef dv_base_reg_field my_base_reg_field; + typedef dv_base_mem my_base_mem; + `include "my_base_reg.sv" + `include "my_base_reg_block.sv" + endpackage + ``` + +The following shows an example of how to generate an FPV CSR read/write assertion +module from a register description: + +```console +$ cd $REPO_TOP/util +$ mkdir /tmp/fpv/vip +$ ./regtool.py -f -t /tmp/fpv/vip ../hw/ip/uart/data/uart.hjson +$ ls /tmp/fpv + uart_csr_assert_fpv.sv +``` + +If the target directory is not specified, the tool creates the DV file +under the `hw/ip/{module}/dv/` directory. diff --git a/utils/reggen/reggen/__init__.py b/utils/reggen/reggen/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/utils/reggen/reggen/access.py b/utils/reggen/reggen/access.py new file mode 100644 index 0000000..286fc87 --- /dev/null +++ b/utils/reggen/reggen/access.py @@ -0,0 +1,121 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details.
+# SPDX-License-Identifier: Apache-2.0 +"""Enumerated types for fields +Generated by validation, used by backends +""" + +from enum import Enum + +from .lib import check_str + + +class JsonEnum(Enum): + def for_json(x) -> str: + return str(x) + + +class SwWrAccess(JsonEnum): + WR = 1 + NONE = 2 + + +class SwRdAccess(JsonEnum): + RD = 1 + RC = 2 # Special handling for port + NONE = 3 + + +class SwAccess(JsonEnum): + RO = 1 + RW = 2 + WO = 3 + W1C = 4 + W1S = 5 + W0C = 6 + RC = 7 + R0W1C = 8 + NONE = 9 + + +class HwAccess(JsonEnum): + HRO = 1 + HRW = 2 + HWO = 3 + NONE = 4 # No access allowed + + +# swaccess permitted values +# text description, access enum, wr access enum, rd access enum, ok in window +SWACCESS_PERMITTED = { + 'none': ("No access", # noqa: E241 + SwAccess.NONE, SwWrAccess.NONE, SwRdAccess.NONE, False), # noqa: E241 + 'ro': ("Read Only", # noqa: E241 + SwAccess.RO, SwWrAccess.NONE, SwRdAccess.RD, True), # noqa: E241 + 'rc': ("Read Only, reading clears", # noqa: E241 + SwAccess.RC, SwWrAccess.WR, SwRdAccess.RC, False), # noqa: E241 + 'rw': ("Read/Write", # noqa: E241 + SwAccess.RW, SwWrAccess.WR, SwRdAccess.RD, True), # noqa: E241 + 'r0w1c': ("Read zero, Write with 1 clears", # noqa: E241 + SwAccess.W1C, SwWrAccess.WR, SwRdAccess.NONE, False), # noqa: E241 + 'rw1s': ("Read, Write with 1 sets", # noqa: E241 + SwAccess.W1S, SwWrAccess.WR, SwRdAccess.RD, False), # noqa: E241 + 'rw1c': ("Read, Write with 1 clears", # noqa: E241 + SwAccess.W1C, SwWrAccess.WR, SwRdAccess.RD, False), # noqa: E241 + 'rw0c': ("Read, Write with 0 clears", # noqa: E241 + SwAccess.W0C, SwWrAccess.WR, SwRdAccess.RD, False), # noqa: E241 + 'wo': ("Write Only", # noqa: E241 + SwAccess.WO, SwWrAccess.WR, SwRdAccess.NONE, True) # noqa: E241 +} + +# hwaccess permitted values +HWACCESS_PERMITTED = { + 'hro': ("Read Only", HwAccess.HRO), + 'hrw': ("Read/Write", HwAccess.HRW), + 'hwo': ("Write Only", HwAccess.HWO), + 'none': ("No Access Needed", HwAccess.NONE) +} + + +class SWAccess: + def __init__(self, where: str, raw: object): + self.key = check_str(raw, 'swaccess for {}'.format(where)) + try: + self.value = SWACCESS_PERMITTED[self.key] + except KeyError: + raise ValueError('Unknown swaccess key, {}, for {}.' + .format(self.key, where)) from None + + def dv_rights(self) -> str: + if self.key in ['none', 'ro', 'rc']: + return "RO" + elif self.key in ['rw', 'r0w1c', 'rw1s', 'rw1c', 'rw0c']: + return "RW" + else: + assert self.key == 'wo' + return "WO" + + def swrd(self) -> SwRdAccess: + return self.value[3] + + def allows_read(self) -> bool: + return self.value[3] != SwRdAccess.NONE + + def allows_write(self) -> bool: + return self.value[2] == SwWrAccess.WR + + +class HWAccess: + def __init__(self, where: str, raw: object): + self.key = check_str(raw, 'hwaccess for {}'.format(where)) + try: + self.value = HWACCESS_PERMITTED[self.key] + except KeyError: + raise ValueError('Unknown hwaccess key, {}, for {}.' + .format(self.key, where)) from None + + def allows_read(self) -> bool: + return self.key in ['hro', 'hrw'] + + def allows_write(self) -> bool: + return self.key in ['hrw', 'hwo'] diff --git a/utils/reggen/reggen/alert.py b/utils/reggen/reggen/alert.py new file mode 100644 index 0000000..a23ff49 --- /dev/null +++ b/utils/reggen/reggen/alert.py @@ -0,0 +1,54 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+# SPDX-License-Identifier: Apache-2.0 + +from typing import Dict, List + +from .bits import Bits +from .signal import Signal +from .lib import check_keys, check_name, check_str, check_list + + +class Alert(Signal): + def __init__(self, name: str, desc: str, bit: int, fatal: bool): + super().__init__(name, desc, Bits(bit, bit)) + self.bit = bit + self.fatal = fatal + + @staticmethod + def from_raw(what: str, + lsb: int, + raw: object) -> 'Alert': + rd = check_keys(raw, what, ['name', 'desc'], []) + + name = check_name(rd['name'], 'name field of ' + what) + desc = check_str(rd['desc'], 'desc field of ' + what) + + # Make sense of the alert name, which should be prefixed with recov_ or + # fatal_. + pfx = name.split('_')[0] + if pfx == 'recov': + fatal = False + elif pfx == 'fatal': + fatal = True + else: + raise ValueError('Invalid name field of {}: alert names must be ' + 'prefixed with "recov_" or "fatal_". Saw {!r}.' + .format(what, name)) + + return Alert(name, desc, lsb, fatal) + + @staticmethod + def from_raw_list(what: str, raw: object) -> List['Alert']: + ret = [] + for idx, entry in enumerate(check_list(raw, what)): + entry_what = 'entry {} of {}'.format(idx, what) + alert = Alert.from_raw(entry_what, idx, entry) + ret.append(alert) + return ret + + def _asdict(self) -> Dict[str, object]: + return { + 'name': self.name, + 'desc': self.desc, + } diff --git a/utils/reggen/reggen/bits.py b/utils/reggen/reggen/bits.py new file mode 100644 index 0000000..c8d48f7 --- /dev/null +++ b/utils/reggen/reggen/bits.py @@ -0,0 +1,87 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 + +'''Support code for bit ranges in reggen''' + +from typing import Tuple + +from .lib import check_str +from .params import ReggenParams + + +class Bits: + def __init__(self, msb: int, lsb: int): + assert 0 <= lsb <= msb + self.msb = msb + self.lsb = lsb + + def bitmask(self) -> int: + return (1 << (self.msb + 1)) - (1 << self.lsb) + + def width(self) -> int: + return 1 + self.msb - self.lsb + + def max_value(self) -> int: + return (1 << self.width()) - 1 + + def extract_field(self, reg_val: int) -> int: + return (reg_val & self.bitmask()) >> self.lsb + + @staticmethod + def from_raw(where: str, + reg_width: int, + params: ReggenParams, + raw: object) -> 'Bits': + # Bits should be specified as msb:lsb or as just a single bit index. + if isinstance(raw, int): + msb = raw + lsb = raw + else: + str_val = check_str(raw, 'bits field for {}'.format(where)) + msb, lsb = Bits._parse_str(where, params, str_val) + + # Check that the bit indices look sensible + if msb < lsb: + raise ValueError('msb for {} is {}: less than {}, the lsb.' + .format(where, msb, lsb)) + if lsb < 0: + raise ValueError('lsb for {} is {}, which is negative.' + .format(where, lsb)) + if msb >= reg_width: + raise ValueError("msb for {} is {}, which doesn't fit in {} bits." + .format(where, msb, reg_width)) + + return Bits(msb, lsb) + + @staticmethod + def _parse_str(where: str, + params: ReggenParams, + str_val: str) -> Tuple[int, int]: + try: + idx = int(str_val) + return (idx, idx) + except ValueError: + # Doesn't look like an integer. Never mind: try msb:lsb + pass + + parts = str_val.split(':') + if len(parts) != 2: + raise ValueError('bits field for {} is not an ' + 'integer or of the form msb:lsb. Saw {!r}.'
+ .format(where, str_val)) + return (params.expand(parts[0], + 'msb of bits field for {}'.format(where)), + params.expand(parts[1], + 'lsb of bits field for {}'.format(where))) + + def make_translated(self, bit_offset: int) -> 'Bits': + assert 0 <= bit_offset + return Bits(self.msb + bit_offset, self.lsb + bit_offset) + + def as_str(self) -> str: + if self.lsb == self.msb: + return str(self.lsb) + else: + assert self.lsb < self.msb + return '{}:{}'.format(self.msb, self.lsb) diff --git a/utils/reggen/reggen/bus_interfaces.py b/utils/reggen/reggen/bus_interfaces.py new file mode 100644 index 0000000..37c5818 --- /dev/null +++ b/utils/reggen/reggen/bus_interfaces.py @@ -0,0 +1,187 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 + +'''Code representing a list of bus interfaces for a block''' +from enum import Enum +from typing import Dict, List, Optional, Tuple + +from .inter_signal import InterSignal +from .lib import check_list, check_keys, check_str, check_optional_str + +class BusProtocol(Enum): + TLUL = "tlul" + REG_IFACE = "reg_iface" + + @classmethod + def has_value(cls, v): + return v in cls._value2member_map_ + + +class BusInterfaces: + def __init__(self, + has_unnamed_host: bool, + named_hosts: List[str], + has_unnamed_device: bool, + named_devices: List[str], + interface_list: List[Dict]): + assert has_unnamed_device or named_devices + assert len(named_hosts) == len(set(named_hosts)) + assert len(named_devices) == len(set(named_devices)) + + self.has_unnamed_host = has_unnamed_host + self.named_hosts = named_hosts + self.has_unnamed_device = has_unnamed_device + self.named_devices = named_devices + self.interface_list = interface_list + + @staticmethod + def from_raw(raw: object, where: str) -> 'BusInterfaces': + has_unnamed_host = False + named_hosts = [] + interface_list = [] + + has_unnamed_device = False + named_devices = [] + + for idx, raw_entry in enumerate(check_list(raw, where)): + entry_what = 'entry {} of {}'.format(idx + 1, where) + ed = check_keys(raw_entry, entry_what, + ['protocol', 'direction'], + ['name']) + + protocol = check_str(ed['protocol'], + 'protocol field of ' + entry_what) + if not BusProtocol.has_value(protocol): + raise ValueError('Unknown protocol {!r} at {}' + .format(protocol, entry_what)) + + direction = check_str(ed['direction'], + 'direction field of ' + entry_what) + if direction not in ['device', 'host']: + raise ValueError('Unknown interface direction {!r} at {}' + .format(direction, entry_what)) + + name = check_optional_str(ed.get('name'), + 'name field of ' + entry_what) + + if direction == 'host': + if name is None: + if has_unnamed_host: + raise ValueError('Multiple un-named host ' + 'interfaces at {}' + .format(where)) + has_unnamed_host = True + else: + if name in named_hosts: + raise ValueError('Duplicate host interface ' + 'with name {!r} at {}' + .format(name, where)) + named_hosts.append(name) + else: + if name is None: + if has_unnamed_device: + raise ValueError('Multiple un-named device ' + 'interfaces at {}' + .format(where)) + has_unnamed_device = True + else: + if name in named_devices: + raise ValueError('Duplicate device interface ' + 'with name {!r} at {}' + .format(name, where)) + named_devices.append(name) + interface_list.append({'name': name, 'protocol': BusProtocol(protocol), 'is_host': direction=='host'}) + + if not (has_unnamed_device or named_devices): + raise ValueError('No device interface at ' + where) + + 
return BusInterfaces(has_unnamed_host, named_hosts, + has_unnamed_device, named_devices, interface_list) + + def has_host(self) -> bool: + return bool(self.has_unnamed_host or self.named_hosts) + + def _interfaces(self) -> List[Tuple[bool, Optional[str]]]: + ret = [] # type: List[Tuple[bool, Optional[str]]] + if self.has_unnamed_host: + ret.append((True, None)) + for name in self.named_hosts: + ret.append((True, name)) + + if self.has_unnamed_device: + ret.append((False, None)) + for name in self.named_devices: + ret.append((False, name)) + + return ret + + @staticmethod + def _if_dict(is_host: bool, name: Optional[str]) -> Dict[str, object]: + ret = { + 'protocol': 'tlul', + 'direction': 'host' if is_host else 'device' + } # type: Dict[str, object] + + if name is not None: + ret['name'] = name + + return ret + + def as_dicts(self) -> List[Dict[str, object]]: + return [BusInterfaces._if_dict(is_host, name) + for is_host, name in self._interfaces()] + + def get_port_name(self, is_host: bool, name: Optional[str]) -> str: + if is_host: + tl_suffix = 'tl_h' + else: + tl_suffix = 'tl_d' if self.has_host() else 'tl' + + return (tl_suffix if name is None + else '{}_{}'.format(name, tl_suffix)) + + def get_port_names(self, inc_hosts: bool, inc_devices: bool) -> List[str]: + ret = [] + for is_host, name in self._interfaces(): + if not (inc_hosts if is_host else inc_devices): + continue + ret.append(self.get_port_name(is_host, name)) + return ret + + def _if_inter_signal(self, + is_host: bool, + name: Optional[str]) -> InterSignal: + return InterSignal(self.get_port_name(is_host, name), + None, 'tl', 'tlul_pkg', 'req_rsp', 'rsp', 1, None) + + def inter_signals(self) -> List[InterSignal]: + return [self._if_inter_signal(is_host, name) + for is_host, name in self._interfaces()] + + def has_interface(self, is_host: bool, name: Optional[str]) -> bool: + if is_host: + if name is None: + return self.has_unnamed_host + else: + return name in self.named_hosts + else: + if name is None: + return self.has_unnamed_device + else: + return name in self.named_devices + + def find_port_name(self, is_host: bool, name: Optional[str]) -> str: + '''Look up the given host/name pair and return its port name. + + Raises a KeyError if there is no match. + + ''' + if not self.has_interface(is_host, name): + called = ('with no name' + if name is None else 'called {!r}'.format(name)) + raise KeyError('There is no {} bus interface {}.' + .format('host' if is_host else 'device', + called)) + + return self.get_port_name(is_host, name) diff --git a/utils/reggen/reggen/enum_entry.py b/utils/reggen/reggen/enum_entry.py new file mode 100644 index 0000000..fe1e9ec --- /dev/null +++ b/utils/reggen/reggen/enum_entry.py @@ -0,0 +1,35 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+# SPDX-License-Identifier: Apache-2.0 + +from typing import Dict + +from .lib import check_keys, check_str, check_int + +REQUIRED_FIELDS = { + 'name': ['s', "name of the member of the enum"], + 'desc': ['t', "description when field has this value"], + 'value': ['d', "value of this member of the enum"] +} + + +class EnumEntry: + def __init__(self, where: str, max_val: int, raw: object): + rd = check_keys(raw, where, + list(REQUIRED_FIELDS.keys()), + []) + + self.name = check_str(rd['name'], 'name field of {}'.format(where)) + self.desc = check_str(rd['desc'], 'desc field of {}'.format(where)) + self.value = check_int(rd['value'], 'value field of {}'.format(where)) + if not (0 <= self.value <= max_val): + raise ValueError("value for {} is {}, which isn't representable " + "in the field (representable range: 0 .. {})." + .format(where, self.value, max_val)) + + def _asdict(self) -> Dict[str, object]: + return { + 'name': self.name, + 'desc': self.desc, + 'value': str(self.value) + } diff --git a/utils/reggen/reggen/field.py b/utils/reggen/reggen/field.py new file mode 100644 index 0000000..a2beb73 --- /dev/null +++ b/utils/reggen/reggen/field.py @@ -0,0 +1,291 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 + +from typing import Dict, List, Optional + +from .access import SWAccess, HWAccess +from .bits import Bits +from .enum_entry import EnumEntry +from .lib import (check_keys, check_str, check_name, + check_list, check_str_list, check_xint) +from .params import ReggenParams + +REQUIRED_FIELDS = { + 'bits': ['b', "bit or bit range (msb:lsb)"] +} + +OPTIONAL_FIELDS = { + 'name': ['s', "name of the field"], + 'desc': ['t', "description of field (required if the field has a name)"], + 'swaccess': [ + 's', "software access permission, copied from " + "register if not provided in field. " + "(Tool adds if not provided.)" + ], + 'hwaccess': [ + 's', "hardware access permission, copied from " + "register if not provided in field. " + "(Tool adds if not provided.)" + ], + 'resval': [ + 'x', "reset value, comes from register resval " + "if not provided in field. Zero if neither " + "are provided and the field is readable, " + "x if neither are provided and the field " + "is wo. Must match if both are provided."
+ ], + 'enum': ['l', "list of permitted enumeration groups"], + 'tags': [ + 's', + "tags for the field, followed by the format 'tag_name:item1:item2...'" + ] +} + + +class Field: + def __init__(self, + name: str, + desc: Optional[str], + tags: List[str], + swaccess: SWAccess, + hwaccess: HWAccess, + hwqe: bool, + hwre: bool, + bits: Bits, + resval: Optional[int], + enum: Optional[List[EnumEntry]]): + self.name = name + self.desc = desc + self.tags = tags + self.swaccess = swaccess + self.hwaccess = hwaccess + self.hwqe = hwqe + self.hwre = hwre + self.bits = bits + self.resval = resval + self.enum = enum + + @staticmethod + def from_raw(reg_name: str, + field_idx: int, + num_fields: int, + default_swaccess: SWAccess, + default_hwaccess: HWAccess, + reg_resval: Optional[int], + reg_width: int, + reg_hwqe: bool, + reg_hwre: bool, + params: ReggenParams, + raw: object) -> 'Field': + where = 'field {} of {} register'.format(field_idx, reg_name) + rd = check_keys(raw, where, + list(REQUIRED_FIELDS.keys()), + list(OPTIONAL_FIELDS.keys())) + + raw_name = rd.get('name') + if raw_name is None: + name = ('field{}'.format(field_idx + 1) + if num_fields > 1 else reg_name) + else: + name = check_name(raw_name, 'name of {}'.format(where)) + + raw_desc = rd.get('desc') + if raw_desc is None and raw_name is not None: + raise ValueError('Missing desc field for {}' + .format(where)) + if raw_desc is None: + desc = None + else: + desc = check_str(raw_desc, 'desc field for {}'.format(where)) + + tags = check_str_list(rd.get('tags', []), + 'tags for {}'.format(where)) + + raw_swaccess = rd.get('swaccess') + if raw_swaccess is not None: + swaccess = SWAccess(where, raw_swaccess) + else: + swaccess = default_swaccess + + raw_hwaccess = rd.get('hwaccess') + if raw_hwaccess is not None: + hwaccess = HWAccess(where, raw_hwaccess) + else: + hwaccess = default_hwaccess + + bits = Bits.from_raw(where, reg_width, params, rd['bits']) + + raw_resval = rd.get('resval') + if raw_resval is None: + # The field doesn't define a reset value. Use bits from reg_resval + # if it's defined, otherwise None (which means "x"). + if reg_resval is None: + resval = None + else: + resval = bits.extract_field(reg_resval) + else: + # The field does define a reset value. It should be an integer or + # 'x'. In the latter case, we set resval to None (as above). + resval = check_xint(raw_resval, 'resval field for {}'.format(where)) + if resval is None: + # We don't allow a field to be explicitly 'x' on reset but for + # the containing register to have a reset value. + if reg_resval is not None: + raise ValueError('resval field for {} is "x", but the ' + 'register defines a resval as well.' + .format(where)) + else: + # Check that the reset value is representable with bits + if not (0 <= resval <= bits.max_value()): + raise ValueError("resval field for {} is {}, which " + "isn't representable as an unsigned " + "{}-bit integer." + .format(where, resval, bits.width())) + + # If the register had a resval, check this value matches it. + if reg_resval is not None: + resval_from_reg = bits.extract_field(reg_resval) + if resval != resval_from_reg: + raise ValueError('resval field for {} is {}, but the ' + 'register defines a resval as well, ' + 'where bits {}:{} would give {}.' 
+ .format(where, resval, + bits.msb, bits.lsb, + resval_from_reg)) + + raw_enum = rd.get('enum') + if raw_enum is None: + enum = None + else: + enum = [] + raw_entries = check_list(raw_enum, + 'enum field for {}'.format(where)) + enum_val_to_name = {} # type: Dict[int, str] + for idx, raw_entry in enumerate(raw_entries): + entry = EnumEntry('entry {} in enum list for {}' + .format(idx + 1, where), + bits.max_value(), + raw_entry) + if entry.value in enum_val_to_name: + raise ValueError('In {}, duplicate enum entries for ' + 'value {} ({} and {}).' + .format(where, + entry.value, + enum_val_to_name[entry.value], + entry.name)) + enum.append(entry) + enum_val_to_name[entry.value] = entry.name + + return Field(name, desc, tags, + swaccess, hwaccess, + reg_hwqe, reg_hwre, bits, resval, enum) + + def has_incomplete_enum(self) -> bool: + return (self.enum is not None and + len(self.enum) != 1 + self.bits.max_value()) + + def get_n_bits(self, hwext: bool, bittype: List[str]) -> int: + '''Get the size of this field in bits + + bittype should be a list of the types of signals to count. The elements + should come from the following list: + + - 'q': A signal for the value of the field. Only needed if HW can read + its contents. + + - 'd': A signal for the next value of the field. Only needed if HW can + write its contents. + + - 'qe': A write enable signal for bus accesses. Only needed if HW can + read the field's contents and the field has the hwqe flag. + + - 're': A read enable signal for bus accesses. Only needed if HW can + read the field's contents and the field has the hwre flag. + + - 'de': A write enable signal for hardware accesses. Only needed if HW + can write the field's contents and the register data is stored in the + register block (true if the hwext flag is false). + + ''' + n_bits = 0 + if "q" in bittype and self.hwaccess.allows_read(): + n_bits += self.bits.width() + if "d" in bittype and self.hwaccess.allows_write(): + n_bits += self.bits.width() + if "qe" in bittype and self.hwaccess.allows_read(): + n_bits += int(self.hwqe) + if "re" in bittype and self.hwaccess.allows_read(): + n_bits += int(self.hwre) + if "de" in bittype and self.hwaccess.allows_write(): + n_bits += int(not hwext) + return n_bits + + def make_multi(self, + reg_width: int, + min_reg_idx: int, + max_reg_idx: int, + cname: str, + creg_idx: int, + stripped: bool) -> List['Field']: + assert 0 <= min_reg_idx <= max_reg_idx + + # Check that we won't overflow reg_width. We assume that the LSB should + # be preserved: if msb=5, lsb=2 then the replicated copies will be + # [5:2], [11:8] etc. + num_copies = 1 + max_reg_idx - min_reg_idx + field_width = self.bits.msb + 1 + + if field_width * num_copies > reg_width: + raise ValueError('Cannot replicate field {} {} times: the ' + 'resulting width would be {}, but the register ' + 'width is just {}.' 
+ .format(self.name, num_copies, + field_width * num_copies, reg_width)) + + desc = ('For {}{}'.format(cname, creg_idx) + if stripped else self.desc) + enum = None if stripped else self.enum + + ret = [] + for reg_idx in range(min_reg_idx, max_reg_idx + 1): + name = '{}_{}'.format(self.name, reg_idx) + + bit_offset = field_width * (reg_idx - min_reg_idx) + bits = (self.bits + if bit_offset == 0 + else self.bits.make_translated(bit_offset)) + + ret.append(Field(name, desc, + self.tags, self.swaccess, self.hwaccess, + self.hwqe, self.hwre, bits, self.resval, enum)) + + return ret + + def make_suffixed(self, suffix: str, + cname: str, + creg_idx: int, + stripped: bool) -> 'Field': + desc = ('For {}{}'.format(cname, creg_idx) + if stripped else self.desc) + enum = None if stripped else self.enum + + return Field(self.name + suffix, + desc, self.tags, self.swaccess, self.hwaccess, + self.hwqe, self.hwre, self.bits, self.resval, enum) + + def _asdict(self) -> Dict[str, object]: + rd = { + 'bits': self.bits.as_str(), + 'name': self.name, + 'swaccess': self.swaccess.key, + 'hwaccess': self.hwaccess.key, + 'resval': 'x' if self.resval is None else str(self.resval), + 'tags': self.tags + } # type: Dict[str, object] + + if self.desc is not None: + rd['desc'] = self.desc + if self.enum is not None: + rd['enum'] = self.enum + return rd diff --git a/utils/reggen/reggen/fpv_csr.sv.tpl b/utils/reggen/reggen/fpv_csr.sv.tpl new file mode 100644 index 0000000..01f20c7 --- /dev/null +++ b/utils/reggen/reggen/fpv_csr.sv.tpl @@ -0,0 +1,177 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +// FPV CSR read and write assertions auto-generated by `reggen` containing data structure +// Do Not Edit directly +// TODO: This automation currently only supports registers without HW write access +<% + from reggen import (gen_fpv) + from reggen.register import Register + + from topgen import lib + + lblock = block.name.lower() + use_reg_iface = any([interface['protocol'] == BusProtocol.REG_IFACE and not interface['is_host'] for interface in block.bus_interfaces.interface_list]) + + # This template shouldn't be instantiated if the device interface + # doesn't actually have any registers. + assert rb.flat_regs + +%>\ +<%def name="construct_classes(block)">\ + +% if use_reg_iface: +`include "common_cells/assertions.svh" +% else: +`include "prim_assert.sv" +% endif +`ifdef UVM + import uvm_pkg::*; +`endif + +// Block: ${lblock} +module ${mod_base}_csr_assert_fpv import tlul_pkg::*; + import top_pkg::*;( + input clk_i, + input rst_ni, + + // tile link ports + input tl_h2d_t h2d, + input tl_d2h_t d2h +); +<% + addr_width = rb.get_addr_width() + addr_msb = addr_width - 1 + hro_regs_list = [r for r in rb.flat_regs if not r.hwaccess.allows_write()] + num_hro_regs = len(hro_regs_list) + hro_map = {r.offset: (idx, r) for idx, r in enumerate(hro_regs_list)} +%>\ + +// Currently FPV csr assertion only supports HRO registers. +% if num_hro_regs > 0: +`ifndef VERILATOR +`ifndef SYNTHESIS + + parameter bit[3:0] MAX_A_SOURCE = 10; // used for FPV only to reduce runtime + + typedef struct packed { + logic [TL_DW-1:0] wr_data; + logic [TL_AW-1:0] addr; + logic wr_pending; + logic rd_pending; + } pend_item_t; + + bit disable_sva; + + // mask register to convert byte to bit + logic [TL_DW-1:0] a_mask_bit; + + assign a_mask_bit[7:0] = h2d.a_mask[0] ? '1 : '0; + assign a_mask_bit[15:8] = h2d.a_mask[1] ?
'1 : '0; + assign a_mask_bit[23:16] = h2d.a_mask[2] ? '1 : '0; + assign a_mask_bit[31:24] = h2d.a_mask[3] ? '1 : '0; + + bit [${addr_msb}-2:0] hro_idx; // index for exp_vals + bit [${addr_msb}:0] normalized_addr; + + // Map register address with hro_idx in exp_vals array. + always_comb begin: decode_hro_addr_to_idx + unique case (pend_trans[d2h.d_source].addr) +% for idx, r in hro_map.values(): + ${r.offset}: hro_idx <= ${idx}; +% endfor + // If the register is not a HRO register, the write data will all update to this default idx. + default: hro_idx <= ${num_hro_regs + 1}; + endcase + end + + // store internal expected values for HW ReadOnly registers + logic [TL_DW-1:0] exp_vals[${num_hro_regs + 1}]; + + `ifdef FPV_ON + pend_item_t [MAX_A_SOURCE:0] pend_trans; + `else + pend_item_t [2**TL_AIW-1:0] pend_trans; + `endif + + // normalized address only take the [${addr_msb}:2] address from the TLUL a_address + assign normalized_addr = {h2d.a_address[${addr_msb}:2], 2'b0}; + +% if num_hro_regs > 0: + // for write HRO registers, store the write data into exp_vals + always_ff @(negedge clk_i or negedge rst_ni) begin + if (!rst_ni) begin + pend_trans <= '0; + % for hro_reg in hro_regs_list: + exp_vals[${hro_map.get(hro_reg.offset)[0]}] <= ${hro_reg.resval}; + % endfor + end else begin + if (h2d.a_valid && d2h.a_ready) begin + pend_trans[h2d.a_source].addr <= normalized_addr; + if (h2d.a_opcode inside {PutFullData, PutPartialData}) begin + pend_trans[h2d.a_source].wr_data <= h2d.a_data & a_mask_bit; + pend_trans[h2d.a_source].wr_pending <= 1'b1; + end else if (h2d.a_opcode == Get) begin + pend_trans[h2d.a_source].rd_pending <= 1'b1; + end + end + if (d2h.d_valid) begin + if (pend_trans[d2h.d_source].wr_pending == 1) begin + if (!d2h.d_error) begin + exp_vals[hro_idx] <= pend_trans[d2h.d_source].wr_data; + end + pend_trans[d2h.d_source].wr_pending <= 1'b0; + end + if (h2d.d_ready && pend_trans[d2h.d_source].rd_pending == 1) begin + pend_trans[d2h.d_source].rd_pending <= 1'b0; + end + end + end + end + + // for read HRO registers, assert read out values by access policy and exp_vals + % for hro_reg in hro_regs_list: +<% + r_name = hro_reg.name.lower() + reg_addr = hro_reg.offset + reg_addr_hex = format(reg_addr, 'x') + regwen = hro_reg.regwen + reg_mask = 0 + + for f in hro_reg.get_field_list(): + f_access = f.swaccess.key.lower() + if f_access == "rw" and regwen == None: + reg_mask = reg_mask | f.bits.bitmask() +%>\ + % if reg_mask != 0: +<% reg_mask_hex = format(reg_mask, 'x') %>\ + `ASSERT(${r_name}_rd_A, d2h.d_valid && pend_trans[d2h.d_source].rd_pending && + pend_trans[d2h.d_source].addr == ${addr_width}'h${reg_addr_hex} |-> + d2h.d_error || + (d2h.d_data & 'h${reg_mask_hex}) == (exp_vals[${hro_map.get(reg_addr)[0]}] & 'h${reg_mask_hex})) + + % endif + % endfor +% endif + + // This FPV only assumption is to reduce the FPV runtime. 
+ `ASSUME_FPV(TlulSource_M, h2d.a_source >= 0 && h2d.a_source <= MAX_A_SOURCE, clk_i, !rst_ni) + + `ifdef UVM + initial forever begin + bit csr_assert_en; + uvm_config_db#(bit)::wait_modified(null, "%m", "csr_assert_en"); + if (!uvm_config_db#(bit)::get(null, "%m", "csr_assert_en", csr_assert_en)) begin + `uvm_fatal("csr_assert", "Can't find csr_assert_en") + end + disable_sva = !csr_assert_en; + end + `endif + +`endif +`endif +% endif +endmodule +\ +${construct_classes(block)} diff --git a/utils/reggen/reggen/gen_cfg_html.py b/utils/reggen/reggen/gen_cfg_html.py new file mode 100644 index 0000000..0bb44d3 --- /dev/null +++ b/utils/reggen/reggen/gen_cfg_html.py @@ -0,0 +1,113 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 +""" +Generate HTML documentation from Block +""" + +from typing import TextIO + +from .ip_block import IpBlock +from .html_helpers import render_td +from .signal import Signal + + +def genout(outfile: TextIO, msg: str) -> None: + outfile.write(msg) + + +def name_width(x: Signal) -> str: + if x.bits.width() == 1: + return x.name + + return '{}[{}:0]'.format(x.name, x.bits.msb) + + +def gen_kv(outfile: TextIO, key: str, value: str) -> None: + genout(outfile, + '

<p><i>{}:</i> {}</p>
\n'.format(key, value)) + + +def gen_cfg_html(cfgs: IpBlock, outfile: TextIO) -> None: + rnames = cfgs.get_rnames() + + ot_server = 'https://docs.opentitan.org' + comport_url = ot_server + '/doc/rm/comportability_specification' + genout(outfile, + '

<p>Referring to the <a href="{url}">Comportable guideline for ' + 'peripheral device functionality</a>, the module ' + '<b><code>{mod_name}</code></b> has the following hardware ' + 'interfaces defined.</p>
\n' + .format(url=comport_url, mod_name=cfgs.name)) + + # clocks + gen_kv(outfile, + 'Primary Clock', + '{}'.format(cfgs.clock_signals[0])) + if len(cfgs.clock_signals) > 1: + other_clocks = ['{}'.format(clk) + for clk in cfgs.clock_signals[1:]] + gen_kv(outfile, 'Other Clocks', ', '.join(other_clocks)) + else: + gen_kv(outfile, 'Other Clocks', 'none') + + # bus interfaces + dev_ports = ['{}'.format(port) + for port in cfgs.bus_interfaces.get_port_names(False, True)] + assert dev_ports + gen_kv(outfile, 'Bus Device Interfaces (TL-UL)', ', '.join(dev_ports)) + + host_ports = ['{}'.format(port) + for port in cfgs.bus_interfaces.get_port_names(True, False)] + if host_ports: + gen_kv(outfile, 'Bus Host Interfaces (TL-UL)', ', '.join(host_ports)) + else: + gen_kv(outfile, 'Bus Host Interfaces (TL-UL)', 'none') + + # IO + ios = ([('input', x) for x in cfgs.xputs[1]] + + [('output', x) for x in cfgs.xputs[2]] + + [('inout', x) for x in cfgs.xputs[0]]) + if ios: + genout(outfile, "

<p>Peripheral Pins for Chip IO:</p>
\n") + genout( + outfile, "" + + "" + + "\n") + for direction, x in ios: + genout(outfile, + '{}' + .format(name_width(x), + direction, + render_td(x.desc, rnames, None))) + genout(outfile, "
Pin namedirectionDescription
{}{}
\n") + else: + genout(outfile, "

<p>Peripheral Pins for Chip IO: none</p>
\n") + + if not cfgs.interrupts: + genout(outfile, "

<p>Interrupts: none</p>
\n") + else: + genout(outfile, "

<p>Interrupts:</p>
\n") + genout( + outfile, "" + + "\n") + for x in cfgs.interrupts: + genout(outfile, + '{}' + .format(name_width(x), + render_td(x.desc, rnames, None))) + genout(outfile, "
Interrupt NameDescription
{}
\n") + + if not cfgs.alerts: + genout(outfile, "

<p>Security Alerts: none</p>
\n") + else: + genout(outfile, "

<p>Security Alerts:</p>
\n") + genout( + outfile, "" + + "\n") + for x in cfgs.alerts: + genout(outfile, + '{}' + .format(x.name, + render_td(x.desc, rnames, None))) + genout(outfile, "
Alert NameDescription
{}
\n") diff --git a/utils/reggen/reggen/gen_cheader.py b/utils/reggen/reggen/gen_cheader.py new file mode 100644 index 0000000..f68bd39 --- /dev/null +++ b/utils/reggen/reggen/gen_cheader.py @@ -0,0 +1,439 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 +""" +Generate C header from validated register JSON tree +""" + +import io +import logging as log +import sys +import textwrap +import warnings +from typing import List, Optional, Set, TextIO + + +from .field import Field +from .ip_block import IpBlock +from .params import LocalParam +from .register import Register +from .multi_register import MultiRegister +from .signal import Signal +from .window import Window + + +def genout(outfile: TextIO, msg: str) -> None: + outfile.write(msg) + +def to_snake_case(s: str) -> str: + val = [] + for i, ch in enumerate(s): + if i > 0 and ch.isupper(): + val.append('_') + val.append(ch) + return ''.join(val) + +def as_define(s: str) -> str: + s = s.upper() + r = '' + for i in range(0, len(s)): + r += s[i] if s[i].isalnum() else '_' + return r + + +def first_line(s: str) -> str: + """Returns the first line of a multi-line string""" + return s.splitlines()[0] + + +def format_comment(s: str) -> str: + """Formats a string to comment wrapped to an 80 character line width + + Returns wrapped string including newline and // comment characters. + """ + return '\n'.join( + textwrap.wrap( + s, width=77, initial_indent='// ', subsequent_indent='// ')) + '\n' + + +def gen_define(name: str, + args: List[str], + body: str, + existing_defines: Set[str], + indent: str = ' ') -> str: + r"""Produces a #define string, will split into two lines if a single line + has a width greater than 80 characters. Result includes newline. + + Arguments: + name - Name of the #define + args - List of arguments for the define, provide an empty list if there are + none + body - Body of the #define + existing_defines - set of already generated define names. + Error if `name` is in `existing_defines`. 
+ indent - Gives string to prepend on any new lines produced by + wrapping (default ' ') + + Example result: + name = 'A_MACRO' + args = ['arg1', 'arg2'], + body = 'arg1 + arg2 + 10' + + #define A_MACRO(arg1, arg2) arg1 + arg2 + 10 + + When the macro is wrapped the break happens after the argument list (or + macro name if there is no argument list + + #define A_MACRO(arg1, arg2) \ + arg1 + arg2 + 10 + + """ + + if name in existing_defines: + log.error("Duplicate #define for " + name) + sys.exit(1) + + if len(args) != 0: + define_declare = '#define ' + name + '(' + ', '.join(args) + ')' + else: + define_declare = '#define ' + name + + oneline_define = define_declare + ' ' + body + + existing_defines.add(name) + + if len(oneline_define) <= 80: + return oneline_define + '\n' + + return define_declare + ' \\\n' + indent + body + '\n' + + +def gen_cdefine_register(outstr: TextIO, + reg: Register, + comp: str, + width: int, + rnames: Set[str], + existing_defines: Set[str]) -> None: + rname = reg.name + offset = reg.offset + + genout(outstr, format_comment(first_line(reg.desc))) + defname = as_define(comp + '_' + rname) + genout( + outstr, + gen_define(defname + '_REG_OFFSET', [], hex(offset), existing_defines)) + + for field in reg.fields: + dname = defname + '_' + as_define(field.name) + field_width = field.bits.width() + + if field_width == 1: + # single bit + genout( + outstr, + gen_define(dname + '_BIT', [], str(field.bits.lsb), + existing_defines)) + else: + # multiple bits (unless it is the whole register) + if field_width != width: + mask = field.bits.bitmask() >> field.bits.lsb + genout( + outstr, + gen_define(dname + '_MASK', [], hex(mask), + existing_defines)) + genout( + outstr, + gen_define(dname + '_OFFSET', [], str(field.bits.lsb), + existing_defines)) + genout( + outstr, + gen_define( + dname + '_FIELD', [], + '((bitfield_field32_t) {{ .mask = {dname}_MASK, .index = {dname}_OFFSET }})' + .format(dname=dname), existing_defines)) + if field.enum is not None: + for enum in field.enum: + ename = as_define(enum.name) + value = hex(enum.value) + genout( + outstr, + gen_define( + defname + '_' + as_define(field.name) + + '_VALUE_' + ename, [], value, existing_defines)) + genout(outstr, '\n') + return + + +def gen_cdefine_window(outstr: TextIO, + win: Window, + comp: str, + regwidth: int, + rnames: Set[str], + existing_defines: Set[str]) -> None: + offset = win.offset + + genout(outstr, format_comment('Memory area: ' + first_line(win.desc))) + defname = as_define(comp + '_' + win.name) + genout( + outstr, + gen_define(defname + '_REG_OFFSET', [], hex(offset), existing_defines)) + items = win.items + genout( + outstr, + gen_define(defname + '_SIZE_WORDS', [], str(items), existing_defines)) + items = items * (regwidth // 8) + genout( + outstr, + gen_define(defname + '_SIZE_BYTES', [], str(items), existing_defines)) + + wid = win.validbits + if (wid != regwidth): + mask = (1 << wid) - 1 + genout(outstr, + gen_define(defname + '_MASK ', [], hex(mask), existing_defines)) + + +def gen_cdefines_module_param(outstr: TextIO, + param: LocalParam, + module_name: str, + existing_defines: Set[str]) -> None: + # Presently there is only one type (int), however if the new types are + # added, they potentially need to be handled differently. 
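+ # For example (block and parameter names purely illustrative): an int
+ # localparam "NumAlerts" with value 4 in a block named "uart" would be
+ # emitted by the code below roughly as
+ #
+ #     #define UART_PARAM_NUM_ALERTS 4
+ #
+ # since StudlyCaps names are first converted to snake_case and then
+ # upper-cased by as_define().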
+ known_types = ["int"] + if param.param_type not in known_types: + warnings.warn("Cannot generate a module define of type {}" + .format(param.param_type)) + return + + if param.desc is not None: + genout(outstr, format_comment(first_line(param.desc))) + # Heuristic: if the name already has underscores, it's already snake_case, + # otherwise, assume StudlyCaps and covert it to snake_case. + param_name = param.name if '_' in param.name else to_snake_case(param.name) + define_name = as_define(module_name + '_PARAM_' + param_name) + if param.param_type == "int": + define = gen_define(define_name, [], param.value, + existing_defines) + + genout(outstr, define) + genout(outstr, '\n') + + +def gen_cdefines_module_params(outstr: TextIO, + module_data: IpBlock, + module_name: str, + register_width: int, + existing_defines: Set[str]) -> None: + module_params = module_data.params + + for param in module_params.get_localparams(): + gen_cdefines_module_param(outstr, param, module_name, existing_defines) + + genout(outstr, format_comment(first_line("Register width"))) + define_name = as_define(module_name + '_PARAM_REG_WIDTH') + define = gen_define(define_name, [], str(register_width), existing_defines) + genout(outstr, define) + genout(outstr, '\n') + + +def gen_multireg_field_defines(outstr: TextIO, + regname: str, + field: Field, + subreg_num: int, + regwidth: int, + existing_defines: Set[str]) -> None: + field_width = field.bits.width() + fields_per_reg = regwidth // field_width + + define_name = regname + '_' + as_define(field.name + "_FIELD_WIDTH") + define = gen_define(define_name, [], str(field_width), existing_defines) + genout(outstr, define) + + define_name = regname + '_' + as_define(field.name + "_FIELDS_PER_REG") + define = gen_define(define_name, [], str(fields_per_reg), existing_defines) + genout(outstr, define) + + define_name = regname + "_MULTIREG_COUNT" + define = gen_define(define_name, [], str(subreg_num), existing_defines) + genout(outstr, define) + + genout(outstr, '\n') + + +def gen_cdefine_multireg(outstr: TextIO, + multireg: MultiRegister, + component: str, + regwidth: int, + rnames: Set[str], + existing_defines: Set[str]) -> None: + comment = multireg.reg.desc + " (common parameters)" + genout(outstr, format_comment(first_line(comment))) + if len(multireg.reg.fields) == 1: + regname = as_define(component + '_' + multireg.reg.name) + gen_multireg_field_defines(outstr, regname, multireg.reg.fields[0], + len(multireg.regs), regwidth, existing_defines) + else: + log.warn("Non-homogeneous multireg " + multireg.reg.name + + " skip multireg specific data generation.") + + for subreg in multireg.regs: + gen_cdefine_register(outstr, subreg, component, regwidth, rnames, + existing_defines) + + +def gen_cdefines_interrupt_field(outstr: TextIO, + interrupt: Signal, + component: str, + regwidth: int, + existing_defines: Set[str]) -> None: + fieldlsb = interrupt.bits.lsb + iname = interrupt.name + defname = as_define(component + '_INTR_COMMON_' + iname) + + if interrupt.bits.width() == 1: + # single bit + genout( + outstr, + gen_define(defname + '_BIT', [], str(fieldlsb), existing_defines)) + else: + # multiple bits (unless it is the whole register) + if interrupt.bits.width() != regwidth: + mask = interrupt.bits.msb >> fieldlsb + genout( + outstr, + gen_define(defname + '_MASK', [], hex(mask), existing_defines)) + genout( + outstr, + gen_define(defname + '_OFFSET', [], str(fieldlsb), + existing_defines)) + genout( + outstr, + gen_define( + defname + '_FIELD', [], + '((bitfield_field32_t) 
{{ .mask = {dname}_MASK, .index = {dname}_OFFSET }})' + .format(dname=defname), existing_defines)) + + +def gen_cdefines_interrupts(outstr: TextIO, + block: IpBlock, + component: str, + regwidth: int, + existing_defines: Set[str]) -> None: + # If no_auto_intr_regs is true, then we do not generate common defines, + # because the bit offsets for a particular interrupt may differ between + # the interrupt enable/state/test registers. + if block.no_auto_intr: + return + + genout(outstr, format_comment(first_line("Common Interrupt Offsets"))) + for intr in block.interrupts: + gen_cdefines_interrupt_field(outstr, intr, component, regwidth, + existing_defines) + genout(outstr, '\n') + + +def gen_cdefines(block: IpBlock, + outfile: TextIO, + src_lic: Optional[str], + src_copy: str) -> int: + rnames = block.get_rnames() + + outstr = io.StringIO() + + # This tracks the defines that have been generated so far, so we + # can error if we attempt to duplicate a definition + existing_defines = set() # type: Set[str] + + gen_cdefines_module_params(outstr, block, block.name, block.regwidth, + existing_defines) + + gen_cdefines_interrupts(outstr, block, block.name, block.regwidth, + existing_defines) + + for rb in block.reg_blocks.values(): + for x in rb.entries: + if isinstance(x, Register): + gen_cdefine_register(outstr, x, block.name, block.regwidth, rnames, + existing_defines) + continue + + if isinstance(x, MultiRegister): + gen_cdefine_multireg(outstr, x, block.name, block.regwidth, rnames, + existing_defines) + continue + + if isinstance(x, Window): + gen_cdefine_window(outstr, x, block.name, block.regwidth, + rnames, existing_defines) + continue + + generated = outstr.getvalue() + outstr.close() + + genout(outfile, '// Generated register defines for ' + block.name + '\n\n') + if src_copy != '': + genout(outfile, '// Copyright information found in source file:\n') + genout(outfile, '// ' + src_copy + '\n\n') + if src_lic is not None: + genout(outfile, '// Licensing information found in source file:\n') + for line in src_lic.splitlines(): + genout(outfile, '// ' + line + '\n') + genout(outfile, '\n') + + # Header Include Guard + genout(outfile, '#ifndef _' + as_define(block.name) + '_REG_DEFS_\n') + genout(outfile, '#define _' + as_define(block.name) + '_REG_DEFS_\n\n') + + # Header Extern Guard (so header can be used from C and C++) + genout(outfile, '#ifdef __cplusplus\n') + genout(outfile, 'extern "C" {\n') + genout(outfile, '#endif\n') + + genout(outfile, generated) + + # Header Extern Guard + genout(outfile, '#ifdef __cplusplus\n') + genout(outfile, '} // extern "C"\n') + genout(outfile, '#endif\n') + + # Header Include Guard + genout(outfile, '#endif // _' + as_define(block.name) + '_REG_DEFS_\n') + + genout(outfile, '// End generated register defines for ' + block.name) + + return 0 + + +def test_gen_define() -> None: + basic_oneline = '#define MACRO_NAME body\n' + assert gen_define('MACRO_NAME', [], 'body', set()) == basic_oneline + + basic_oneline_with_args = '#define MACRO_NAME(arg1, arg2) arg1 + arg2\n' + assert (gen_define('MACRO_NAME', ['arg1', 'arg2'], 'arg1 + arg2', + set()) == basic_oneline_with_args) + + long_macro_name = 'A_VERY_VERY_VERY_VERY_VERY_VERY_VERY_VERY_VERY_VERY_VERY_LONG_MACRO_NAME' + + multiline = ('#define ' + long_macro_name + ' \\\n' + + ' a_fairly_long_body + something_else + 10\n') + + assert (gen_define(long_macro_name, [], + 'a_fairly_long_body + something_else + 10', + set()) == multiline) + + multiline_with_args = ('#define ' + long_macro_name + + '(arg1, arg2, 
arg3) \\\n' + + ' a_fairly_long_body + arg1 + arg2 + arg3\n') + + assert (gen_define(long_macro_name, ['arg1', 'arg2', 'arg3'], + 'a_fairly_long_body + arg1 + arg2 + arg3', + set()) == multiline_with_args) + + multiline_with_args_big_indent = ( + '#define ' + long_macro_name + '(arg1, arg2, arg3) \\\n' + + ' a_fairly_long_body + arg1 + arg2 + arg3\n') + + assert (gen_define(long_macro_name, ['arg1', 'arg2', 'arg3'], + 'a_fairly_long_body + arg1 + arg2 + arg3', + set(), + indent=' ') == multiline_with_args_big_indent) diff --git a/utils/reggen/reggen/gen_dv.py b/utils/reggen/reggen/gen_dv.py new file mode 100644 index 0000000..d2d054a --- /dev/null +++ b/utils/reggen/reggen/gen_dv.py @@ -0,0 +1,108 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 +'''Generate DV code for an IP block''' + +import logging as log +import os +from typing import List + +import yaml + +from mako import exceptions # type: ignore +from mako.lookup import TemplateLookup # type: ignore +from pkg_resources import resource_filename + +from .ip_block import IpBlock +from .register import Register +from .window import Window + + +def bcname(esc_if_name: str) -> str: + '''Get the name of the dv_base_reg_block subclass for this device interface''' + return esc_if_name + "_reg_block" + + +def rcname(esc_if_name: str, r: Register) -> str: + '''Get the name of the dv_base_reg subclass for this register''' + return '{}_reg_{}'.format(esc_if_name, r.name.lower()) + + +def mcname(esc_if_name: str, m: Window) -> str: + '''Get the name of the dv_base_mem subclass for this memory''' + return '{}_mem_{}'.format(esc_if_name, m.name.lower()) + + +def miname(m: Window) -> str: + '''Get the lower-case name of a memory block''' + return m.name.lower() + + +def gen_core_file(outdir: str, + lblock: str, + dv_base_prefix: str, + paths: List[str]) -> None: + depends = ["lowrisc:dv:dv_base_reg"] + if dv_base_prefix and dv_base_prefix != "dv_base": + depends.append("lowrisc:dv:{}_reg".format(dv_base_prefix)) + + # Generate a fusesoc core file that points at the files we've just + # generated. + core_data = { + 'name': "lowrisc:dv:{}_ral_pkg".format(lblock), + 'filesets': { + 'files_dv': { + 'depend': depends, + 'files': paths, + 'file_type': 'systemVerilogSource' + }, + }, + 'targets': { + 'default': { + 'filesets': [ + 'files_dv', + ], + }, + }, + } + core_file_path = os.path.join(outdir, lblock + '_ral_pkg.core') + with open(core_file_path, 'w') as core_file: + core_file.write('CAPI=2:\n') + yaml.dump(core_data, core_file, encoding='utf-8') + + +def gen_dv(block: IpBlock, dv_base_prefix: str, outdir: str) -> int: + '''Generate DV files for an IpBlock''' + + lookup = TemplateLookup(directories=[resource_filename('reggen', '.')]) + uvm_reg_tpl = lookup.get_template('uvm_reg.sv.tpl') + + # Generate the RAL package(s). For a device interface with no name we + # generate the package "_ral_pkg" (writing to _ral_pkg.sv). + # In any other case, we also need the interface name, giving + # __ral_pkg. + generated = [] + + lblock = block.name.lower() + for if_name, rb in block.reg_blocks.items(): + hier_path = '' if block.hier_path is None else block.hier_path + '.' 
+ if_suffix = '' if if_name is None else '_' + if_name.lower() + mod_base = lblock + if_suffix + reg_block_path = hier_path + 'u_reg' + if_suffix + + file_name = mod_base + '_ral_pkg.sv' + generated.append(file_name) + reg_top_path = os.path.join(outdir, file_name) + with open(reg_top_path, 'w', encoding='UTF-8') as fout: + try: + fout.write(uvm_reg_tpl.render(rb=rb, + block=block, + esc_if_name=mod_base, + reg_block_path=reg_block_path, + dv_base_prefix=dv_base_prefix)) + except: # noqa F722 for template Exception handling + log.error(exceptions.text_error_template().render()) + return 1 + + gen_core_file(outdir, lblock, dv_base_prefix, generated) + return 0 diff --git a/utils/reggen/reggen/gen_fpv.py b/utils/reggen/reggen/gen_fpv.py new file mode 100644 index 0000000..e6e6d7d --- /dev/null +++ b/utils/reggen/reggen/gen_fpv.py @@ -0,0 +1,81 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 +# # Lint as: python3 +# +"""Generate FPV CSR read and write assertions from IpBlock +""" + +import logging as log +import os.path + +import yaml +from mako import exceptions +from mako.template import Template +from pkg_resources import resource_filename + +from .ip_block import IpBlock + + +def gen_fpv(block: IpBlock, outdir): + # Read Register templates + fpv_csr_tpl = Template( + filename=resource_filename('reggen', 'fpv_csr.sv.tpl')) + + # Generate a module with CSR assertions for each device interface. For a + # device interface with no name, we generate _csr_assert_fpv. For a + # named interface, we generate __csr_assert_fpv. + lblock = block.name.lower() + generated = [] + for if_name, rb in block.reg_blocks.items(): + if not rb.flat_regs: + # No registers to check! + continue + + if if_name is None: + mod_base = lblock + else: + mod_base = lblock + '_' + if_name.lower() + + mod_name = mod_base + '_csr_assert_fpv' + filename = mod_name + '.sv' + generated.append(filename) + reg_top_path = os.path.join(outdir, filename) + with open(reg_top_path, 'w', encoding='UTF-8') as fout: + try: + fout.write(fpv_csr_tpl.render(block=block, + mod_base=mod_base, + if_name=if_name, + rb=rb)) + except: # noqa F722 for template Exception handling + log.error(exceptions.text_error_template().render()) + return 1 + + # Generate a fusesoc core file that points at the files we've just + # generated. + core_data = { + 'name': "lowrisc:fpv:{}_csr_assert".format(lblock), + 'filesets': { + 'files_dv': { + 'depend': [ + "lowrisc:tlul:headers", + "lowrisc:prim:assert", + ], + 'files': generated, + 'file_type': 'systemVerilogSource' + }, + }, + 'targets': { + 'default': { + 'filesets': [ + 'files_dv', + ], + }, + }, + } + core_file_path = os.path.join(outdir, lblock + '_csr_assert_fpv.core') + with open(core_file_path, 'w') as core_file: + core_file.write('CAPI=2:\n') + yaml.dump(core_data, core_file, encoding='utf-8') + + return 0 diff --git a/utils/reggen/reggen/gen_html.py b/utils/reggen/reggen/gen_html.py new file mode 100644 index 0000000..e8c427b --- /dev/null +++ b/utils/reggen/reggen/gen_html.py @@ -0,0 +1,325 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+# SPDX-License-Identifier: Apache-2.0 +""" +Generate HTML documentation from IpBlock +""" + +from typing import Set, TextIO + +from .ip_block import IpBlock +from .html_helpers import expand_paras, render_td +from .multi_register import MultiRegister +from .reg_block import RegBlock +from .register import Register +from .window import Window + + +def genout(outfile: TextIO, msg: str) -> None: + outfile.write(msg) + + +# Generation of HTML table with register bit-field summary picture +# Max 16-bit wide on one line + + +def gen_tbl_row(outfile: TextIO, msb: int, width: int, close: bool) -> None: + if (close): + genout(outfile, "\n") + genout(outfile, "") + for x in range(msb, msb - width, -1): + genout(outfile, "" + str(x) + "") + + genout(outfile, "") + + +def gen_html_reg_pic(outfile: TextIO, reg: Register, width: int) -> None: + + if (width > 32): + bsize = 3 + nextbit = 63 + hdrbits = 16 + nextline = 48 + elif (width > 16): + bsize = 3 + nextbit = 31 + hdrbits = 16 + nextline = 16 + elif (width > 8): + bsize = 3 + nextbit = 15 + nextline = 0 + hdrbits = 16 + else: + bsize = 12 + nextbit = 7 + nextline = 0 + hdrbits = 8 + + genout(outfile, "") + gen_tbl_row(outfile, nextbit, hdrbits, False) + + for field in reversed(reg.fields): + fieldlsb = field.bits.lsb + fieldwidth = field.bits.width() + fieldmsb = field.bits.msb + fname = field.name + + while nextbit > fieldmsb: + if (nextbit >= nextline) and (fieldmsb < nextline): + spans = nextbit - (nextline - 1) + else: + spans = nextbit - fieldmsb + genout( + outfile, "\n") + if (nextbit >= nextline) and (fieldmsb < nextline): + nextbit = nextline - 1 + gen_tbl_row(outfile, nextbit, hdrbits, True) + nextline = nextline - 16 + else: + nextbit = fieldmsb + + while (fieldmsb >= nextline) and (fieldlsb < nextline): + spans = fieldmsb - (nextline - 1) + genout( + outfile, "\n") + fname = "..." + field.name + fieldwidth = fieldwidth - spans + fieldmsb = nextline - 1 + nextline = nextline - 16 + gen_tbl_row(outfile, fieldmsb, hdrbits, True) + + namelen = len(fname) + if namelen == 0 or fname == ' ': + fname = " " + if (namelen > bsize * fieldwidth): + usestyle = (" style=\"font-size:" + str( + (bsize * 100 * fieldwidth) / namelen) + "%\"") + else: + usestyle = "" + + genout( + outfile, "\n") + + if (fieldlsb == nextline) and nextline > 0: + gen_tbl_row(outfile, nextline - 1, hdrbits, True) + nextline = nextline - 16 + + nextbit = fieldlsb - 1 + while (nextbit > 0): + spans = nextbit - (nextline - 1) + genout(outfile, + "\n") + nextbit = nextline - 1 + if (nextline > 0): + gen_tbl_row(outfile, nextline - 1, hdrbits, True) + nextline = nextline - 16 + + genout(outfile, "
 " + + fname + "..." + fname + " 
") + + +# Generation of HTML table with header, register picture and details + + +def gen_html_register(outfile: TextIO, + reg: Register, + comp: str, + width: int, + rnames: Set[str]) -> None: + rname = reg.name + offset = reg.offset + regwen_div = '' + if reg.regwen is not None: + regwen_div = ('
<div>Register enable = {}</div>
\n' + .format(reg.regwen)) + + desc_paras = expand_paras(reg.desc, rnames) + desc_head = desc_paras[0] + desc_body = desc_paras[1:] + + genout(outfile, + '\n' + ' \n' + ' \n' + ' \n' + .format(lrname=rname.lower(), + comp=comp, + rname=rname, + off=offset, + desc=desc_head, + resval=reg.resval, + mask=reg.resmask, + wen=regwen_div)) + if desc_body: + genout(outfile, + '' + .format(''.join(desc_body))) + + genout(outfile, "\n") + + genout(outfile, "") + genout(outfile, "") + genout(outfile, "") + genout(outfile, "") + genout(outfile, "") + nextbit = 0 + fcount = 0 + + for field in reg.fields: + fcount += 1 + fname = field.name + + fieldlsb = field.bits.lsb + if fieldlsb > nextbit: + genout(outfile, "") + genout(outfile, "") + genout(outfile, "") + genout( + outfile, "") + genout(outfile, "") + + # Collect up any description and enum table + desc_parts = [] + + if field.desc is not None: + desc_parts += expand_paras(field.desc, rnames) + + if field.enum is not None: + desc_parts.append('
\n' + '
{comp}.{rname} @ {off:#x}
\n' + '
{desc}
\n' + '
Reset default = {resval:#x}, mask {mask:#x}
\n' + '{wen}' + '
{}
") + gen_html_reg_pic(outfile, reg, width) + genout(outfile, "
BitsTypeResetNameDescription
") + if (nextbit == (fieldlsb - 1)): + genout(outfile, str(nextbit)) + else: + genout(outfile, str(fieldlsb - 1) + ":" + str(nextbit)) + genout(outfile, + "Reserved
" + field.bits.as_str() + "" + field.swaccess.key + "" + + ('x' if field.resval is None else hex(field.resval)) + + "" + fname + "
') + for enum in field.enum: + enum_desc_paras = expand_paras(enum.desc, rnames) + desc_parts.append('' + '' + '' + '' + '\n' + .format(val=enum.value, + name=enum.name, + desc=''.join(enum_desc_paras))) + desc_parts.append('
{val}{name}{desc}
') + if field.has_incomplete_enum(): + desc_parts.append("

<p>Other values are reserved.</p>
") + + genout(outfile, + '{}'.format(''.join(desc_parts))) + nextbit = fieldlsb + field.bits.width() + + genout(outfile, "\n
\n") + + +def gen_html_window(outfile: TextIO, + win: Window, + comp: str, + regwidth: int, + rnames: Set[str]) -> None: + wname = win.name or '(unnamed window)' + offset = win.offset + genout(outfile, + '\n' + ' \n' + ' \n' + ' \n' + .format(comp=comp, + wname=wname, + lwname=wname.lower(), + off=offset, + items=win.items, + swaccess=win.swaccess.key, + byte_writes=('' if win.byte_write else 'not '))) + genout(outfile, '{}'.format(render_td(win.desc, rnames, 'regde'))) + genout(outfile, "
\n' + '
{comp}.{wname} @ + {off:#x}
\n' + '
{items} item {swaccess} window
\n' + '
Byte writes are {byte_writes}supported
\n' + '
') + genout(outfile, '') + wid = win.validbits + + for x in range(regwidth - 1, -1, -1): + if x == regwidth - 1 or x == wid - 1 or x == 0: + genout(outfile, '') + else: + genout(outfile, '') + genout(outfile, '') + tblmax = win.items - 1 + for x in [0, 1, 2, tblmax - 1, tblmax]: + if x == 2: + genout( + outfile, '') + else: + genout( + outfile, '') + if wid < regwidth: + genout( + outfile, '\n') + genout( + outfile, + '\n') + else: + genout( + outfile, '\n') + genout(outfile, '') + genout(outfile, '
' + str(x) + '
 ...
+' + + hex(offset + x * (regwidth // 8)) + '   
') + genout(outfile, + '
\n
\n") + + +def gen_html_reg_block(outfile: TextIO, + rb: RegBlock, + comp: str, + width: int, + rnames: Set[str]) -> None: + for x in rb.entries: + if isinstance(x, Register): + gen_html_register(outfile, x, comp, width, rnames) + elif isinstance(x, MultiRegister): + for reg in x.regs: + gen_html_register(outfile, reg, comp, width, rnames) + else: + assert isinstance(x, Window) + gen_html_window(outfile, x, comp, width, rnames) + + +def gen_html(block: IpBlock, outfile: TextIO) -> int: + rnames = block.get_rnames() + + assert block.reg_blocks + # Handle the case where there's just one interface + if len(block.reg_blocks) == 1: + rb = list(block.reg_blocks.values())[0] + gen_html_reg_block(outfile, rb, block.name, block.regwidth, rnames) + return 0 + + # Handle the case where there is more than one device interface and, + # correspondingly, more than one reg block. + for iface_name, rb in block.reg_blocks.items(): + iface_desc = ('device interface {}'.format(iface_name) + if iface_name is not None + else 'the unnamed device interface') + genout(outfile, + '

<h3>Registers visible under {}</h3>
'.format(iface_desc)) + gen_html_reg_block(outfile, rb, block.name, block.regwidth, rnames) + + return 0 diff --git a/utils/reggen/reggen/gen_json.py b/utils/reggen/reggen/gen_json.py new file mode 100644 index 0000000..c593cc1 --- /dev/null +++ b/utils/reggen/reggen/gen_json.py @@ -0,0 +1,34 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 +"""Generate JSON/compact JSON/Hjson from register JSON tree +""" + +import hjson + + +def gen_json(obj, outfile, format): + if format == 'json': + hjson.dumpJSON(obj, + outfile, + ensure_ascii=False, + use_decimal=True, + indent=' ', + for_json=True) + elif format == 'compact': + hjson.dumpJSON(obj, + outfile, + ensure_ascii=False, + for_json=True, + use_decimal=True, + separators=(',', ':')) + elif format == 'hjson': + hjson.dump(obj, + outfile, + ensure_ascii=False, + for_json=True, + use_decimal=True) + else: + raise ValueError('Invalid JSON format ' + format) + + return 0 diff --git a/utils/reggen/reggen/gen_rtl.py b/utils/reggen/reggen/gen_rtl.py new file mode 100644 index 0000000..14c9b4b --- /dev/null +++ b/utils/reggen/reggen/gen_rtl.py @@ -0,0 +1,136 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 +"""Generate SystemVerilog designs from IpBlock object""" + +import logging as log +import os +from typing import Dict, Optional, Tuple + +from mako import exceptions # type: ignore +from mako.template import Template # type: ignore +from pkg_resources import resource_filename + +from .ip_block import IpBlock +from .multi_register import MultiRegister +from .reg_base import RegBase +from .register import Register + + +def escape_name(name: str) -> str: + return name.lower().replace(' ', '_') + + +def make_box_quote(msg: str, indent: str = ' ') -> str: + hr = indent + ('/' * (len(msg) + 6)) + middle = indent + '// ' + msg + ' //' + return '\n'.join([hr, middle, hr]) + + +def _get_awparam_name(iface_name: Optional[str]) -> str: + return (iface_name or 'Iface').capitalize() + 'Aw' + + +def get_addr_widths(block: IpBlock) -> Dict[Optional[str], Tuple[str, int]]: + '''Return the address widths for the device interfaces + + Returns a dictionary keyed by interface name whose values are pairs: + (paramname, width) where paramname is IfaceAw for an unnamed interface and + FooAw for an interface called foo. This is constructed in the same order as + block.reg_blocks. + + If there is a single device interface and that interface is unnamed, use + the more general parameter name "BlockAw". 
+ + ''' + assert block.reg_blocks + if len(block.reg_blocks) == 1 and None in block.reg_blocks: + return {None: ('BlockAw', block.reg_blocks[None].get_addr_width())} + + return {name: (_get_awparam_name(name), rb.get_addr_width()) + for name, rb in block.reg_blocks.items()} + + +def get_type_name_pfx(block: IpBlock, iface_name: Optional[str]) -> str: + return block.name.lower() + ('' if iface_name is None + else '_{}'.format(iface_name.lower())) + + +def get_r0(reg: RegBase) -> Register: + '''Get a Register representing an entry in the RegBase''' + if isinstance(reg, Register): + return reg + else: + assert isinstance(reg, MultiRegister) + return reg.reg + + +def get_iface_tx_type(block: IpBlock, + iface_name: Optional[str], + hw2reg: bool) -> str: + x2x = 'hw2reg' if hw2reg else 'reg2hw' + pfx = get_type_name_pfx(block, iface_name) + return '_'.join([pfx, x2x, 't']) + + +def get_reg_tx_type(block: IpBlock, reg: RegBase, hw2reg: bool) -> str: + '''Get the name of the hw2reg or reg2hw type for reg''' + if isinstance(reg, Register): + r0 = reg + type_suff = 'reg_t' + else: + assert isinstance(reg, MultiRegister) + r0 = reg.reg + type_suff = 'mreg_t' + + x2x = 'hw2reg' if hw2reg else 'reg2hw' + return '_'.join([block.name.lower(), + x2x, + r0.name.lower(), + type_suff]) + + +def gen_rtl(block: IpBlock, outdir: str) -> int: + # Read Register templates + reg_top_tpl = Template( + filename=resource_filename('reggen', 'reg_top.sv.tpl')) + reg_pkg_tpl = Template( + filename=resource_filename('reggen', 'reg_pkg.sv.tpl')) + + # Generate _reg_pkg.sv + # + # This defines the various types used to interface between the *_reg_top + # module(s) and the block itself. + reg_pkg_path = os.path.join(outdir, block.name.lower() + "_reg_pkg.sv") + with open(reg_pkg_path, 'w', encoding='UTF-8') as fout: + try: + fout.write(reg_pkg_tpl.render(block=block)) + except: # noqa F722 for template Exception handling + log.error(exceptions.text_error_template().render()) + return 1 + + # Generate the register block implementation(s). For a device interface + # with no name we generate the register module "_reg_top" (writing + # to _reg_top.sv). In any other case, we also need the interface + # name, giving __reg_top. + lblock = block.name.lower() + for if_name, rb in block.reg_blocks.items(): + if if_name is None: + mod_base = lblock + else: + mod_base = lblock + '_' + if_name.lower() + + mod_name = mod_base + '_reg_top' + reg_top_path = os.path.join(outdir, mod_name + '.sv') + with open(reg_top_path, 'w', encoding='UTF-8') as fout: + try: + fout.write(reg_top_tpl.render(block=block, + mod_base=mod_base, + mod_name=mod_name, + if_name=if_name, + rb=rb)) + except: # noqa F722 for template Exception handling + log.error(exceptions.text_error_template().render()) + return 1 + + return 0 diff --git a/utils/reggen/reggen/gen_selfdoc.py b/utils/reggen/reggen/gen_selfdoc.py new file mode 100644 index 0000000..5f38404 --- /dev/null +++ b/utils/reggen/reggen/gen_selfdoc.py @@ -0,0 +1,306 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 +""" +Generates the documentation for the register tool + +""" +from .access import SWACCESS_PERMITTED, HWACCESS_PERMITTED +from reggen import (validate, + ip_block, enum_entry, field, + register, multi_register, window) + + +def genout(outfile, msg): + outfile.write(msg) + + +doc_intro = """ + + + +The tables describe each key and the type of the value. 
The following +types are used: + +Type | Description +---- | ----------- +""" + +swaccess_intro = """ + +Register fields are tagged using the swaccess key to describe the +permitted access and side-effects. This key must have one of these +values: + +""" + +hwaccess_intro = """ + +Register fields are tagged using the hwaccess key to describe the +permitted access from hardware logic and side-effects. This key must +have one of these values: + +""" + +top_example = """ +The basic structure of a register definition file is thus: + +```hjson +{ + name: "GP", + regwidth: "32", + registers: [ + // register definitions... + ] +} + +``` + +""" + +register_example = """ + +The basic register definition group will follow this pattern: + +```hjson + { name: "REGA", + desc: "Description of register", + swaccess: "rw", + resval: "42", + fields: [ + // bit field definitions... + ] + } +``` + +The name and brief description are required. If the swaccess key is +provided it describes the access pattern that will be used by all +bitfields in the register that do not override with their own swaccess +key. This is a useful shortcut because in most cases a register will +have the same access restrictions for all fields. The reset value of +the register may also be provided here or in the individual fields. If +it is provided in both places then they must match, if it is provided +in neither place then the reset value defaults to zero for all except +write-only fields when it defaults to x. + +""" + +field_example = """ + +Field names should be relatively short because they will be used +frequently (and need to fit in the register layout picture!) The field +description is expected to be longer and will most likely make use of +the Hjson ability to include multi-line strings. An example with three +fields: + +```hjson + fields: [ + { bits: "15:0", + name: "RXS", + desc: ''' + Last 16 oversampled values of RX. These are captured at 16x the baud + rate clock. This is a shift register with the most recent bit in + bit 0 and the oldest in bit 15. Only valid when ENRXS is set. + ''' + } + { bits: "16", + name: "ENRXS", + desc: ''' + If this bit is set the receive oversampled data is collected + in the RXS field. + ''' + } + {bits: "20:19", name: "TXILVL", + desc: "Trigger level for TX interrupts", + resval: "2", + enum: [ + { value: "0", name: "txlvl1", desc: "1 character" }, + { value: "1", name: "txlvl4", desc: "4 characters" }, + { value: "2", name: "txlvl8", desc: "8 characters" }, + { value: "3", name: "txlvl16", desc: "16 characters" } + ] + } + ] +``` + +In all of these the swaccess parameter is inherited from the register +level, and will be added so this key is always available to the +backend. The RXS and ENRXS will default to zero reset value (unless +something different is provided for the register) and will have the +key added, but TXILVL expicitly sets its reset value as 2. + +The missing bits 17 and 18 will be treated as reserved by the tool, as +will any bits between 21 and the maximum in the register. + +The TXILVL is an example using an enumeration to specify all valid +values for the field. In this case all possible values are described, +if the list is incomplete then the field is marked with the rsvdenum +key so the backend can take appropriate action. (If the enum field is +more than 7 bits then the checking is not done.) 
+ +""" + +offset_intro = """ + +""" + +multi_intro = """ + +The multireg expands on the register required fields and will generate +a list of the generated registers (that contain all required and +generated keys for an actual register). + +""" + +window_intro = """ + +A window defines an open region of the register space that can be used +for things that are not registers (for example access to a buffer ram). + +""" + +regwen_intro = """ + +Registers can protect themselves from software writes by using the +register attribute regwen. When not an emptry string (the default +value), regwen indicates that another register must be true in order +to allow writes to this register. This is useful for the prevention +of software modification. The register-enable register (call it +REGWEN) must be one bit in width, and should default to 1 and be rw1c +for preferred security control. This allows all writes to proceed +until at some point software disables future modifications by clearing +REGWEN. An error is reported if REGWEN does not exist, contains more +than one bit, is not `rw1c` or does not default to 1. One REGWEN can +protect multiple registers. The REGWEN register must precede those +registers that refer to it in the .hjson register list. An example: + +```hjson + { name: "REGWEN", + desc: "Register write enable for a bank of registers", + swaccess: "rw1c", + fields: [ { bits: "0", resval: "1" } ] + } + { name: "REGA", + swaccess: "rw", + regwen: "REGWEN", + ... + } + { name: "REGB", + swaccess: "rw", + regwen: "REGWEN", + ... + } +``` +""" + +doc_tail = """ + +(end of output generated by `regtool.py --doc`) + +""" + + +def doc_tbl_head(outfile, use): + if use is not None: + genout(outfile, "\nKey | Kind | Type | Description of Value\n") + genout(outfile, "--- | ---- | ---- | --------------------\n") + else: + genout(outfile, "\nKey | Description\n") + genout(outfile, "--- | -----------\n") + + +def doc_tbl_line(outfile, key, use, desc): + if use is not None: + desc_key, desc_txt = desc + val_type = (validate.val_types[desc_key][0] + if desc_key is not None else None) + else: + assert isinstance(desc, str) + val_type = None + desc_txt = desc + + if val_type is not None: + genout( + outfile, '{} | {} | {} | {}\n'.format(key, validate.key_use[use], + val_type, desc_txt)) + else: + genout(outfile, key + " | " + desc_txt + "\n") + + +def document(outfile): + genout(outfile, doc_intro) + for x in validate.val_types: + genout( + outfile, + validate.val_types[x][0] + " | " + validate.val_types[x][1] + "\n") + + genout(outfile, swaccess_intro) + doc_tbl_head(outfile, None) + for key, value in SWACCESS_PERMITTED.items(): + doc_tbl_line(outfile, key, None, value[0]) + + genout(outfile, hwaccess_intro) + doc_tbl_head(outfile, None) + for key, value in HWACCESS_PERMITTED.items(): + doc_tbl_line(outfile, key, None, value[0]) + + genout( + outfile, "\n\nThe top level of the JSON is a group containing " + "the following keys:\n") + doc_tbl_head(outfile, 1) + for k, v in ip_block.REQUIRED_FIELDS.items(): + doc_tbl_line(outfile, k, 'r', v) + for k, v in ip_block.OPTIONAL_FIELDS.items(): + doc_tbl_line(outfile, k, 'o', v) + genout(outfile, top_example) + + genout( + outfile, "\n\nThe list of registers includes register definition " + "groups containing the following keys:\n") + doc_tbl_head(outfile, 1) + for k, v in register.REQUIRED_FIELDS.items(): + doc_tbl_line(outfile, k, 'r', v) + for k, v in register.OPTIONAL_FIELDS.items(): + doc_tbl_line(outfile, k, 'o', v) + genout(outfile, register_example) + + 
genout( + outfile, "\n\nIn the fields list each field definition is a group " + "itself containing the following keys:\n") + doc_tbl_head(outfile, 1) + for k, v in field.REQUIRED_FIELDS.items(): + doc_tbl_line(outfile, k, 'r', v) + for k, v in field.OPTIONAL_FIELDS.items(): + doc_tbl_line(outfile, k, 'o', v) + genout(outfile, field_example) + + genout(outfile, "\n\nDefinitions in an enumeration group contain:\n") + doc_tbl_head(outfile, 1) + for k, v in enum_entry.REQUIRED_FIELDS.items(): + doc_tbl_line(outfile, k, 'r', v) + + genout( + outfile, "\n\nThe list of registers may include single entry groups " + "to control the offset, open a window or generate registers:\n") + doc_tbl_head(outfile, 1) + for x in validate.list_optone: + doc_tbl_line(outfile, x, 'o', validate.list_optone[x]) + + genout(outfile, offset_intro) + genout(outfile, regwen_intro) + + genout(outfile, window_intro) + doc_tbl_head(outfile, 1) + for k, v in window.REQUIRED_FIELDS.items(): + doc_tbl_line(outfile, k, 'r', v) + for k, v in window.OPTIONAL_FIELDS.items(): + doc_tbl_line(outfile, k, 'o', v) + + genout(outfile, multi_intro) + doc_tbl_head(outfile, 1) + for k, v in multi_register.REQUIRED_FIELDS.items(): + doc_tbl_line(outfile, k, 'r', v) + for k, v in multi_register.OPTIONAL_FIELDS.items(): + doc_tbl_line(outfile, k, 'o', v) + + genout(outfile, doc_tail) diff --git a/utils/reggen/reggen/html_helpers.py b/utils/reggen/reggen/html_helpers.py new file mode 100644 index 0000000..8c828ee --- /dev/null +++ b/utils/reggen/reggen/html_helpers.py @@ -0,0 +1,83 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 + +import logging as log +import re +from typing import List, Match, Optional, Set + + +def expand_paras(s: str, rnames: Set[str]) -> List[str]: + '''Expand a description field to HTML. + + This supports a sort of simple pseudo-markdown. Supported Markdown + features: + + - Separate paragraphs on a blank line + - **bold** and *italicised* text + - Back-ticks for pre-formatted text + + We also generate links to registers when a name is prefixed with a double + exclamation mark. For example, if there is a register FOO then !!FOO or + !!FOO.field will generate a link to that register. + + Returns a list of rendered paragraphs + + ''' + # Start by splitting into paragraphs. The regex matches a newline followed + # by one or more lines that just contain whitespace. Then render each + # paragraph with the _expand_paragraph worker function. + paras = [_expand_paragraph(paragraph.strip(), rnames) + for paragraph in re.split(r'\n(?:\s*\n)+', s)] + + # There will always be at least one paragraph (splitting an empty string + # gives ['']) + assert paras + return paras + + +def _expand_paragraph(s: str, rnames: Set[str]) -> str: + '''Expand a single paragraph, as described in _get_desc_paras''' + def fieldsub(match: Match[str]) -> str: + base = match.group(1).partition('.')[0].lower() + if base in rnames: + if match.group(1)[-1] == ".": + return ('' + + match.group(1)[:-1] + '.') + else: + return ('' + + match.group(1) + '') + log.warn('!!' + match.group(1).partition('.')[0] + + ' not found in register list.') + return match.group(0) + + # Split out pre-formatted text. Because the call to re.split has a capture + # group in the regex, we get an odd number of results. Elements with even + # indices are "normal text". Those with odd indices are the captured text + # between the back-ticks. 
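+ # For instance (input purely illustrative): splitting 'set the `ctrl` register'
+ # with this pattern gives ['set the ', 'ctrl', ' register'], so the odd
+ # index 1 holds the back-ticked span handled by the `idx & 1` branch below.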
+ code_split = re.split(r'`([^`]+)`', s) + expanded_parts = [] + + for idx, part in enumerate(code_split): + if idx & 1: + # Text contained in back ticks + expanded_parts.append('<code>{}</code>'.format(part)) + continue + + part = re.sub(r"!!([A-Za-z0-9_.]+)", fieldsub, part) + part = re.sub(r"(?s)\*\*(.+?)\*\*", r'<b>\1</b>', part) + part = re.sub(r"\*([^*]+?)\*", r'<i>\1</i>', part) + expanded_parts.append(part) + + return '

<p>{}</p>
'.format(''.join(expanded_parts)) + + +def render_td(s: str, rnames: Set[str], td_class: Optional[str]) -> str: + '''Expand a description field and put it in a . + + Returns a string. See _get_desc_paras for the format that gets expanded. + + ''' + desc_paras = expand_paras(s, rnames) + class_attr = '' if td_class is None else ' class="{}"'.format(td_class) + return '{}'.format(class_attr, ''.join(desc_paras)) diff --git a/utils/reggen/reggen/inter_signal.py b/utils/reggen/reggen/inter_signal.py new file mode 100644 index 0000000..cf27d51 --- /dev/null +++ b/utils/reggen/reggen/inter_signal.py @@ -0,0 +1,81 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 + +from typing import Dict, Optional + +from .lib import (check_keys, check_name, + check_str, check_optional_str, check_int) + + +class InterSignal: + def __init__(self, + name: str, + desc: Optional[str], + struct: str, + package: Optional[str], + signal_type: str, + act: str, + width: int, + default: Optional[str]): + assert 0 < width + self.name = name + self.desc = desc + self.struct = struct + self.package = package + self.signal_type = signal_type + self.act = act + self.width = width + self.default = default + + @staticmethod + def from_raw(what: str, raw: object) -> 'InterSignal': + rd = check_keys(raw, what, + ['name', 'struct', 'type', 'act'], + ['desc', 'package', 'width', 'default']) + + name = check_name(rd['name'], 'name field of ' + what) + + r_desc = rd.get('desc') + if r_desc is None: + desc = None + else: + desc = check_str(r_desc, 'desc field of ' + what) + + struct = check_str(rd['struct'], 'struct field of ' + what) + + r_package = rd.get('package') + if r_package is None or r_package == '': + package = None + else: + package = check_name(r_package, 'package field of ' + what) + + signal_type = check_name(rd['type'], 'type field of ' + what) + act = check_name(rd['act'], 'act field of ' + what) + width = check_int(rd.get('width', 1), 'width field of ' + what) + if width <= 0: + raise ValueError('width field of {} is not positive.'.format(what)) + + default = check_optional_str(rd.get('default'), + 'default field of ' + what) + + return InterSignal(name, desc, struct, package, + signal_type, act, width, default) + + def _asdict(self) -> Dict[str, object]: + ret = {'name': self.name} # type: Dict[str, object] + if self.desc is not None: + ret['desc'] = self.desc + ret['struct'] = self.struct + if self.package is not None: + ret['package'] = self.package + ret['type'] = self.signal_type + ret['act'] = self.act + ret['width'] = self.width + if self.default is not None: + ret['default'] = self.default + + return ret + + def as_dict(self) -> Dict[str, object]: + return self._asdict() diff --git a/utils/reggen/reggen/ip_block.py b/utils/reggen/reggen/ip_block.py new file mode 100644 index 0000000..5865d04 --- /dev/null +++ b/utils/reggen/reggen/ip_block.py @@ -0,0 +1,365 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+# SPDX-License-Identifier: Apache-2.0 + +'''Code representing an IP block for reggen''' + +from typing import Dict, List, Optional, Sequence, Set, Tuple + +import hjson # type: ignore + +from .alert import Alert +from .bus_interfaces import BusInterfaces +from .inter_signal import InterSignal +from .lib import (check_keys, check_name, check_int, check_bool, + check_list, check_optional_str, check_name_list) +from .params import ReggenParams, LocalParam +from .reg_block import RegBlock +from .signal import Signal + + +REQUIRED_FIELDS = { + 'name': ['s', "name of the component"], + 'clock_primary': ['s', "name of the primary clock"], + 'bus_interfaces': ['l', "bus interfaces for the device"], + 'registers': [ + 'l', + "list of register definition groups and " + "offset control groups" + ] +} + +OPTIONAL_FIELDS = { + 'alert_list': ['lnw', "list of peripheral alerts"], + 'available_inout_list': ['lnw', "list of available peripheral inouts"], + 'available_input_list': ['lnw', "list of available peripheral inputs"], + 'available_output_list': ['lnw', "list of available peripheral outputs"], + 'hier_path': [ + None, + 'additional hierarchy path before the reg block instance' + ], + 'interrupt_list': ['lnw', "list of peripheral interrupts"], + 'inter_signal_list': ['l', "list of inter-module signals"], + 'no_auto_alert_regs': [ + 's', "Set to true to suppress automatic " + "generation of alert test registers. " + "Defaults to true if no alert_list is present. " + "Otherwise this defaults to false. " + ], + 'no_auto_intr_regs': [ + 's', "Set to true to suppress automatic " + "generation of interrupt registers. " + "Defaults to true if no interrupt_list is present. " + "Otherwise this defaults to false. " + ], + 'other_clock_list': ['l', "list of other chip clocks needed"], + 'other_reset_list': ['l', "list of other resets"], + 'param_list': ['lp', "list of parameters of the IP"], + 'regwidth': ['d', "width of registers in bits (default 32)"], + 'reset_primary': ['s', "primary reset used by the module"], + 'reset_request_list': ['l', 'list of signals requesting reset'], + 'scan': ['pb', 'Indicates the module have `scanmode_i`'], + 'scan_reset': ['pb', 'Indicates the module have `scan_rst_ni`'], + 'scan_en': ['pb', 'Indicates the module has `scan_en_i`'], + 'SPDX-License-Identifier': [ + 's', "License ientifier (if using pure json) " + "Only use this if unable to put this " + "information in a comment at the top of the " + "file." 
+ ], + 'wakeup_list': ['lnw', "list of peripheral wakeups"] +} + + +class IpBlock: + def __init__(self, + name: str, + regwidth: int, + params: ReggenParams, + reg_blocks: Dict[Optional[str], RegBlock], + interrupts: Sequence[Signal], + no_auto_intr: bool, + alerts: List[Alert], + no_auto_alert: bool, + scan: bool, + inter_signals: List[InterSignal], + bus_interfaces: BusInterfaces, + hier_path: Optional[str], + clock_signals: List[str], + reset_signals: List[str], + xputs: Tuple[Sequence[Signal], + Sequence[Signal], + Sequence[Signal]], + wakeups: Sequence[Signal], + reset_requests: Sequence[Signal], + scan_reset: bool, + scan_en: bool): + assert reg_blocks + assert clock_signals + assert reset_signals + + # Check that register blocks are in bijection with device interfaces + reg_block_names = reg_blocks.keys() + dev_if_names = [] # type: List[Optional[str]] + dev_if_names += bus_interfaces.named_devices + if bus_interfaces.has_unnamed_device: + dev_if_names.append(None) + assert set(reg_block_names) == set(dev_if_names) + + self.name = name + self.regwidth = regwidth + self.reg_blocks = reg_blocks + self.params = params + self.interrupts = interrupts + self.no_auto_intr = no_auto_intr + self.alerts = alerts + self.no_auto_alert = no_auto_alert + self.scan = scan + self.inter_signals = inter_signals + self.bus_interfaces = bus_interfaces + self.hier_path = hier_path + self.clock_signals = clock_signals + self.reset_signals = reset_signals + self.xputs = xputs + self.wakeups = wakeups + self.reset_requests = reset_requests + self.scan_reset = scan_reset + self.scan_en = scan_en + + @staticmethod + def from_raw(param_defaults: List[Tuple[str, str]], + raw: object, + where: str) -> 'IpBlock': + + rd = check_keys(raw, 'block at ' + where, + list(REQUIRED_FIELDS.keys()), + list(OPTIONAL_FIELDS.keys())) + + name = check_name(rd['name'], 'name of block at ' + where) + + what = '{} block at {}'.format(name, where) + + r_regwidth = rd.get('regwidth') + if r_regwidth is None: + regwidth = 32 + else: + regwidth = check_int(r_regwidth, 'regwidth field of ' + what) + if regwidth <= 0: + raise ValueError('Invalid regwidth field for {}: ' + '{} is not positive.' + .format(what, regwidth)) + + params = ReggenParams.from_raw('parameter list for ' + what, + rd.get('param_list', [])) + try: + params.apply_defaults(param_defaults) + except (ValueError, KeyError) as err: + raise ValueError('Failed to apply defaults to params: {}' + .format(err)) from None + + init_block = RegBlock(regwidth, params) + + interrupts = Signal.from_raw_list('interrupt_list for block {}' + .format(name), + rd.get('interrupt_list', [])) + alerts = Alert.from_raw_list('alert_list for block {}' + .format(name), + rd.get('alert_list', [])) + + no_auto_intr = check_bool(rd.get('no_auto_intr_regs', not interrupts), + 'no_auto_intr_regs field of ' + what) + + no_auto_alert = check_bool(rd.get('no_auto_alert_regs', not alerts), + 'no_auto_alert_regs field of ' + what) + + if interrupts and not no_auto_intr: + if interrupts[-1].bits.msb >= regwidth: + raise ValueError("Interrupt list for {} is too wide: " + "msb is {}, which doesn't fit with a " + "regwidth of {}." + .format(what, + interrupts[-1].bits.msb, regwidth)) + init_block.make_intr_regs(interrupts) + + if alerts: + if not no_auto_alert: + if len(alerts) > regwidth: + raise ValueError("Interrupt list for {} is too wide: " + "{} alerts don't fit with a regwidth of {}." 
+ .format(what, len(alerts), regwidth)) + init_block.make_alert_regs(alerts) + + # Generate a NumAlerts parameter + existing_param = params.get('NumAlerts') + if existing_param is not None: + if ((not isinstance(existing_param, LocalParam) or + existing_param.param_type != 'int' or + existing_param.value != str(len(alerts)))): + raise ValueError('Conflicting definition of NumAlerts ' + 'parameter.') + else: + params.add(LocalParam(name='NumAlerts', + desc='Number of alerts', + param_type='int', + value=str(len(alerts)))) + + scan = check_bool(rd.get('scan', False), 'scan field of ' + what) + + reg_blocks = RegBlock.build_blocks(init_block, rd['registers']) + + r_inter_signals = check_list(rd.get('inter_signal_list', []), + 'inter_signal_list field') + inter_signals = [ + InterSignal.from_raw('entry {} of the inter_signal_list field' + .format(idx + 1), + entry) + for idx, entry in enumerate(r_inter_signals) + ] + + bus_interfaces = (BusInterfaces. + from_raw(rd['bus_interfaces'], + 'bus_interfaces field of ' + where)) + inter_signals += bus_interfaces.inter_signals() + + hier_path = check_optional_str(rd.get('hier_path', None), + 'hier_path field of ' + what) + + clock_primary = check_name(rd['clock_primary'], + 'clock_primary field of ' + what) + other_clock_list = check_name_list(rd.get('other_clock_list', []), + 'other_clock_list field of ' + what) + clock_signals = [clock_primary] + other_clock_list + + reset_primary = check_name(rd.get('reset_primary', 'rst_ni'), + 'reset_primary field of ' + what) + other_reset_list = check_name_list(rd.get('other_reset_list', []), + 'other_reset_list field of ' + what) + reset_signals = [reset_primary] + other_reset_list + + xputs = ( + Signal.from_raw_list('available_inout_list for block ' + name, + rd.get('available_inout_list', [])), + Signal.from_raw_list('available_input_list for block ' + name, + rd.get('available_input_list', [])), + Signal.from_raw_list('available_output_list for block ' + name, + rd.get('available_output_list', [])) + ) + wakeups = Signal.from_raw_list('wakeup_list for block ' + name, + rd.get('wakeup_list', [])) + rst_reqs = Signal.from_raw_list('reset_request_list for block ' + name, + rd.get('reset_request_list', [])) + + scan_reset = check_bool(rd.get('scan_reset', False), + 'scan_reset field of ' + what) + + scan_en = check_bool(rd.get('scan_en', False), + 'scan_en field of ' + what) + + # Check that register blocks are in bijection with device interfaces + reg_block_names = reg_blocks.keys() + dev_if_names = [] # type: List[Optional[str]] + dev_if_names += bus_interfaces.named_devices + if bus_interfaces.has_unnamed_device: + dev_if_names.append(None) + if set(reg_block_names) != set(dev_if_names): + raise ValueError("IP block {} defines device interfaces, named {} " + "but its registers don't match (they are keyed " + "by {})." 
+ .format(name, dev_if_names, + list(reg_block_names))) + + return IpBlock(name, regwidth, params, reg_blocks, + interrupts, no_auto_intr, alerts, no_auto_alert, + scan, inter_signals, bus_interfaces, + hier_path, clock_signals, reset_signals, xputs, + wakeups, rst_reqs, scan_reset, scan_en) + + @staticmethod + def from_text(txt: str, + param_defaults: List[Tuple[str, str]], + where: str) -> 'IpBlock': + '''Load an IpBlock from an hjson description in txt''' + return IpBlock.from_raw(param_defaults, + hjson.loads(txt, use_decimal=True), + where) + + @staticmethod + def from_path(path: str, + param_defaults: List[Tuple[str, str]]) -> 'IpBlock': + '''Load an IpBlock from an hjson description in a file at path''' + with open(path, 'r', encoding='utf-8') as handle: + return IpBlock.from_text(handle.read(), param_defaults, + 'file at {!r}'.format(path)) + + def _asdict(self) -> Dict[str, object]: + ret = { + 'name': self.name, + 'regwidth': self.regwidth + } + if len(self.reg_blocks) == 1 and None in self.reg_blocks: + ret['registers'] = self.reg_blocks[None].as_dicts() + else: + ret['registers'] = {k: v.as_dicts() + for k, v in self.reg_blocks.items()} + + ret['param_list'] = self.params.as_dicts() + ret['interrupt_list'] = self.interrupts + ret['no_auto_intr_regs'] = self.no_auto_intr + ret['alert_list'] = self.alerts + ret['no_auto_alert_regs'] = self.no_auto_alert + ret['scan'] = self.scan + ret['inter_signal_list'] = self.inter_signals + ret['bus_interfaces'] = self.bus_interfaces.as_dicts() + + if self.hier_path is not None: + ret['hier_path'] = self.hier_path + + ret['clock_primary'] = self.clock_signals[0] + if len(self.clock_signals) > 1: + ret['other_clock_list'] = self.clock_signals[1:] + + ret['reset_primary'] = self.reset_signals[0] + if len(self.reset_signals) > 1: + ret['other_reset_list'] = self.reset_signals[1:] + + inouts, inputs, outputs = self.xputs + if inouts: + ret['available_inout_list'] = inouts + if inputs: + ret['available_input_list'] = inputs + if outputs: + ret['available_output_list'] = outputs + + if self.wakeups: + ret['wakeup_list'] = self.wakeups + if self.reset_requests: + ret['reset_request_list'] = self.reset_requests + + ret['scan_reset'] = self.scan_reset + ret['scan_en'] = self.scan_en + + return ret + + def get_rnames(self) -> Set[str]: + ret = set() # type: Set[str] + for rb in self.reg_blocks.values(): + ret = ret.union(set(rb.name_to_offset.keys())) + return ret + + def get_signals_as_list_of_dicts(self) -> List[Dict]: + '''Look up and return signal by name''' + result = [] + for iodir, xput in zip(('inout', 'input', 'output'), self.xputs): + for sig in xput: + result.append(sig.as_nwt_dict(iodir)) + return result + + def get_signal_by_name_as_dict(self, name: str) -> Dict: + '''Look up and return signal by name''' + sig_list = self.get_signals_as_list_of_dicts() + for sig in sig_list: + if sig['name'] == name: + return sig + else: + raise ValueError("Signal {} does not exist in IP block {}" + .format(name, self.name)) diff --git a/utils/reggen/reggen/lib.py b/utils/reggen/reggen/lib.py new file mode 100644 index 0000000..d72ef3d --- /dev/null +++ b/utils/reggen/reggen/lib.py @@ -0,0 +1,262 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. 
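# --- Editor's note, not part of the patch: a minimal usage sketch for the
# --- IpBlock loader defined above. The hjson path and the empty
# --- param_defaults list are illustrative assumptions, not part of reggen.
from reggen.ip_block import IpBlock

blk = IpBlock.from_path('hw/regs/chimera_regs.hjson', [])  # parse and validate the block
print(blk.name, blk.regwidth)          # block name and register width (32 if unspecified)
print(sorted(blk.get_rnames()))        # register names, with multiregs expanded
# blk.get_signal_by_name_as_dict('some_signal')  # hypothetical name; raises ValueError
#                                                # if the signal is not in an available_*_list
# --- end editor's note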
+# SPDX-License-Identifier: Apache-2.0 + +'''Parsing support code for reggen''' + +import re +from typing import Dict, List, Optional, cast + + +# Names that are prohibited (used as reserved keywords in systemverilog) +_VERILOG_KEYWORDS = { + 'alias', 'always', 'always_comb', 'always_ff', 'always_latch', 'and', + 'assert', 'assign', 'assume', 'automatic', 'before', 'begin', 'bind', + 'bins', 'binsof', 'bit', 'break', 'buf', 'bufif0', 'bufif1', 'byte', + 'case', 'casex', 'casez', 'cell', 'chandle', 'class', 'clocking', 'cmos', + 'config', 'const', 'constraint', 'context', 'continue', 'cover', + 'covergroup', 'coverpoint', 'cross', 'deassign', 'default', 'defparam', + 'design', 'disable', 'dist', 'do', 'edge', 'else', 'end', 'endcase', + 'endclass', 'endclocking', 'endconfig', 'endfunction', 'endgenerate', + 'endgroup', 'endinterface', 'endmodule', 'endpackage', 'endprimitive', + 'endprogram', 'endproperty', 'endspecify', 'endsequence', 'endtable', + 'endtask', 'enum', 'event', 'expect', 'export', 'extends', 'extern', + 'final', 'first_match', 'for', 'force', 'foreach', 'forever', 'fork', + 'forkjoin', 'function', 'generate', 'genvar', 'highz0', 'highz1', 'if', + 'iff', 'ifnone', 'ignore_bins', 'illegal_bins', 'import', 'incdir', + 'include', 'initial', 'inout', 'input', 'inside', 'instance', 'int', + 'integer', 'interface', 'intersect', 'join', 'join_any', 'join_none', + 'large', 'liblist', 'library', 'local', 'localparam', 'logic', 'longint', + 'macromodule', 'matches', 'medium', 'modport', 'module', 'nand', 'negedge', + 'new', 'nmos', 'nor', 'noshowcancelled', 'not', 'notif0', 'notif1', 'null', + 'or', 'output', 'package', 'packed', 'parameter', 'pmos', 'posedge', + 'primitive', 'priority', 'program', 'property', 'protected', 'pull0', + 'pull1', 'pulldown', 'pullup', 'pulsestyle_onevent', 'pulsestyle_ondetect', + 'pure', 'rand', 'randc', 'randcase', 'randsequence', 'rcmos', 'real', + 'realtime', 'ref', 'reg', 'release', 'repeat', 'return', 'rnmos', 'rpmos', + 'rtran', 'rtranif0', 'rtranif1', 'scalared', 'sequence', 'shortint', + 'shortreal', 'showcancelled', 'signed', 'small', 'solve', 'specify', + 'specparam', 'static', 'string', 'strong0', 'strong1', 'struct', 'super', + 'supply0', 'supply1', 'table', 'tagged', 'task', 'this', 'throughout', + 'time', 'timeprecision', 'timeunit', 'tran', 'tranif0', 'tranif1', 'tri', + 'tri0', 'tri1', 'triand', 'trior', 'trireg', 'type', 'typedef', 'union', + 'unique', 'unsigned', 'use', 'uwire', 'var', 'vectored', 'virtual', 'void', + 'wait', 'wait_order', 'wand', 'weak0', 'weak1', 'while', 'wildcard', + 'wire', 'with', 'within', 'wor', 'xnor', 'xor' +} + + +def check_str_dict(obj: object, what: str) -> Dict[str, object]: + if not isinstance(obj, dict): + raise ValueError("{} is expected to be a dict, but was actually a {}." + .format(what, type(obj).__name__)) + + for key in obj: + if not isinstance(key, str): + raise ValueError('{} has a key {!r}, which is not a string.' + .format(what, key)) + + return cast(Dict[str, object], obj) + + +def check_keys(obj: object, + what: str, + required_keys: List[str], + optional_keys: List[str]) -> Dict[str, object]: + '''Check that obj is a dict object with the expected keys + + If not, raise a ValueError; the what argument names the object. 
+ + ''' + od = check_str_dict(obj, what) + + allowed = set() + missing = [] + for key in required_keys: + assert key not in allowed + allowed.add(key) + if key not in od: + missing.append(key) + + for key in optional_keys: + assert key not in allowed + allowed.add(key) + + unexpected = [] + for key in od: + if key not in allowed: + unexpected.append(key) + + if missing or unexpected: + mstr = ('The following required fields were missing: {}.' + .format(', '.join(missing)) if missing else '') + ustr = ('The following unexpected fields were found: {}.' + .format(', '.join(unexpected)) if unexpected else '') + raise ValueError("{} doesn't have the right keys. {}{}{}" + .format(what, + mstr, + ' ' if mstr and ustr else '', + ustr)) + + return od + + +def check_str(obj: object, what: str) -> str: + '''Check that the given object is a string + + If not, raise a ValueError; the what argument names the object. + + ''' + if not isinstance(obj, str): + raise ValueError('{} is of type {}, not a string.' + .format(what, type(obj).__name__)) + return obj + + +def check_name(obj: object, what: str) -> str: + '''Check that obj is a string that's a valid name. + + If not, raise a ValueError; the what argument names the object. + + ''' + as_str = check_str(obj, what) + + # Allow the usual symbol constituents (alphanumeric plus underscore; no + # leading numbers) + if not re.match(r'[a-zA-Z_][a-zA-Z_0-9]*$', as_str): + raise ValueError("{} is {!r}, which isn't a valid symbol in " + "C / Verilog, so isn't allowed as a name." + .format(what, as_str)) + + # Also check that this isn't a reserved word. + if as_str in _VERILOG_KEYWORDS: + raise ValueError("{} is {!r}, which is a reserved word in " + "SystemVerilog, so isn't allowed as a name." + .format(what, as_str)) + + return as_str + + +def check_bool(obj: object, what: str) -> bool: + '''Check that obj is a bool or a string that parses to a bool. + + If not, raise a ValueError; the what argument names the object. + + ''' + if isinstance(obj, str): + as_bool = { + 'true': True, + 'false': False, + '1': True, + '0': False + }.get(obj.lower()) + if as_bool is None: + raise ValueError('{} is {!r}, which cannot be parsed as a bool.' + .format(what, obj)) + return as_bool + + if obj is True or obj is False: + return obj + + raise ValueError('{} is of type {}, not a bool.' + .format(what, type(obj).__name__)) + + +def check_list(obj: object, what: str) -> List[object]: + '''Check that the given object is a list + + If not, raise a ValueError; the what argument names the object. + + ''' + if not isinstance(obj, list): + raise ValueError('{} is of type {}, not a list.' + .format(what, type(obj).__name__)) + return obj + + +def check_str_list(obj: object, what: str) -> List[str]: + '''Check that the given object is a list of strings + + If not, raise a ValueError; the what argument names the object. + + ''' + lst = check_list(obj, what) + for idx, elt in enumerate(lst): + if not isinstance(elt, str): + raise ValueError('Element {} of {} is of type {}, ' + 'not a string.' + .format(idx, what, type(elt).__name__)) + return cast(List[str], lst) + + +def check_name_list(obj: object, what: str) -> List[str]: + '''Check that the given object is a list of valid names + + If not, raise a ValueError; the what argument names the object. 
+ + ''' + lst = check_list(obj, what) + for idx, elt in enumerate(lst): + check_name(elt, 'Element {} of {}'.format(idx + 1, what)) + + return cast(List[str], lst) + + +def check_int(obj: object, what: str) -> int: + '''Check that obj is an integer or a string that parses to an integer. + + If not, raise a ValueError; the what argument names the object. + + ''' + if isinstance(obj, int): + return obj + + if isinstance(obj, str): + try: + return int(obj, 0) + except ValueError: + raise ValueError('{} is {!r}, which cannot be parsed as an int.' + .format(what, obj)) from None + + raise ValueError('{} is of type {}, not an integer.' + .format(what, type(obj).__name__)) + + +def check_xint(obj: object, what: str) -> Optional[int]: + '''Check that obj is an integer, a string that parses to an integer or "x". + + On success, return an integer value if there is one or None if the value + was 'x'. On failure, raise a ValueError; the what argument names the + object. + + ''' + if isinstance(obj, int): + return obj + + if isinstance(obj, str): + if obj == 'x': + return None + try: + return int(obj, 0) + except ValueError: + raise ValueError('{} is {!r}, which is not "x", ' + 'nor can it be parsed as an int.' + .format(what, obj)) from None + + raise ValueError('{} is of type {}, not an integer.' + .format(what, type(obj).__name__)) + + +def check_optional_str(obj: object, what: str) -> Optional[str]: + '''Check that obj is a string or None''' + return None if obj is None else check_str(obj, what) + + +def get_basename(name: str) -> str: + '''Strip trailing _number (used as multireg suffix) from name''' + # TODO: This is a workaround, should solve this as part of parsing a + # multi-reg. + match = re.search(r'_[0-9]+$', name) + assert match + assert match.start() > 0 + return name[0:match.start()] diff --git a/utils/reggen/reggen/multi_register.py b/utils/reggen/reggen/multi_register.py new file mode 100644 index 0000000..82c8667 --- /dev/null +++ b/utils/reggen/reggen/multi_register.py @@ -0,0 +1,142 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 + +from typing import Dict, List + +from reggen import register +from .field import Field +from .lib import check_keys, check_str, check_name, check_bool +from .params import ReggenParams +from .reg_base import RegBase +from .register import Register + +REQUIRED_FIELDS = { + 'name': ['s', "base name of the registers"], + 'desc': ['t', "description of the registers"], + 'count': [ + 's', "number of instances to generate." + " This field can be integer or string matching" + " from param_list." + ], + 'cname': [ + 's', "base name for each instance, mostly" + " useful for referring to instance in messages." + ], + 'fields': [ + 'l', "list of register field description" + " groups. Describes bit positions used for" + " base instance." + ] +} +OPTIONAL_FIELDS = register.OPTIONAL_FIELDS.copy() +OPTIONAL_FIELDS.update({ + 'regwen_multi': [ + 'pb', "If true, regwen term increments" + " along with current multireg count." + ], + 'compact': [ + 'pb', "If true, allow multireg compacting." + "If false, do not compact." 
+ ] +}) + + +class MultiRegister(RegBase): + def __init__(self, + offset: int, + addrsep: int, + reg_width: int, + params: ReggenParams, + raw: object): + super().__init__(offset) + + rd = check_keys(raw, 'multireg', + list(REQUIRED_FIELDS.keys()), + list(OPTIONAL_FIELDS.keys())) + + # Now that we've checked the schema of rd, we make a "reg" version of + # it that removes any fields that are allowed by MultiRegister but + # aren't allowed by Register. We'll pass that to the register factory + # method. + reg_allowed_keys = (set(register.REQUIRED_FIELDS.keys()) | + set(register.OPTIONAL_FIELDS.keys())) + reg_rd = {key: value + for key, value in rd.items() + if key in reg_allowed_keys} + self.reg = Register.from_raw(reg_width, offset, params, reg_rd) + + self.cname = check_name(rd['cname'], + 'cname field of multireg {}' + .format(self.reg.name)) + + self.regwen_multi = check_bool(rd.get('regwen_multi', False), + 'regwen_multi field of multireg {}' + .format(self.reg.name)) + + default_compact = True if len(self.reg.fields) == 1 else False + self.compact = check_bool(rd.get('compact', default_compact), + 'compact field of multireg {}' + .format(self.reg.name)) + if self.compact and len(self.reg.fields) > 1: + raise ValueError('Multireg {} sets the compact flag ' + 'but has multiple fields.' + .format(self.reg.name)) + + count_str = check_str(rd['count'], + 'count field of multireg {}' + .format(self.reg.name)) + self.count = params.expand(count_str, + 'count field of multireg ' + self.reg.name) + if self.count <= 0: + raise ValueError("Multireg {} has a count of {}, " + "which isn't positive." + .format(self.reg.name, self.count)) + + # Generate the registers that this multireg expands into. Here, a + # "creg" is a "compacted register", which might contain multiple actual + # registers. + if self.compact: + assert len(self.reg.fields) == 1 + width_per_reg = self.reg.fields[0].bits.msb + 1 + assert width_per_reg <= reg_width + regs_per_creg = reg_width // width_per_reg + else: + regs_per_creg = 1 + + self.regs = [] + creg_count = (self.count + regs_per_creg - 1) // regs_per_creg + for creg_idx in range(creg_count): + min_reg_idx = regs_per_creg * creg_idx + max_reg_idx = min(min_reg_idx + regs_per_creg, self.count) - 1 + creg_offset = offset + creg_idx * addrsep + + reg = self.reg.make_multi(reg_width, + creg_offset, creg_idx, creg_count, + self.regwen_multi, self.compact, + min_reg_idx, max_reg_idx, self.cname) + self.regs.append(reg) + + def next_offset(self, addrsep: int) -> int: + return self.offset + len(self.regs) * addrsep + + def get_n_bits(self, bittype: List[str] = ["q"]) -> int: + return sum(reg.get_n_bits(bittype) for reg in self.regs) + + def get_field_list(self) -> List[Field]: + ret = [] + for reg in self.regs: + ret += reg.get_field_list() + return ret + + def is_homogeneous(self) -> bool: + return self.reg.is_homogeneous() + + def _asdict(self) -> Dict[str, object]: + rd = self.reg._asdict() + rd['count'] = str(self.count) + rd['cname'] = self.cname + rd['regwen_multi'] = str(self.regwen_multi) + rd['compact'] = str(self.compact) + + return {'multireg': rd} diff --git a/utils/reggen/reggen/params.py b/utils/reggen/reggen/params.py new file mode 100644 index 0000000..b7a6adc --- /dev/null +++ b/utils/reggen/reggen/params.py @@ -0,0 +1,341 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. 
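# --- Editor's note, not part of the patch: a worked example of the multireg
# --- compaction arithmetic in MultiRegister.__init__ above (the numbers are
# --- illustrative assumptions). With reg_width = 32, a single field covering
# --- bits [3:0] and count = 10:
# ---   width_per_reg = msb + 1            = 4
# ---   regs_per_creg = 32 // 4            = 8 replicas per compacted register
# ---   creg_count    = (10 + 8 - 1) // 8  = 2
# --- so the multireg expands into 2 registers and next_offset() advances by
# --- 2 * addrsep bytes.
# --- end editor's note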
+# SPDX-License-Identifier: Apache-2.0 + +import re +from collections.abc import MutableMapping +from typing import Dict, List, Optional, Tuple + +from .lib import check_keys, check_str, check_int, check_bool, check_list + +REQUIRED_FIELDS = { + 'name': ['s', "name of the item"], +} + +OPTIONAL_FIELDS = { + 'desc': ['s', "description of the item"], + 'type': ['s', "item type. int by default"], + 'default': ['s', "item default value"], + 'local': ['pb', "to be localparam"], + 'expose': ['pb', "to be exposed to top"], + 'randcount': [ + 's', "number of bits to randomize in the parameter. 0 by default." + ], + 'randtype': ['s', "type of randomization to perform. none by default"], +} + + +class BaseParam: + def __init__(self, name: str, desc: Optional[str], param_type: str): + self.name = name + self.desc = desc + self.param_type = param_type + + def apply_default(self, value: str) -> None: + if self.param_type[:3] == 'int': + check_int(value, + 'default value for parameter {} ' + '(which has type {})' + .format(self.name, self.param_type)) + self.default = value + + def as_dict(self) -> Dict[str, object]: + rd = {} # type: Dict[str, object] + rd['name'] = self.name + if self.desc is not None: + rd['desc'] = self.desc + rd['type'] = self.param_type + return rd + + +class LocalParam(BaseParam): + def __init__(self, + name: str, + desc: Optional[str], + param_type: str, + value: str): + super().__init__(name, desc, param_type) + self.value = value + + def expand_value(self, when: str) -> int: + try: + return int(self.value, 0) + except ValueError: + raise ValueError("When {}, the {} value expanded as " + "{}, which doesn't parse as an integer." + .format(when, self.name, self.value)) from None + + def as_dict(self) -> Dict[str, object]: + rd = super().as_dict() + rd['local'] = True + rd['default'] = self.value + return rd + + +class Parameter(BaseParam): + def __init__(self, + name: str, + desc: Optional[str], + param_type: str, + default: str, + expose: bool): + super().__init__(name, desc, param_type) + self.default = default + self.expose = expose + + def as_dict(self) -> Dict[str, object]: + rd = super().as_dict() + rd['default'] = self.default + rd['expose'] = 'true' if self.expose else 'false' + return rd + + +class RandParameter(BaseParam): + def __init__(self, + name: str, + desc: Optional[str], + param_type: str, + randcount: int, + randtype: str): + assert randcount > 0 + assert randtype in ['perm', 'data'] + super().__init__(name, desc, param_type) + self.randcount = randcount + self.randtype = randtype + + def apply_default(self, value: str) -> None: + raise ValueError('Cannot apply a default value of {!r} to ' + 'parameter {}: it is a random netlist constant.' + .format(self.name, value)) + + def as_dict(self) -> Dict[str, object]: + rd = super().as_dict() + rd['randcount'] = self.randcount + rd['randtype'] = self.randtype + return rd + + +def _parse_parameter(where: str, raw: object) -> BaseParam: + rd = check_keys(raw, where, + list(REQUIRED_FIELDS.keys()), + list(OPTIONAL_FIELDS.keys())) + + # TODO: Check if PascalCase or ALL_CAPS + name = check_str(rd['name'], 'name field of ' + where) + + r_desc = rd.get('desc') + if r_desc is None: + desc = None + else: + desc = check_str(r_desc, 'desc field of ' + where) + + # TODO: We should probably check that any register called RndCnstFoo has + # randtype and randcount. + if name.lower().startswith('rndcnst') and 'randtype' in rd: + # This is a random netlist constant and should be parsed as a + # RandParameter. 
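# --- Editor's note, not part of the patch: an hjson param_list entry that
# --- takes this random-netlist-constant branch might look like the sketch
# --- below (the name and type are illustrative assumptions):
# ---   { name: "RndCnstSeed", desc: "Netlist-randomised seed",
# ---     type: "logic [31:0]", randcount: "32", randtype: "data" }
# --- Such an entry must not set 'default', 'local' or 'expose'; those cases
# --- are rejected by the checks that follow.
# --- end editor's note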
+ randtype = check_str(rd.get('randtype', 'none'), + 'randtype field of ' + where) + if randtype not in ['perm', 'data']: + raise ValueError('At {}, parameter {} has a name that implies it ' + 'is a random netlist constant, which means it ' + 'must specify a randtype of "perm" or "data", ' + 'rather than {!r}.' + .format(where, name, randtype)) + + r_randcount = rd.get('randcount') + if r_randcount is None: + raise ValueError('At {}, the random netlist constant {} has no ' + 'randcount field.' + .format(where, name)) + randcount = check_int(r_randcount, 'randcount field of ' + where) + if randcount <= 0: + raise ValueError('At {}, the random netlist constant {} has a ' + 'randcount of {}, which is not positive.' + .format(where, name, randcount)) + + r_type = rd.get('type') + if r_type is None: + raise ValueError('At {}, parameter {} has no type field (which is ' + 'required for random netlist constants).' + .format(where, name)) + param_type = check_str(r_type, 'type field of ' + where) + + local = check_bool(rd.get('local', 'false'), 'local field of ' + where) + if local: + raise ValueError('At {}, the parameter {} specifies local = true, ' + 'meaning that it is a localparam. This is ' + 'incompatible with being a random netlist ' + 'constant (how would it be set?)' + .format(where, name)) + + r_default = rd.get('default') + if r_default is not None: + raise ValueError('At {}, the parameter {} specifies a value for ' + 'the "default" field. This is incompatible with ' + 'being a random netlist constant: the value will ' + 'be set by the random generator.' + .format(where, name)) + + expose = check_bool(rd.get('expose', 'false'), + 'expose field of ' + where) + if expose: + raise ValueError('At {}, the parameter {} specifies expose = ' + 'true, meaning that the parameter is exposed to ' + 'the top-level. This is incompatible with being ' + 'a random netlist constant.' + .format(where, name)) + + return RandParameter(name, desc, param_type, randcount, randtype) + + # This doesn't have a name like a random netlist constant. Check that it + # doesn't define randcount or randtype. + for fld in ['randcount', 'randtype']: + if fld in rd: + raise ValueError("At {where}, the parameter {name} specifies " + "{fld} but the name doesn't look like a random " + "netlist constant. To use {fld}, prefix the name " + "with RndCnst." + .format(where=where, name=name, fld=fld)) + + r_type = rd.get('type') + if r_type is None: + param_type = 'int' + else: + param_type = check_str(r_type, 'type field of ' + where) + + local = check_bool(rd.get('local', 'true'), 'local field of ' + where) + expose = check_bool(rd.get('expose', 'false'), 'expose field of ' + where) + + r_default = rd.get('default') + if r_default is None: + raise ValueError('At {}, the {} param has no default field.' + .format(where, name)) + else: + default = check_str(r_default, 'default field of ' + where) + if param_type[:3] == 'int': + check_int(default, + 'default field of {}, (an integer parameter)' + .format(name)) + + if local: + if expose: + raise ValueError('At {}, the localparam {} cannot be exposed to ' + 'the top-level.' 
+ .format(where, name)) + return LocalParam(name, desc, param_type, value=default) + else: + return Parameter(name, desc, param_type, default, expose) + + +class Params(MutableMapping): + def __init__(self) -> None: + self.by_name = {} # type: Dict[str, BaseParam] + + def __getitem__(self, key): + return self.by_name[key] + + def __delitem__(self, key): + del self.by_name[key] + + def __setitem__(self, key, value): + self.by_name[key] = value + + def __iter__(self): + return iter(self.by_name) + + def __len__(self): + return len(self.by_name) + + def __repr__(self): + return f"{type(self).__name__}({self.by_name})" + + def add(self, param: BaseParam) -> None: + assert param.name not in self.by_name + self.by_name[param.name] = param + + def apply_defaults(self, defaults: List[Tuple[str, str]]) -> None: + for idx, (key, value) in enumerate(defaults): + param = self.by_name[key] + if param is None: + raise KeyError('Cannot find parameter ' + '{} to set default value.' + .format(key)) + + param.apply_default(value) + + def _expand_one(self, value: str, when: str) -> int: + # Check whether value is already an integer: if so, return that. + try: + return int(value, 0) + except ValueError: + pass + + param = self.by_name.get(value) + if param is None: + raise ValueError('Cannot find a parameter called {} when {}. ' + 'Known parameters: {}.' + .format(value, + when, + ', '.join(self.by_name.keys()))) + + # Only allow localparams in the expansion (because otherwise we're at + # the mercy of whatever instantiates the block). + if not isinstance(param, LocalParam): + raise ValueError("When {}, {} is a not a local parameter." + .format(when, value)) + + return param.expand_value(when) + + def expand(self, value: str, where: str) -> int: + # Here, we want to support arithmetic expressions with + and -. We + # don't support other operators, or parentheses (so can parse with just + # a regex). + # + # Use re.split, capturing the operators. This turns e.g. "a + b-c" into + # ['a ', '+', ' b', '-', 'c']. If there's a leading operator ("+a"), + # the first element of the results is an empty string. This means + # elements with odd positions are always operators and elements with + # even positions are values. + acc = 0 + is_neg = False + + for idx, tok in enumerate(re.split(r'([+-])', value)): + if idx == 0 and not tok: + continue + if idx % 2: + is_neg = (tok == '-') + continue + + term = self._expand_one(tok.strip(), + 'expanding term {} of {}' + .format(idx // 2, where)) + acc += -term if is_neg else term + + return acc + + def as_dicts(self) -> List[Dict[str, object]]: + return [p.as_dict() for p in self.by_name.values()] + + +class ReggenParams(Params): + @staticmethod + def from_raw(where: str, raw: object) -> 'ReggenParams': + ret = ReggenParams() + rl = check_list(raw, where) + for idx, r_param in enumerate(rl): + entry_where = 'entry {} in {}'.format(idx + 1, where) + param = _parse_parameter(entry_where, r_param) + if param.name in ret: + raise ValueError('At {}, found a duplicate parameter with ' + 'name {}.' + .format(entry_where, param.name)) + ret.add(param) + return ret + + def get_localparams(self) -> List[LocalParam]: + ret = [] + for param in self.by_name.values(): + if isinstance(param, LocalParam): + ret.append(param) + return ret diff --git a/utils/reggen/reggen/reg_base.py b/utils/reggen/reggen/reg_base.py new file mode 100644 index 0000000..eb88b46 --- /dev/null +++ b/utils/reggen/reggen/reg_base.py @@ -0,0 +1,45 @@ +# Copyright lowRISC contributors. 
+# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 + +from typing import List + +from .field import Field + + +class RegBase: + '''An abstract class inherited by Register and MultiRegister + + This represents a block of one or more registers with a base address. + + ''' + def __init__(self, offset: int): + self.offset = offset + + def get_n_bits(self, bittype: List[str]) -> int: + '''Get the size of this register / these registers in bits + + See Field.get_n_bits() for the precise meaning of bittype. + + ''' + raise NotImplementedError() + + def get_field_list(self) -> List[Field]: + '''Get an ordered list of the fields in the register(s) + + Registers are ordered from low to high address. Within a register, + fields are ordered as Register.fields: from LSB to MSB. + + ''' + raise NotImplementedError() + + def is_homogeneous(self) -> bool: + '''True if every field in the block is identical + + For a single register, this is true if it only has one field. For a + multireg, it is true if the generating register has just one field. + Note that if the compact flag is set, the generated registers might + have multiple (replicated) fields. + + ''' + raise NotImplementedError() diff --git a/utils/reggen/reggen/reg_block.py b/utils/reggen/reggen/reg_block.py new file mode 100644 index 0000000..30a4f74 --- /dev/null +++ b/utils/reggen/reggen/reg_block.py @@ -0,0 +1,431 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 + +'''Code representing the registers, windows etc. for a block''' + +import re +from typing import Callable, Dict, List, Optional, Sequence, Union + +from .alert import Alert +from .access import SWAccess, HWAccess +from .field import Field +from .signal import Signal +from .lib import check_int, check_list, check_str_dict, check_str +from .multi_register import MultiRegister +from .params import ReggenParams +from .register import Register +from .window import Window + + +class RegBlock: + def __init__(self, reg_width: int, params: ReggenParams): + + self._addrsep = (reg_width + 7) // 8 + self._reg_width = reg_width + self._params = params + + self.offset = 0 + self.multiregs = [] # type: List[MultiRegister] + self.registers = [] # type: List[Register] + self.windows = [] # type: List[Window] + + # Boolean indication whether ANY window in regblock has data integrity passthrough + self.has_data_intg_passthru = False + + # A list of all registers, expanding multiregs, ordered by offset + self.flat_regs = [] # type: List[Register] + + # A list of registers and multiregisters (unexpanded) + self.all_regs = [] # type: List[Union[Register, MultiRegister]] + + # A list with everything in order + self.entries = [] # type: List[object] + + # A dict of named entries, mapping name to offset + self.name_to_offset = {} # type: Dict[str, int] + + # A dict of all registers (expanding multiregs), mapping name to the + # register object + self.name_to_flat_reg = {} # type: Dict[str, Register] + + # A list of all write enable names + self.wennames = [] # type: List[str] + + @staticmethod + def build_blocks(block: 'RegBlock', + raw: object) -> Dict[Optional[str], 'RegBlock']: + '''Build a dictionary of blocks for a 'registers' field in the hjson + + There are two different syntaxes we might see here. The simple syntax + just consists of a list of entries (register, multireg, window, + skipto). 
If we see that, each entry gets added to init_block and then + we return {None: init_block}. + + The more complicated syntax is a dictionary. This parses from hjson as + an OrderedDict which we walk in document order. Entries from the first + key/value pair in the dictionary will be added to init_block. Later + key/value pairs start empty RegBlocks. The return value is a dictionary + mapping the keys we saw to their respective RegBlocks. + + ''' + if isinstance(raw, list): + # This is the simple syntax + block.add_raw_registers(raw, 'registers field at top-level') + return {None: block} + + # This is the more complicated syntax + if not isinstance(raw, dict): + raise ValueError('registers field at top-level is ' + 'neither a list or a dictionary.') + + ret = {} # type: Dict[Optional[str], RegBlock] + for idx, (r_key, r_val) in enumerate(raw.items()): + if idx > 0: + block = RegBlock(block._reg_width, block._params) + + rb_key = check_str(r_key, + 'the key for item {} of ' + 'the registers dictionary at top-level' + .format(idx + 1)) + rb_val = check_list(r_val, + 'the value for item {} of ' + 'the registers dictionary at top-level' + .format(idx + 1)) + + block.add_raw_registers(rb_val, + 'item {} of the registers ' + 'dictionary at top-level' + .format(idx + 1)) + block.validate() + + assert rb_key not in ret + ret[rb_key] = block + + return ret + + def add_raw_registers(self, raw: object, what: str) -> None: + rl = check_list(raw, 'registers field at top-level') + for entry_idx, entry_raw in enumerate(rl): + where = ('entry {} of the top-level registers field' + .format(entry_idx + 1)) + self.add_raw(where, entry_raw) + + def add_raw(self, where: str, raw: object) -> None: + entry = check_str_dict(raw, where) + + handlers = { + 'register': self._handle_register, + 'reserved': self._handle_reserved, + 'skipto': self._handle_skipto, + 'window': self._handle_window, + 'multireg': self._handle_multireg + } + + entry_type = 'register' + entry_body = entry # type: object + + for t in ['reserved', 'skipto', 'window', 'multireg']: + t_body = entry.get(t) + if t_body is not None: + # Special entries look like { window: { ... } }, so if we + # get a hit, this should be the only key in entry. Note + # that this also checks that nothing has more than one + # entry type. + if len(entry) != 1: + other_keys = [k for k in entry if k != t] + assert other_keys + raise ValueError('At offset {:#x}, {} has key {}, which ' + 'should give its type. But it also has ' + 'other keys too: {}.' + .format(self.offset, + where, t, ', '.join(other_keys))) + entry_type = t + entry_body = t_body + + entry_where = ('At offset {:#x}, {}, type {!r}' + .format(self.offset, where, entry_type)) + + handlers[entry_type](entry_where, entry_body) + + def _handle_register(self, where: str, body: object) -> None: + reg = Register.from_raw(self._reg_width, + self.offset, self._params, body) + self.add_register(reg) + + def _handle_reserved(self, where: str, body: object) -> None: + nreserved = check_int(body, 'body of ' + where) + if nreserved <= 0: + raise ValueError('Reserved count in {} is {}, ' + 'which is not positive.' + .format(where, nreserved)) + + self.offset += self._addrsep * nreserved + + def _handle_skipto(self, where: str, body: object) -> None: + skipto = check_int(body, 'body of ' + where) + if skipto < self.offset: + raise ValueError('Destination of skipto in {} is {:#x}, ' + 'is less than the current offset, {:#x}.' 
+ .format(where, skipto, self.offset)) + if skipto % self._addrsep: + raise ValueError('Destination of skipto in {} is {:#x}, ' + 'not a multiple of addrsep, {:#x}.' + .format(where, skipto, self._addrsep)) + self.offset = skipto + + def _handle_window(self, where: str, body: object) -> None: + window = Window.from_raw(self.offset, + self._reg_width, self._params, body) + if window.name is not None: + lname = window.name.lower() + if lname in self.name_to_offset: + raise ValueError('Window {} (at offset {:#x}) has the ' + 'same name as something at offset {:#x}.' + .format(window.name, window.offset, + self.name_to_offset[lname])) + self.add_window(window) + + def _handle_multireg(self, where: str, body: object) -> None: + mr = MultiRegister(self.offset, + self._addrsep, self._reg_width, self._params, body) + for reg in mr.regs: + lname = reg.name.lower() + if lname in self.name_to_offset: + raise ValueError('Multiregister {} (at offset {:#x}) expands ' + 'to a register with name {} (at offset ' + '{:#x}), but this already names something at ' + 'offset {:#x}.' + .format(mr.reg.name, mr.reg.offset, + reg.name, reg.offset, + self.name_to_offset[lname])) + self._add_flat_reg(reg) + self.name_to_offset[lname] = reg.offset + + self.multiregs.append(mr) + self.all_regs.append(mr) + self.entries.append(mr) + self.offset = mr.next_offset(self._addrsep) + + def add_register(self, reg: Register) -> None: + assert reg.offset == self.offset + + lname = reg.name.lower() + if lname in self.name_to_offset: + raise ValueError('Register {} (at offset {:#x}) has the same ' + 'name as something at offset {:#x}.' + .format(reg.name, reg.offset, + self.name_to_offset[lname])) + self._add_flat_reg(reg) + self.name_to_offset[lname] = reg.offset + + self.registers.append(reg) + self.all_regs.append(reg) + self.entries.append(reg) + self.offset = reg.next_offset(self._addrsep) + + if reg.regwen is not None and reg.regwen not in self.wennames: + self.wennames.append(reg.regwen) + + def _add_flat_reg(self, reg: Register) -> None: + # The first assertion is checked at the call site (where we can print + # out a nicer message for multiregs). The second assertion should be + # implied by the first. + assert reg.name not in self.name_to_offset + assert reg.name not in self.name_to_flat_reg + + self.flat_regs.append(reg) + self.name_to_flat_reg[reg.name.lower()] = reg + + def add_window(self, window: Window) -> None: + if window.name is not None: + lname = window.name.lower() + assert lname not in self.name_to_offset + self.name_to_offset[lname] = window.offset + + self.windows.append(window) + self.entries.append(window) + assert self.offset <= window.offset + self.offset = window.next_offset(self._addrsep) + + self.has_data_intg_passthru |= window.data_intg_passthru + + def validate(self) -> None: + '''Run this to check consistency after all registers have been added''' + + # Check that every write-enable register has a good name, a valid reset + # value, and valid access permissions. + for wenname in self.wennames: + # check the REGWEN naming convention + if re.fullmatch(r'(.+_)*REGWEN(_[0-9]+)?', wenname) is None: + raise ValueError("Regwen name {} must have the suffix '_REGWEN'" + .format(wenname)) + + wen_reg = self.name_to_flat_reg.get(wenname.lower()) + if wen_reg is None: + raise ValueError('One or more registers use {} as a ' + 'write-enable, but there is no such register.' + .format(wenname)) + + # If the REGWEN bit is SW controlled, check that the register + # defaults to enabled. 
If this bit is read-only by SW and hence + # hardware controlled, we do not enforce this requirement. + if wen_reg.swaccess.key != "ro" and not wen_reg.resval: + raise ValueError('One or more registers use {} as a ' + 'write-enable. Since it is SW-controlled ' + 'it should have a nonzero reset value.' + .format(wenname)) + + if wen_reg.swaccess.key == "rw0c": + # The register is software managed: all good! + continue + + if wen_reg.swaccess.key == "ro" and wen_reg.hwaccess.key == "hwo": + # The register is hardware managed: that's fine too. + continue + + raise ValueError('One or more registers use {} as a write-enable. ' + 'However, it has invalid access permissions ' + '({} / {}). It should either have swaccess=RW0C ' + 'or have swaccess=RO and hwaccess=HWO.' + .format(wenname, + wen_reg.swaccess.key, + wen_reg.hwaccess.key)) + + def get_n_bits(self, bittype: List[str] = ["q"]) -> int: + '''Returns number of bits in registers in this block. + + This includes those expanded from multiregs. See Field.get_n_bits for a + description of the bittype argument. + + ''' + return sum(reg.get_n_bits(bittype) for reg in self.flat_regs) + + def as_dicts(self) -> List[object]: + entries = [] # type: List[object] + offset = 0 + for entry in self.entries: + assert (isinstance(entry, Register) or + isinstance(entry, MultiRegister) or + isinstance(entry, Window)) + + next_off = entry.offset + assert offset <= next_off + res_bytes = next_off - offset + if res_bytes: + assert res_bytes % self._addrsep == 0 + entries.append({'reserved': res_bytes // self._addrsep}) + + entries.append(entry) + offset = entry.next_offset(self._addrsep) + + return entries + + _FieldFormatter = Callable[[bool, str], str] + + def _add_intr_alert_reg(self, + signals: Sequence[Signal], + reg_name: str, + reg_desc: str, + field_desc_fmt: Optional[Union[str, _FieldFormatter]], + swaccess: str, + hwaccess: str, + is_testreg: bool, + reg_tags: List[str]) -> None: + swaccess_obj = SWAccess('RegBlock._make_intr_alert_reg()', swaccess) + hwaccess_obj = HWAccess('RegBlock._make_intr_alert_reg()', hwaccess) + + fields = [] + for signal in signals: + if field_desc_fmt is None: + field_desc = signal.desc + elif isinstance(field_desc_fmt, str): + field_desc = field_desc_fmt + else: + width = signal.bits.width() + field_desc = field_desc_fmt(width > 1, signal.name) + + fields.append(Field(signal.name, + field_desc or signal.desc, + tags=[], + swaccess=swaccess_obj, + hwaccess=hwaccess_obj, + hwqe=is_testreg, + hwre=False, + bits=signal.bits, + resval=0, + enum=None)) + + reg = Register(self.offset, + reg_name, + reg_desc, + swaccess_obj, + hwaccess_obj, + hwext=is_testreg, + hwqe=is_testreg, + hwre=False, + regwen=None, + tags=reg_tags, + resval=None, + shadowed=False, + fields=fields, + update_err_alert=None, + storage_err_alert=None) + self.add_register(reg) + + def make_intr_regs(self, interrupts: Sequence[Signal]) -> None: + assert interrupts + assert interrupts[-1].bits.msb < self._reg_width + + self._add_intr_alert_reg(interrupts, + 'INTR_STATE', + 'Interrupt State Register', + None, + 'rw1c', + 'hrw', + False, + # intr_state csr is affected by writes to + # other csrs - skip write-check + ["excl:CsrNonInitTests:CsrExclWriteCheck"]) + self._add_intr_alert_reg(interrupts, + 'INTR_ENABLE', + 'Interrupt Enable Register', + lambda w, n: ('Enable interrupt when ' + '{}!!INTR_STATE.{} is set.' 
+ .format('corresponding bit in ' + if w else '', + n)), + 'rw', + 'hro', + False, + []) + self._add_intr_alert_reg(interrupts, + 'INTR_TEST', + 'Interrupt Test Register', + lambda w, n: ('Write 1 to force ' + '{}!!INTR_STATE.{} to 1.' + .format('corresponding bit in ' + if w else '', + n)), + 'wo', + 'hro', + True, + # intr_test csr is WO so reads back 0s + ["excl:CsrNonInitTests:CsrExclWrite"]) + + def make_alert_regs(self, alerts: List[Alert]) -> None: + assert alerts + assert len(alerts) < self._reg_width + self._add_intr_alert_reg(alerts, + 'ALERT_TEST', + 'Alert Test Register', + ('Write 1 to trigger ' + 'one alert event of this kind.'), + 'wo', + 'hro', + True, + []) + + def get_addr_width(self) -> int: + '''Calculate the number of bits to address every byte of the block''' + return (self.offset - 1).bit_length() diff --git a/utils/reggen/reggen/reg_html.css b/utils/reggen/reggen/reg_html.css new file mode 100644 index 0000000..4cb48ed --- /dev/null +++ b/utils/reggen/reggen/reg_html.css @@ -0,0 +1,74 @@ +/* Stylesheet for reggen HTML register output */ +/* Copyright lowRISC contributors. */ +/* Licensed under the Apache License, Version 2.0, see LICENSE for details. */ +/* SPDX-License-Identifier: Apache-2.0 */ + +table.regpic { + width: 95%; + border-collapse: collapse; + margin-left:auto; + margin-right:auto; + table-layout:fixed; +} + +table.regdef { + border: 1px solid black; + width: 80%; + border-collapse: collapse; + margin-left:auto; + margin-right:auto; + table-layout:auto; +} + +table.regdef th { + border: 1px solid black; + font-family: sans-serif; + +} + +td.bitnum { + font-size: 60%; + text-align: center; +} + +td.unused { + border: 1px solid black; + background-color: gray; +} + +td.fname { + border: 1px solid black; + text-align: center; + font-family: sans-serif; +} + + +td.regbits, td.regperm, td.regrv { + border: 1px solid black; + text-align: center; + font-family: sans-serif; +} + +td.regde, td.regfn { + border: 1px solid black; +} + +table.cfgtable { + border: 1px solid black; + width: 80%; + border-collapse: collapse; + margin-left:auto; + margin-right:auto; + table-layout:auto; +} + +table.cfgtable th { + border: 1px solid black; + font-family: sans-serif; + font-weight: bold; +} + +table.cfgtable td { + border: 1px solid black; + font-family: sans-serif; +} diff --git a/utils/reggen/reggen/reg_pkg.sv.tpl b/utils/reggen/reggen/reg_pkg.sv.tpl new file mode 100644 index 0000000..1c5520a --- /dev/null +++ b/utils/reggen/reggen/reg_pkg.sv.tpl @@ -0,0 +1,347 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 +// +// Register Package auto-generated by `reggen` containing data structure +<% + from topgen import lib # TODO: Split lib to common lib module + + from reggen.access import HwAccess, SwRdAccess, SwWrAccess + from reggen.register import Register + from reggen.multi_register import MultiRegister + + from reggen import gen_rtl + + localparams = block.params.get_localparams() + + addr_widths = gen_rtl.get_addr_widths(block) + + lblock = block.name.lower() + ublock = lblock.upper() + + def reg_pfx(reg): + return '{}_{}'.format(ublock, reg.name.upper()) + + def reg_resname(reg): + return '{}_RESVAL'.format(reg_pfx(reg)) + + def field_resname(reg, field): + return '{}_{}_RESVAL'.format(reg_pfx(reg), field.name.upper()) + +%>\ +<%def name="typedefs_for_iface(iface_name, iface_desc, for_iface, rb)">\ +<% + hdr = gen_rtl.make_box_quote('Typedefs for registers' + for_iface) +%>\ +% for r in rb.all_regs: + % if r.get_n_bits(["q"]): + % if hdr: + +${hdr} + % endif +<% + r0 = gen_rtl.get_r0(r) + hdr = None +%>\ + + typedef struct packed { + % if r.is_homogeneous(): + ## If we have a homogeneous register or multireg, there is just one field + ## (possibly replicated many times). The typedef is for one copy of that + ## field. +<% + field = r.get_field_list()[0] + field_q_width = field.get_n_bits(r0.hwext, ['q']) + field_q_bits = lib.bitarray(field_q_width, 2) +%>\ + logic ${field_q_bits} q; + % if field.hwqe: + logic qe; + % endif + % if field.hwre or (r0.shadowed and r0.hwext): + logic re; + % endif + % if r0.shadowed and not r0.hwext: + logic err_update; + logic err_storage; + % endif + % else: + ## We are inhomogeneous, which means there is more than one different + ## field. Generate a reg2hw typedef that packs together all the fields of + ## the register. + % for f in r0.fields: + % if f.get_n_bits(r0.hwext, ["q"]) >= 1: +<% + field_q_width = f.get_n_bits(r0.hwext, ['q']) + field_q_bits = lib.bitarray(field_q_width, 2) + + struct_name = f.name.lower() +%>\ + struct packed { + logic ${field_q_bits} q; + % if f.hwqe: + logic qe; + % endif + % if f.hwre or (r0.shadowed and r0.hwext): + logic re; + % endif + % if r0.shadowed and not r0.hwext: + logic err_update; + logic err_storage; + % endif + } ${struct_name}; + %endif + %endfor + %endif + } ${gen_rtl.get_reg_tx_type(block, r, False)}; + %endif +% endfor +% for r in rb.all_regs: + % if r.get_n_bits(["d"]): + % if hdr: + +${hdr} + % endif +<% + r0 = gen_rtl.get_r0(r) + hdr = None +%>\ + + typedef struct packed { + % if r.is_homogeneous(): + ## If we have a homogeneous register or multireg, there is just one field + ## (possibly replicated many times). The typedef is for one copy of that + ## field. +<% + field = r.get_field_list()[0] + field_d_width = field.get_n_bits(r0.hwext, ['d']) + field_d_bits = lib.bitarray(field_d_width, 2) +%>\ + logic ${field_d_bits} d; + % if not r0.hwext: + logic de; + % endif + % else: + ## We are inhomogeneous, which means there is more than one different + ## field. Generate a hw2reg typedef that packs together all the fields of + ## the register. 
+ % for f in r0.fields: + % if f.get_n_bits(r0.hwext, ["d"]) >= 1: +<% + field_d_width = f.get_n_bits(r0.hwext, ['d']) + field_d_bits = lib.bitarray(field_d_width, 2) + + struct_name = f.name.lower() +%>\ + struct packed { + logic ${field_d_bits} d; + % if not r0.hwext: + logic de; + % endif + } ${struct_name}; + %endif + %endfor + %endif + } ${gen_rtl.get_reg_tx_type(block, r, True)}; + % endif +% endfor +\ +<%def name="reg2hw_for_iface(iface_name, iface_desc, for_iface, rb)">\ +<% +nbits = rb.get_n_bits(["q", "qe", "re"]) +packbit = 0 +%>\ +% if nbits > 0: + + // Register -> HW type${for_iface} + typedef struct packed { +% for r in rb.all_regs: + % if r.get_n_bits(["q"]): +<% + r0 = gen_rtl.get_r0(r) + struct_type = gen_rtl.get_reg_tx_type(block, r, False) + struct_width = r0.get_n_bits(['q', 'qe', 're']) + + if isinstance(r, MultiRegister): + struct_type += " [{}:0]".format(r.count - 1) + struct_width *= r.count + + msb = nbits - packbit - 1 + lsb = msb - struct_width + 1 + packbit += struct_width +%>\ + ${struct_type} ${r0.name.lower()}; // [${msb}:${lsb}] + % endif +% endfor + } ${gen_rtl.get_iface_tx_type(block, iface_name, False)}; +% endif +\ +<%def name="hw2reg_for_iface(iface_name, iface_desc, for_iface, rb)">\ +<% +nbits = rb.get_n_bits(["d", "de"]) +packbit = 0 +%>\ +% if nbits > 0: + + // HW -> register type${for_iface} + typedef struct packed { +% for r in rb.all_regs: + % if r.get_n_bits(["d"]): +<% + r0 = gen_rtl.get_r0(r) + struct_type = gen_rtl.get_reg_tx_type(block, r, True) + struct_width = r0.get_n_bits(['d', 'de']) + + if isinstance(r, MultiRegister): + struct_type += " [{}:0]".format(r.count - 1) + struct_width *= r.count + + msb = nbits - packbit - 1 + lsb = msb - struct_width + 1 + packbit += struct_width +%>\ + ${struct_type} ${r0.name.lower()}; // [${msb}:${lsb}] + % endif +% endfor + } ${gen_rtl.get_iface_tx_type(block, iface_name, True)}; +% endif +\ +<%def name="offsets_for_iface(iface_name, iface_desc, for_iface, rb)">\ +% if not rb.flat_regs: +<% return STOP_RENDERING %> +% endif + + // Register offsets${for_iface} +<% +aw_name, aw = addr_widths[iface_name] +%>\ +% for r in rb.flat_regs: +<% +value = "{}'h {:x}".format(aw, r.offset) +%>\ + parameter logic [${aw_name}-1:0] ${reg_pfx(r)}_OFFSET = ${value}; +% endfor +\ +<%def name="hwext_resvals_for_iface(iface_name, iface_desc, for_iface, rb)">\ +<% + hwext_regs = [r for r in rb.flat_regs if r.hwext] +%>\ +% if hwext_regs: + + // Reset values for hwext registers and their fields${for_iface} + % for reg in hwext_regs: +<% + reg_width = reg.get_width() + reg_msb = reg_width - 1 + reg_resval = "{}'h {:x}".format(reg_width, reg.resval) +%>\ + parameter logic [${reg_msb}:0] ${reg_resname(reg)} = ${reg_resval}; + % for field in reg.fields: + % if field.resval is not None: +<% + field_width = field.bits.width() + field_msb = field_width - 1 + field_resval = "{}'h {:x}".format(field_width, field.resval) +%>\ + parameter logic [${field_msb}:0] ${field_resname(reg, field)} = ${field_resval}; + % endif + % endfor + % endfor +% endif +\ +<%def name="windows_for_iface(iface_name, iface_desc, for_iface, rb)">\ +% if rb.windows: +<% + aw_name, aw = addr_widths[iface_name] +%>\ + + // Window parameters${for_iface} +% for i,w in enumerate(rb.windows): +<% + win_pfx = '{}_{}'.format(ublock, w.name.upper()) + base_txt_val = "{}'h {:x}".format(aw, w.offset) + size_txt_val = "'h {:x}".format(w.size_in_bytes) + + offset_type = 'logic [{}-1:0]'.format(aw_name) + size_type = 'int unsigned' + max_type_len = max(len(offset_type), 
len(size_type)) + + offset_type += ' ' * (max_type_len - len(offset_type)) + size_type += ' ' * (max_type_len - len(size_type)) + +%>\ + parameter ${offset_type} ${win_pfx}_OFFSET = ${base_txt_val}; + parameter ${size_type} ${win_pfx}_SIZE = ${size_txt_val}; +% endfor +% endif +\ +<%def name="reg_data_for_iface(iface_name, iface_desc, for_iface, rb)">\ +% if rb.flat_regs: +<% + lpfx = gen_rtl.get_type_name_pfx(block, iface_name) + upfx = lpfx.upper() + idx_len = len("{}".format(len(rb.flat_regs) - 1)) +%>\ + + // Register index${for_iface} + typedef enum int { +% for r in rb.flat_regs: + ${ublock}_${r.name.upper()}${"" if loop.last else ","} +% endfor + } ${lpfx}_id_e; + + // Register width information to check illegal writes${for_iface} + parameter logic [3:0] ${upfx}_PERMIT [${len(rb.flat_regs)}] = '{ + % for i, r in enumerate(rb.flat_regs): +<% + index_str = "{}".format(i).rjust(idx_len) + width = r.get_width() + if width > 24: + mask = '1111' + elif width > 16: + mask = '0111' + elif width > 8: + mask = '0011' + else: + mask = '0001' + + comma = ',' if i < len(rb.flat_regs) - 1 else ' ' +%>\ + 4'b ${mask}${comma} // index[${index_str}] ${ublock}_${r.name.upper()} + % endfor + }; +% endif +\ + +package ${lblock}_reg_pkg; +% if localparams: + + // Param list +% for param in localparams: + parameter ${param.param_type} ${param.name} = ${param.value}; +% endfor +% endif + + // Address widths within the block +% for param_name, width in addr_widths.values(): + parameter int ${param_name} = ${width}; +% endfor +<% + just_default = len(block.reg_blocks) == 1 and None in block.reg_blocks +%>\ +% for iface_name, rb in block.reg_blocks.items(): +<% + iface_desc = iface_name or 'default' + for_iface = '' if just_default else ' for {} interface'.format(iface_desc) +%>\ +${typedefs_for_iface(iface_name, iface_desc, for_iface, rb)}\ +${reg2hw_for_iface(iface_name, iface_desc, for_iface, rb)}\ +${hw2reg_for_iface(iface_name, iface_desc, for_iface, rb)}\ +${offsets_for_iface(iface_name, iface_desc, for_iface, rb)}\ +${hwext_resvals_for_iface(iface_name, iface_desc, for_iface, rb)}\ +${windows_for_iface(iface_name, iface_desc, for_iface, rb)}\ +${reg_data_for_iface(iface_name, iface_desc, for_iface, rb)}\ +% endfor + +endpackage + diff --git a/utils/reggen/reggen/reg_top.sv.tpl b/utils/reggen/reggen/reg_top.sv.tpl new file mode 100644 index 0000000..bfab87f --- /dev/null +++ b/utils/reggen/reggen/reg_top.sv.tpl @@ -0,0 +1,795 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 +// +// Register Top module auto-generated by `reggen` +<% + from reggen import gen_rtl + from reggen.access import HwAccess, SwRdAccess, SwWrAccess + from reggen.lib import get_basename + from reggen.register import Register + from reggen.multi_register import MultiRegister + from reggen.ip_block import IpBlock + from reggen.bus_interfaces import BusProtocol + + num_wins = len(rb.windows) + num_wins_width = ((num_wins+1).bit_length()) - 1 + num_reg_dsp = 1 if rb.all_regs else 0 + num_dsp = num_wins + num_reg_dsp + regs_flat = rb.flat_regs + max_regs_char = len("{}".format(len(regs_flat) - 1)) + addr_width = rb.get_addr_width() + + # Used for the dev_select_i signal on a tlul_socket_1n with N = + # num_wins + 1. This needs to be able to represent any value up to + # N-1. 
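  # --- Editor's note, not part of the patch: a worked example of the widths
  # --- computed here (num_wins = 2 with registers present is an assumption).
  # ---   num_dsp   = num_wins + num_reg_dsp        = 3 ports on the socket
  # ---   steer_msb = (num_wins).bit_length() - 1   = 2 - 1 = 1
  # --- so reg_steer is declared as logic [1:0], wide enough to encode the
  # --- selector values 0..2, with num_dsp - 1 = 2 (the register port) used
  # --- as the default in the steering logic further down.
  # --- end editor's note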
+ steer_msb = ((num_wins).bit_length()) - 1 + + lblock = block.name.lower() + ublock = lblock.upper() + + u_mod_base = mod_base.upper() + + reg2hw_t = gen_rtl.get_iface_tx_type(block, if_name, False) + hw2reg_t = gen_rtl.get_iface_tx_type(block, if_name, True) + + # Calculate whether we're going to need an AW parameter. We use it if there + # are any registers (obviously). We also use it if there are any windows that + # don't start at zero and end at 1 << addr_width (see the "addr_checks" + # calculation below for where that comes from). + needs_aw = (bool(regs_flat) or + num_wins > 1 or + rb.windows and ( + rb.windows[0].offset != 0 or + rb.windows[0].size_in_bytes != (1 << addr_width))) + + # Check if the interface protocol is reg_interface + use_reg_iface = any([interface['protocol'] == BusProtocol.REG_IFACE and not interface['is_host'] for interface in block.bus_interfaces.interface_list]) + reg_intf_req = "reg_req_t" + reg_intf_rsp = "reg_rsp_t" + + common_data_intg_gen = 0 if rb.has_data_intg_passthru else 1 + adapt_data_intg_gen = 1 if rb.has_data_intg_passthru else 0 + assert common_data_intg_gen != adapt_data_intg_gen +%> + +% if use_reg_iface: +`include "common_cells/assertions.svh" +% else: +`include "prim_assert.sv" +% endif + +module ${mod_name} \ +% if use_reg_iface: +#( + parameter type reg_req_t = logic, + parameter type reg_rsp_t = logic, + parameter int AW = ${addr_width} +) \ +% else: + % if needs_aw: +#( + parameter int AW = ${addr_width} +) \ + % endif +% endif +( + input logic clk_i, + input logic rst_ni, +% if use_reg_iface: + input ${reg_intf_req} reg_req_i, + output ${reg_intf_rsp} reg_rsp_o, +% else: + input tlul_pkg::tl_h2d_t tl_i, + output tlul_pkg::tl_d2h_t tl_o, +% endif +% if num_wins != 0: + + // Output port for window +% if use_reg_iface: + output ${reg_intf_req} [${num_wins}-1:0] reg_req_win_o, + input ${reg_intf_rsp} [${num_wins}-1:0] reg_rsp_win_i, +% else: + output tlul_pkg::tl_h2d_t tl_win_o [${num_wins}], + input tlul_pkg::tl_d2h_t tl_win_i [${num_wins}], +% endif + +% endif + // To HW +% if rb.get_n_bits(["q","qe","re"]): + output ${lblock}_reg_pkg::${reg2hw_t} reg2hw, // Write +% endif +% if rb.get_n_bits(["d","de"]): + input ${lblock}_reg_pkg::${hw2reg_t} hw2reg, // Read +% endif + +% if not use_reg_iface: + // Integrity check errors + output logic intg_err_o, +% endif + + // Config + input devmode_i // If 1, explicit error return for unmapped register access +); + + import ${lblock}_reg_pkg::* ; + +% if rb.all_regs: + localparam int DW = ${block.regwidth}; + localparam int DBW = DW/8; // Byte Width + + // register signals + logic reg_we; + logic reg_re; + logic [BlockAw-1:0] reg_addr; + logic [DW-1:0] reg_wdata; + logic [DBW-1:0] reg_be; + logic [DW-1:0] reg_rdata; + logic reg_error; + + logic addrmiss, wr_err; + + logic [DW-1:0] reg_rdata_next; + +% if use_reg_iface: + // Below register interface can be changed + reg_req_t reg_intf_req; + reg_rsp_t reg_intf_rsp; +% else: + tlul_pkg::tl_h2d_t tl_reg_h2d; + tlul_pkg::tl_d2h_t tl_reg_d2h; +% endif +% endif + +% if not use_reg_iface: + // incoming payload check + logic intg_err; + tlul_cmd_intg_chk u_chk ( + .tl_i, + .err_o(intg_err) + ); + + logic intg_err_q; + always_ff @(posedge clk_i or negedge rst_ni) begin + if (!rst_ni) begin + intg_err_q <= '0; + end else if (intg_err) begin + intg_err_q <= 1'b1; + end + end + + // integrity error output is permanent and should be used for alert generation + // register errors are transactional + assign intg_err_o = intg_err_q | intg_err; + + // outgoing 
integrity generation + tlul_pkg::tl_d2h_t tl_o_pre; + tlul_rsp_intg_gen #( + .EnableRspIntgGen(1), + .EnableDataIntgGen(${common_data_intg_gen}) + ) u_rsp_intg_gen ( + .tl_i(tl_o_pre), + .tl_o + ); +% endif + +% if num_dsp == 1: + ## Either no windows (and just registers) or no registers and only + ## one window. + % if num_wins == 0: + % if use_reg_iface: + assign reg_intf_req = reg_req_i; + assign reg_rsp_o = reg_intf_rsp; + % else: + assign tl_reg_h2d = tl_i; + assign tl_o_pre = tl_reg_d2h; + % endif + % else: + % if use_reg_iface: + assign reg_req_win_o = reg_req_i; + assign reg_rsp_o = reg_rsp_win_i + % else: + assign tl_win_o[0] = tl_i; + assign tl_o_pre = tl_win_i[0]; + % endif + % endif +% else: + logic [${steer_msb}:0] reg_steer; + + % if use_reg_iface: + ${reg_intf_req} [${num_dsp}-1:0] reg_intf_demux_req; + ${reg_intf_rsp} [${num_dsp}-1:0] reg_intf_demux_rsp; + + // demux connection + assign reg_intf_req = reg_intf_demux_req[${num_wins}]; + assign reg_intf_demux_rsp[${num_wins}] = reg_intf_rsp; + + % for i in range(num_wins): + assign reg_req_win_o[${i}] = reg_intf_demux_req[${i}]; + assign reg_intf_demux_rsp[${i}] = reg_rsp_win_i[${i}]; + % endfor + + // Create Socket_1n + reg_demux #( + .NoPorts (${num_dsp}), + .req_t (${reg_intf_req}), + .rsp_t (${reg_intf_rsp}) + ) i_reg_demux ( + .clk_i, + .rst_ni, + .in_req_i (reg_req_i), + .in_rsp_o (reg_rsp_o), + .out_req_o (reg_intf_demux_req), + .out_rsp_i (reg_intf_demux_rsp), + .in_select_i (reg_steer) + ); + + % else: + tlul_pkg::tl_h2d_t tl_socket_h2d [${num_dsp}]; + tlul_pkg::tl_d2h_t tl_socket_d2h [${num_dsp}]; + + // socket_1n connection + % if rb.all_regs: + assign tl_reg_h2d = tl_socket_h2d[${num_wins}]; + assign tl_socket_d2h[${num_wins}] = tl_reg_d2h; + + % endif + % for i,t in enumerate(rb.windows): + assign tl_win_o[${i}] = tl_socket_h2d[${i}]; + % if common_data_intg_gen == 0 and rb.windows[i].data_intg_passthru == False: + ## If there are multiple windows, and not every window has data integrity + ## passthrough, we must generate data integrity for it here. + tlul_rsp_intg_gen #( + .EnableRspIntgGen(0), + .EnableDataIntgGen(1) + ) u_win${i}_data_intg_gen ( + .tl_i(tl_win_i[${i}]), + .tl_o(tl_socket_d2h[${i}]) + ); + % else: + assign tl_socket_d2h[${i}] = tl_win_i[${i}]; + % endif + % endfor + + // Create Socket_1n + tlul_socket_1n #( + .N (${num_dsp}), + .HReqPass (1'b1), + .HRspPass (1'b1), + .DReqPass ({${num_dsp}{1'b1}}), + .DRspPass ({${num_dsp}{1'b1}}), + .HReqDepth (4'h0), + .HRspDepth (4'h0), + .DReqDepth ({${num_dsp}{4'h0}}), + .DRspDepth ({${num_dsp}{4'h0}}) + ) u_socket ( + .clk_i, + .rst_ni, + .tl_h_i (tl_i), + .tl_h_o (tl_o_pre), + .tl_d_o (tl_socket_h2d), + .tl_d_i (tl_socket_d2h), + .dev_select_i (reg_steer) + ); + % endif + + // Create steering logic + always_comb begin + reg_steer = ${num_dsp-1}; // Default set to register + + // TODO: Can below codes be unique case () inside ? 
+ % for i,w in enumerate(rb.windows): +<% + base_addr = w.offset + limit_addr = w.offset + w.size_in_bytes + if use_reg_iface: + hi_check = 'reg_req_i.addr[AW-1:0] < {}'.format(limit_addr) + else: + hi_check = 'tl_i.a_address[AW-1:0] < {}'.format(limit_addr) + addr_checks = [] + if base_addr > 0: + if use_reg_iface: + addr_checks.append('reg_req_i.addr[AW-1:0] >= {}'.format(base_addr)) + else: + addr_checks.append('tl_i.a_address[AW-1:0] >= {}'.format(base_addr)) + if limit_addr < 2**addr_width: + if use_reg_iface: + addr_checks.append('reg_req_i.addr[AW-1:0] < {}'.format(limit_addr)) + else: + addr_checks.append('tl_i.a_address[AW-1:0] < {}'.format(limit_addr)) + + addr_test = ' && '.join(addr_checks) +%>\ + % if addr_test: + if (${addr_test}) begin + % endif + reg_steer = ${i}; + % if addr_test: + end + % endif + % endfor + % if not use_reg_iface: + if (intg_err) begin + reg_steer = ${num_dsp-1}; + end + % endif + end +% endif +% if rb.all_regs: + + +% if use_reg_iface: + assign reg_we = reg_intf_req.valid & reg_intf_req.write; + assign reg_re = reg_intf_req.valid & ~reg_intf_req.write; + assign reg_addr = reg_intf_req.addr[BlockAw-1:0]; + assign reg_wdata = reg_intf_req.wdata; + assign reg_be = reg_intf_req.wstrb; + assign reg_intf_rsp.rdata = reg_rdata; + assign reg_intf_rsp.error = reg_error; + assign reg_intf_rsp.ready = 1'b1; +% else: + tlul_adapter_reg #( + .RegAw(AW), + .RegDw(DW), + .EnableDataIntgGen(${adapt_data_intg_gen}) + ) u_reg_if ( + .clk_i, + .rst_ni, + + .tl_i (tl_reg_h2d), + .tl_o (tl_reg_d2h), + + .we_o (reg_we), + .re_o (reg_re), + .addr_o (reg_addr), + .wdata_o (reg_wdata), + .be_o (reg_be), + .rdata_i (reg_rdata), + .error_i (reg_error) + ); +% endif + + assign reg_rdata = reg_rdata_next ; +% if use_reg_iface: + assign reg_error = (devmode_i & addrmiss) | wr_err; +% else: + assign reg_error = (devmode_i & addrmiss) | wr_err | intg_err; +% endif + + + // Define SW related signals + // Format: __{wd|we|qs} + // or _{wd|we|qs} if field == 1 or 0 + % for r in regs_flat: + % if len(r.fields) == 1: +${sig_gen(r.fields[0], r.name.lower(), r.hwext, r.shadowed)}\ + % else: + % for f in r.fields: +${sig_gen(f, r.name.lower() + "_" + f.name.lower(), r.hwext, r.shadowed)}\ + % endfor + % endif + % endfor + + // Register instances + % for r in rb.all_regs: + ######################## multiregister ########################### + % if isinstance(r, MultiRegister): +<% + k = 0 +%> + % for sr in r.regs: + // Subregister ${k} of Multireg ${r.reg.name.lower()} + // R[${sr.name.lower()}]: V(${str(sr.hwext)}) + % if len(sr.fields) == 1: +<% + f = sr.fields[0] + finst_name = sr.name.lower() + fsig_name = r.reg.name.lower() + "[%d]" % k + k = k + 1 +%> +${finst_gen(f, finst_name, fsig_name, sr.hwext, sr.regwen, sr.shadowed)} + % else: + % for f in sr.fields: +<% + finst_name = sr.name.lower() + "_" + f.name.lower() + if r.is_homogeneous(): + fsig_name = r.reg.name.lower() + "[%d]" % k + k = k + 1 + else: + fsig_name = r.reg.name.lower() + "[%d]" % k + "." 
+ get_basename(f.name.lower()) +%> + // F[${f.name.lower()}]: ${f.bits.msb}:${f.bits.lsb} +${finst_gen(f, finst_name, fsig_name, sr.hwext, sr.regwen, sr.shadowed)} + % endfor +<% + if not r.is_homogeneous(): + k += 1 +%> + % endif + ## for: mreg_flat + % endfor +######################## register with single field ########################### + % elif len(r.fields) == 1: + // R[${r.name.lower()}]: V(${str(r.hwext)}) +<% + f = r.fields[0] + finst_name = r.name.lower() + fsig_name = r.name.lower() +%> +${finst_gen(f, finst_name, fsig_name, r.hwext, r.regwen, r.shadowed)} +######################## register with multiple fields ########################### + % else: + // R[${r.name.lower()}]: V(${str(r.hwext)}) + % for f in r.fields: +<% + finst_name = r.name.lower() + "_" + f.name.lower() + fsig_name = r.name.lower() + "." + f.name.lower() +%> + // F[${f.name.lower()}]: ${f.bits.msb}:${f.bits.lsb} +${finst_gen(f, finst_name, fsig_name, r.hwext, r.regwen, r.shadowed)} + % endfor + % endif + + ## for: rb.all_regs + % endfor + + + logic [${len(regs_flat)-1}:0] addr_hit; + always_comb begin + addr_hit = '0; + % for i,r in enumerate(regs_flat): + addr_hit[${"{}".format(i).rjust(max_regs_char)}] = (reg_addr == ${ublock}_${r.name.upper()}_OFFSET); + % endfor + end + + assign addrmiss = (reg_re || reg_we) ? ~|addr_hit : 1'b0 ; + +% if regs_flat: +<% + # We want to signal wr_err if reg_be (the byte enable signal) is true for + # any bytes that aren't supported by a register. That's true if a + # addr_hit[i] and a bit is set in reg_be but not in *_PERMIT[i]. + + wr_err_terms = ['(addr_hit[{idx}] & (|({mod}_PERMIT[{idx}] & ~reg_be)))' + .format(idx=str(i).rjust(max_regs_char), + mod=u_mod_base) + for i in range(len(regs_flat))] + wr_err_expr = (' |\n' + (' ' * 15)).join(wr_err_terms) +%>\ + // Check sub-word write is permitted + always_comb begin + wr_err = (reg_we & + (${wr_err_expr})); + end +% else: + assign wr_error = 1'b0; +% endif\ + + % for i, r in enumerate(regs_flat): + % if len(r.fields) == 1: +${we_gen(r.fields[0], r.name.lower(), r.hwext, r.shadowed, i)}\ + % else: + % for f in r.fields: +${we_gen(f, r.name.lower() + "_" + f.name.lower(), r.hwext, r.shadowed, i)}\ + % endfor + % endif + % endfor + + // Read data return + always_comb begin + reg_rdata_next = '0; + unique case (1'b1) + % for i, r in enumerate(regs_flat): + % if len(r.fields) == 1: + addr_hit[${i}]: begin +${rdata_gen(r.fields[0], r.name.lower())}\ + end + + % else: + addr_hit[${i}]: begin + % for f in r.fields: +${rdata_gen(f, r.name.lower() + "_" + f.name.lower())}\ + % endfor + end + + % endif + % endfor + default: begin + reg_rdata_next = '1; + end + endcase + end +% endif + + // Unused signal tieoff +% if rb.all_regs: + + // wdata / byte enable are not always fully used + // add a blanket unused statement to handle lint waivers + logic unused_wdata; + logic unused_be; + assign unused_wdata = ^reg_wdata; + assign unused_be = ^reg_be; +% else: + // devmode_i is not used if there are no registers + logic unused_devmode; + assign unused_devmode = ^devmode_i; +% endif +% if rb.all_regs: + + // Assertions for Register Interface +% if not use_reg_iface: + `ASSERT_PULSE(wePulse, reg_we) + `ASSERT_PULSE(rePulse, reg_re) + + `ASSERT(reAfterRv, $rose(reg_re || reg_we) |=> tl_o.d_valid) + + // this is formulated as an assumption such that the FPV testbenches do disprove this + // property by mistake + //`ASSUME(reqParity, tl_reg_h2d.a_valid |-> tl_reg_h2d.a_user.chk_en == tlul_pkg::CheckDis) +% endif + `ASSERT(en2addrHit, (reg_we || 
reg_re) |-> $onehot0(addr_hit)) + +% endif +endmodule + +% if use_reg_iface: +module ${mod_name}_intf +#( + parameter int AW = ${addr_width}, + localparam int DW = ${block.regwidth} +) ( + input logic clk_i, + input logic rst_ni, + REG_BUS.in regbus_slave, +% if num_wins != 0: + REG_BUS.out regbus_win_mst[${num_wins}-1:0], +% endif + // To HW +% if rb.get_n_bits(["q","qe","re"]): + output ${lblock}_reg_pkg::${reg2hw_t} reg2hw, // Write +% endif +% if rb.get_n_bits(["d","de"]): + input ${lblock}_reg_pkg::${hw2reg_t} hw2reg, // Read +% endif + // Config + input devmode_i // If 1, explicit error return for unmapped register access +); + localparam int unsigned STRB_WIDTH = DW/8; + +`include "register_interface/typedef.svh" +`include "register_interface/assign.svh" + + // Define structs for reg_bus + typedef logic [AW-1:0] addr_t; + typedef logic [DW-1:0] data_t; + typedef logic [STRB_WIDTH-1:0] strb_t; + `REG_BUS_TYPEDEF_ALL(reg_bus, addr_t, data_t, strb_t) + + reg_bus_req_t s_reg_req; + reg_bus_rsp_t s_reg_rsp; + + // Assign SV interface to structs + `REG_BUS_ASSIGN_TO_REQ(s_reg_req, regbus_slave) + `REG_BUS_ASSIGN_FROM_RSP(regbus_slave, s_reg_rsp) + +% if num_wins != 0: + reg_bus_req_t s_reg_win_req[${num_wins}-1:0]; + reg_bus_rsp_t s_reg_win_rsp[${num_wins}-1:0]; + for (genvar i = 0; i < ${num_wins}; i++) begin : gen_assign_window_structs + `REG_BUS_ASSIGN_TO_REQ(s_reg_win_req[i], regbus_win_mst[i]) + `REG_BUS_ASSIGN_FROM_RSP(regbus_win_mst[i], s_reg_win_rsp[i]) + end + +% endif + + + ${mod_name} #( + .reg_req_t(reg_bus_req_t), + .reg_rsp_t(reg_bus_rsp_t), + .AW(AW) + ) i_regs ( + .clk_i, + .rst_ni, + .reg_req_i(s_reg_req), + .reg_rsp_o(s_reg_rsp), +% if num_wins != 0: + .reg_req_win_o(s_reg_win_req), + .reg_rsp_win_i(s_reg_win_rsp), +% endif +% if rb.get_n_bits(["q","qe","re"]): + .reg2hw, // Write +% endif +% if rb.get_n_bits(["d","de"]): + .hw2reg, // Read +% endif + .devmode_i + ); + +endmodule + +% endif + +<%def name="str_bits_sv(bits)">\ +% if bits.msb != bits.lsb: +${bits.msb}:${bits.lsb}\ +% else: +${bits.msb}\ +% endif +\ +<%def name="str_arr_sv(bits)">\ +% if bits.msb != bits.lsb: +[${bits.msb-bits.lsb}:0] \ +% endif +\ +<%def name="sig_gen(field, sig_name, hwext, shadowed)">\ + % if field.swaccess.allows_read(): + logic ${str_arr_sv(field.bits)}${sig_name}_qs; + % endif + % if field.swaccess.allows_write(): + logic ${str_arr_sv(field.bits)}${sig_name}_wd; + logic ${sig_name}_we; + % endif + % if (field.swaccess.allows_read() and hwext) or shadowed: + logic ${sig_name}_re; + % endif +\ +<%def name="finst_gen(field, finst_name, fsig_name, hwext, regwen, shadowed)">\ + % if hwext: ## if hwext, instantiate prim_subreg_ext + prim_subreg_ext #( + .DW (${field.bits.width()}) + ) u_${finst_name} ( + % if field.swaccess.allows_read(): + .re (${finst_name}_re), + % else: + .re (1'b0), + % endif + % if field.swaccess.allows_write(): + % if regwen: + // qualified with register enable + .we (${finst_name}_we & ${regwen.lower()}_qs), + % else: + .we (${finst_name}_we), + % endif + .wd (${finst_name}_wd), + % else: + .we (1'b0), + .wd ('0), + % endif + % if field.hwaccess.allows_write(): + .d (hw2reg.${fsig_name}.d), + % else: + .d ('0), + % endif + % if field.hwre or shadowed: + .qre (reg2hw.${fsig_name}.re), + % else: + .qre (), + % endif + % if not field.hwaccess.allows_read(): + .qe (), + .q (), + % else: + % if field.hwqe: + .qe (reg2hw.${fsig_name}.qe), + % else: + .qe (), + % endif + .q (reg2hw.${fsig_name}.q ), + % endif + % if field.swaccess.allows_read(): + .qs (${finst_name}_qs) + 
% else: + .qs () + % endif + ); + % else: ## if not hwext, instantiate prim_subreg, prim_subreg_shadow or constant assign + % if ((not field.hwaccess.allows_read() and\ + not field.hwaccess.allows_write() and\ + field.swaccess.swrd() == SwRdAccess.RD and\ + not field.swaccess.allows_write())): + // constant-only read + assign ${finst_name}_qs = ${field.bits.width()}'h${"%x" % (field.resval or 0)}; + % else: ## not hwext not constant + % if not shadowed: + prim_subreg #( + % else: + prim_subreg_shadow #( + % endif + .DW (${field.bits.width()}), + .SWACCESS("${field.swaccess.value[1].name.upper()}"), + .RESVAL (${field.bits.width()}'h${"%x" % (field.resval or 0)}) + ) u_${finst_name} ( + .clk_i (clk_i ), + .rst_ni (rst_ni ), + + % if shadowed: + .re (${finst_name}_re), + % endif + % if field.swaccess.allows_write(): ## non-RO types + % if regwen: + // from register interface (qualified with register enable) + .we (${finst_name}_we & ${regwen.lower()}_qs), + % else: + // from register interface + .we (${finst_name}_we), + % endif + .wd (${finst_name}_wd), + % else: ## RO types + .we (1'b0), + .wd ('0 ), + % endif + + // from internal hardware + % if field.hwaccess.allows_write(): + .de (hw2reg.${fsig_name}.de), + .d (hw2reg.${fsig_name}.d ), + % else: + .de (1'b0), + .d ('0 ), + % endif + + // to internal hardware + % if not field.hwaccess.allows_read(): + .qe (), + .q (), + % else: + % if field.hwqe: + .qe (reg2hw.${fsig_name}.qe), + % else: + .qe (), + % endif + .q (reg2hw.${fsig_name}.q ), + % endif + + % if not shadowed: + % if field.swaccess.allows_read(): + // to register interface (read) + .qs (${finst_name}_qs) + % else: + .qs () + % endif + % else: + % if field.swaccess.allows_read(): + // to register interface (read) + .qs (${finst_name}_qs), + % else: + .qs (), + % endif + + // Shadow register error conditions + .err_update (reg2hw.${fsig_name}.err_update ), + .err_storage (reg2hw.${fsig_name}.err_storage) + % endif + ); + % endif ## end non-constant prim_subreg + % endif +\ +<%def name="we_gen(field, sig_name, hwext, shadowed, idx)">\ +<% + needs_we = field.swaccess.allows_write() + needs_re = (field.swaccess.allows_read() and hwext) or shadowed + space = '\n' if needs_we or needs_re else '' +%>\ +${space}\ +% if needs_we: + % if field.swaccess.swrd() != SwRdAccess.RC: + assign ${sig_name}_we = addr_hit[${idx}] & reg_we & !reg_error; + assign ${sig_name}_wd = reg_wdata[${str_bits_sv(field.bits)}]; + % else: + ## Generate WE based on read request, read should clear + assign ${sig_name}_we = addr_hit[${idx}] & reg_re & !reg_error; + assign ${sig_name}_wd = '1; + % endif +% endif +% if needs_re: + assign ${sig_name}_re = addr_hit[${idx}] & reg_re & !reg_error; +% endif +\ +<%def name="rdata_gen(field, sig_name)">\ +% if field.swaccess.allows_read(): + reg_rdata_next[${str_bits_sv(field.bits)}] = ${sig_name}_qs; +% else: + reg_rdata_next[${str_bits_sv(field.bits)}] = '0; +% endif +\ diff --git a/utils/reggen/reggen/register.py b/utils/reggen/reggen/register.py new file mode 100644 index 0000000..24f73d0 --- /dev/null +++ b/utils/reggen/reggen/register.py @@ -0,0 +1,375 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+# SPDX-License-Identifier: Apache-2.0 + +from typing import Dict, List, Optional + +from .access import SWAccess, HWAccess +from .field import Field +from .lib import (check_keys, check_str, check_name, check_bool, + check_list, check_str_list, check_int) +from .params import ReggenParams +from .reg_base import RegBase + +REQUIRED_FIELDS = { + 'name': ['s', "name of the register"], + 'desc': ['t', "description of the register"], + 'fields': ['l', "list of register field description groups"] +} + +OPTIONAL_FIELDS = { + 'swaccess': [ + 's', + "software access permission to use for " + "fields that don't specify swaccess" + ], + 'hwaccess': [ + 's', + "hardware access permission to use for " + "fields that don't specify hwaccess" + ], + 'hwext': [ + 's', + "'true' if the register is stored outside " + "of the register module" + ], + 'hwqe': [ + 's', + "'true' if hardware uses 'q' enable signal, " + "which is latched signal of software write pulse." + ], + 'hwre': [ + 's', + "'true' if hardware uses 're' signal, " + "which is latched signal of software read pulse." + ], + 'regwen': [ + 's', + "if register is write-protected by another register, that " + "register name should be given here. empty-string for no register " + "write protection" + ], + 'resval': [ + 'd', + "reset value of full register (default 0)" + ], + 'tags': [ + 's', + "tags for the register, following the format 'tag_name:item1:item2...'" + ], + 'shadowed': [ + 's', + "'true' if the register is shadowed" + ], + 'update_err_alert': [ + 's', + "alert that will be triggered if " + "this shadowed register has update error" + ], + 'storage_err_alert': [ + 's', + "alert that will be triggered if " + "this shadowed register has storage error" + ] +} + + +class Register(RegBase): + '''Code representing a register for reggen''' + def __init__(self, + offset: int, + name: str, + desc: str, + swaccess: SWAccess, + hwaccess: HWAccess, + hwext: bool, + hwqe: bool, + hwre: bool, + regwen: Optional[str], + tags: List[str], + resval: Optional[int], + shadowed: bool, + fields: List[Field], + update_err_alert: Optional[str], + storage_err_alert: Optional[str]): + super().__init__(offset) + self.name = name + self.desc = desc + + self.swaccess = swaccess + self.hwaccess = hwaccess + + self.hwext = hwext + if self.hwext and self.hwaccess.key == 'hro' and self.sw_readable(): + raise ValueError('hwext flag for {} register is set, but ' + 'hwaccess is hro and the register value ' + 'is readable by software mode ({}).' + .format(self.name, self.swaccess.key)) + + self.hwqe = hwqe + if self.hwext and not self.hwqe and self.sw_writable(): + raise ValueError('The {} register has hwext set and is writable ' + 'by software (mode {}), so must also have hwqe ' + 'enabled.' + .format(self.name, self.swaccess.key)) + + self.hwre = hwre + if self.hwre and not self.hwext: + raise ValueError('The {} register specifies hwre but not hwext.' + .format(self.name)) + + self.regwen = regwen + self.tags = tags + + self.shadowed = shadowed + sounds_shadowy = self.name.lower().endswith('_shadowed') + if self.shadowed and not sounds_shadowy: + raise ValueError("Register {} has the shadowed flag but its name " + "doesn't end with the _shadowed suffix." + .format(self.name)) + elif sounds_shadowy and not self.shadowed: + raise ValueError("Register {} has a name ending in _shadowed, but " + "the shadowed flag is not set." 
+ .format(self.name)) + + # Take a copy of fields and then sort by bit index + assert fields + self.fields = fields.copy() + self.fields.sort(key=lambda field: field.bits.lsb) + + # Index fields by name and check for duplicates + self.name_to_field = {} # type: Dict[str, Field] + for field in self.fields: + if field.name in self.name_to_field: + raise ValueError('Register {} has duplicate fields called {}.' + .format(self.name, field.name)) + self.name_to_field[field.name] = field + + # Check that field bits are disjoint + bits_used = 0 + for field in self.fields: + field_mask = field.bits.bitmask() + if bits_used & field_mask: + raise ValueError('Register {} has non-disjoint fields: ' + '{} uses bits {:#x} used by other fields.' + .format(self.name, field.name, + bits_used & field_mask)) + + # Compute a reset value and mask from our constituent fields. + self.resval = 0 + self.resmask = 0 + for field in self.fields: + self.resval |= (field.resval or 0) << field.bits.lsb + self.resmask |= field.bits.bitmask() + + # If the register defined a reset value, make sure it matches. We've + # already checked that each field matches, but we still need to make + # sure there weren't any bits unaccounted for. + if resval is not None and self.resval != resval: + raise ValueError('Register {} specifies a reset value of {:#x} but ' + 'collecting reset values across its fields yields ' + '{:#x}.' + .format(self.name, resval, self.resval)) + + self.update_err_alert = update_err_alert + self.storage_err_alert = storage_err_alert + + @staticmethod + def from_raw(reg_width: int, + offset: int, + params: ReggenParams, + raw: object) -> 'Register': + rd = check_keys(raw, 'register', + list(REQUIRED_FIELDS.keys()), + list(OPTIONAL_FIELDS.keys())) + + name = check_name(rd['name'], 'name of register') + desc = check_str(rd['desc'], 'desc for {} register'.format(name)) + + swaccess = SWAccess('{} register'.format(name), + rd.get('swaccess', 'none')) + hwaccess = HWAccess('{} register'.format(name), + rd.get('hwaccess', 'hro')) + + hwext = check_bool(rd.get('hwext', False), + 'hwext flag for {} register'.format(name)) + + hwqe = check_bool(rd.get('hwqe', False), + 'hwqe flag for {} register'.format(name)) + + hwre = check_bool(rd.get('hwre', False), + 'hwre flag for {} register'.format(name)) + + raw_regwen = rd.get('regwen', '') + if not raw_regwen: + regwen = None + else: + regwen = check_name(raw_regwen, + 'regwen for {} register'.format(name)) + + tags = check_str_list(rd.get('tags', []), + 'tags for {} register'.format(name)) + + raw_resval = rd.get('resval') + if raw_resval is None: + resval = None + else: + resval = check_int(raw_resval, + 'resval for {} register'.format(name)) + if not 0 <= resval < (1 << reg_width): + raise ValueError('resval for {} register is {}, ' + 'not an unsigned {}-bit number.' 
+ .format(name, resval, reg_width)) + + shadowed = check_bool(rd.get('shadowed', False), + 'shadowed flag for {} register' + .format(name)) + + raw_fields = check_list(rd['fields'], + 'fields for {} register'.format(name)) + if not raw_fields: + raise ValueError('Register {} has no fields.'.format(name)) + fields = [Field.from_raw(name, + idx, + len(raw_fields), + swaccess, + hwaccess, + resval, + reg_width, + hwqe, + hwre, + params, + rf) + for idx, rf in enumerate(raw_fields)] + + raw_uea = rd.get('update_err_alert') + if raw_uea is None: + update_err_alert = None + else: + update_err_alert = check_name(raw_uea, + 'update_err_alert for {} register' + .format(name)) + + raw_sea = rd.get('storage_err_alert') + if raw_sea is None: + storage_err_alert = None + else: + storage_err_alert = check_name(raw_sea, + 'storage_err_alert for {} register' + .format(name)) + + return Register(offset, name, desc, swaccess, hwaccess, + hwext, hwqe, hwre, regwen, + tags, resval, shadowed, fields, + update_err_alert, storage_err_alert) + + def next_offset(self, addrsep: int) -> int: + return self.offset + addrsep + + def sw_readable(self) -> bool: + return self.swaccess.key not in ['wo', 'r0w1c'] + + def sw_writable(self) -> bool: + return self.swaccess.key != 'ro' + + def dv_rights(self) -> str: + return self.swaccess.dv_rights() + + def get_n_bits(self, bittype: List[str]) -> int: + return sum(field.get_n_bits(self.hwext, bittype) + for field in self.fields) + + def get_field_list(self) -> List[Field]: + return self.fields + + def is_homogeneous(self) -> bool: + return len(self.fields) == 1 + + def get_width(self) -> int: + '''Get the width of the fields in the register in bits + + This counts dead space between and below fields, so it's calculated as + one more than the highest msb. + + ''' + # self.fields is ordered by (increasing) LSB, so we can find the MSB of + # the register by taking the MSB of the last field. + return 1 + self.fields[-1].bits.msb + + def make_multi(self, + reg_width: int, + offset: int, + creg_idx: int, + creg_count: int, + regwen_multi: bool, + compact: bool, + min_reg_idx: int, + max_reg_idx: int, + cname: str) -> 'Register': + '''Generate a numbered, packed version of the register''' + assert 0 <= creg_idx < creg_count + assert 0 <= min_reg_idx <= max_reg_idx + assert compact or (min_reg_idx == max_reg_idx) + + new_name = ('{}_{}'.format(self.name, creg_idx) + if creg_count > 1 + else self.name) + + if self.regwen is None or not regwen_multi or creg_count == 1: + new_regwen = self.regwen + else: + new_regwen = '{}_{}'.format(self.regwen, creg_idx) + + strip_field = creg_idx > 0 + + if compact: + # Compacting multiple registers into a single "compacted" register. + # This is only supported if we have exactly one field (checked at + # the call-site) + assert len(self.fields) == 1 + new_fields = self.fields[0].make_multi(reg_width, + min_reg_idx, max_reg_idx, + cname, creg_idx, + strip_field) + else: + # No compacting going on, but we still choose to rename the fields + # to match the registers + assert creg_idx == min_reg_idx + new_fields = [field.make_suffixed('_{}'.format(creg_idx), + cname, creg_idx, strip_field) + for field in self.fields] + + # Don't specify a reset value for the new register. Any reset value + # defined for the original register will have propagated to its fields, + # so when we combine them here, the Register constructor can compute a + # reset value for us (which might well be different from self.resval if + # we've replicated fields). 
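+        # Illustrative example (hypothetical values): a single 1-bit field with
+        # resval 1 that is compacted four times into one register ends up with
+        # copies at bits 0, 1, 2 and 3, so the Register constructor recomputes
+        # the reset value as 0xf, not the original register's 0x1.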
+ new_resval = None + + return Register(offset, new_name, self.desc, + self.swaccess, self.hwaccess, + self.hwext, self.hwqe, self.hwre, new_regwen, + self.tags, new_resval, self.shadowed, new_fields, + self.update_err_alert, self.storage_err_alert) + + def _asdict(self) -> Dict[str, object]: + rd = { + 'name': self.name, + 'desc': self.desc, + 'fields': self.fields, + 'swaccess': self.swaccess.key, + 'hwaccess': self.hwaccess.key, + 'hwext': str(self.hwext), + 'hwqe': str(self.hwqe), + 'hwre': str(self.hwre), + 'tags': self.tags, + 'shadowed': str(self.shadowed), + } + if self.regwen is not None: + rd['regwen'] = self.regwen + if self.update_err_alert is not None: + rd['update_err_alert'] = self.update_err_alert + if self.storage_err_alert is not None: + rd['storage_err_alert'] = self.storage_err_alert + + return rd diff --git a/utils/reggen/reggen/signal.py b/utils/reggen/reggen/signal.py new file mode 100644 index 0000000..bd4d6a3 --- /dev/null +++ b/utils/reggen/reggen/signal.py @@ -0,0 +1,63 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 + +from typing import Dict, Sequence + +from .bits import Bits +from .lib import check_keys, check_name, check_str, check_int, check_list + + +class Signal: + def __init__(self, name: str, desc: str, bits: Bits): + self.name = name + self.desc = desc + self.bits = bits + + @staticmethod + def from_raw(what: str, lsb: int, raw: object) -> 'Signal': + rd = check_keys(raw, what, + ['name', 'desc'], + ['width']) + + name = check_name(rd['name'], 'name field of ' + what) + desc = check_str(rd['desc'], 'desc field of ' + what) + width = check_int(rd.get('width', 1), 'width field of ' + what) + if width <= 0: + raise ValueError('The width field of signal {} ({}) ' + 'has value {}, but should be positive.' + .format(name, what, width)) + + bits = Bits(lsb + width - 1, lsb) + + return Signal(name, desc, bits) + + @staticmethod + def from_raw_list(what: str, raw: object) -> Sequence['Signal']: + lsb = 0 + ret = [] + for idx, entry in enumerate(check_list(raw, what)): + entry_what = 'entry {} of {}'.format(idx, what) + interrupt = Signal.from_raw(entry_what, lsb, entry) + ret.append(interrupt) + lsb += interrupt.bits.width() + return ret + + def _asdict(self) -> Dict[str, object]: + return { + 'name': self.name, + 'desc': self.desc, + 'width': str(self.bits.width()) + } + + def as_nwt_dict(self, type_field: str) -> Dict[str, object]: + '''Return a view of the signal as a dictionary + + The dictionary has fields "name", "width" and "type", the last + of which comes from the type_field argument. Used for topgen + integration. + + ''' + return {'name': self.name, + 'width': self.bits.width(), + 'type': type_field} diff --git a/utils/reggen/reggen/uvm_reg.sv.tpl b/utils/reggen/reggen/uvm_reg.sv.tpl new file mode 100644 index 0000000..9d8d9dc --- /dev/null +++ b/utils/reggen/reggen/uvm_reg.sv.tpl @@ -0,0 +1,14 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +// UVM Registers auto-generated by `reggen` containing data structure +## +## +## We use functions from uvm_reg_base.sv.tpl to define +## per-device-interface code. 
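+## As an illustrative example (block name hypothetical): for a block `bar`
+## with a single unnamed device interface, make_ral_pkg emits
+## `package bar_ral_pkg;` containing one register class per flat register
+## (named via gen_dv.rcname), one memory class per window (gen_dv.mcname) and
+## a register block class (gen_dv.bcname) whose build() creates and maps them.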
+## +<%namespace file="uvm_reg_base.sv.tpl" import="*"/>\ +## +## +${make_ral_pkg(dv_base_prefix, block.regwidth, reg_block_path, rb, esc_if_name)} diff --git a/utils/reggen/reggen/uvm_reg_base.sv.tpl b/utils/reggen/reggen/uvm_reg_base.sv.tpl new file mode 100644 index 0000000..d1da4f4 --- /dev/null +++ b/utils/reggen/reggen/uvm_reg_base.sv.tpl @@ -0,0 +1,431 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 +<%! + from reggen import gen_dv + from reggen.access import HwAccess, SwRdAccess, SwWrAccess +%> +## +## +## make_ral_pkg +## ============ +## +## Generate the RAL package for a device interface. +## +## dv_base_prefix a string naming the base register type. If it is FOO, +## then we will inherit from FOO_reg (assumed to +## be a subclass of uvm_reg). +## +## reg_width an integer giving the width of registers in bits +## +## reg_block_path the hierarchical path to the relevant register block in the +## design +## +## rb a RegBlock object +## +## esc_if_name a string giving the full, escaped, interface name. For +## a device interface called FOO on block BAR, +## this will be bar__foo. For an unnamed interface +## on block BAR, this will be just bar. +## +<%def name="make_ral_pkg(dv_base_prefix, reg_width, reg_block_path, rb, esc_if_name)">\ +package ${esc_if_name}_ral_pkg; +${make_ral_pkg_hdr(dv_base_prefix, [])} + +${make_ral_pkg_fwd_decls(esc_if_name, rb.flat_regs, rb.windows)} +% for reg in rb.flat_regs: + +${make_ral_pkg_reg_class(dv_base_prefix, reg_width, esc_if_name, reg_block_path, reg)} +% endfor +% for window in rb.windows: + +${make_ral_pkg_window_class(dv_base_prefix, esc_if_name, window)} +% endfor + +<% + reg_block_name = gen_dv.bcname(esc_if_name) +%>\ + class ${reg_block_name} extends ${dv_base_prefix}_reg_block; +% if rb.flat_regs: + // registers +% for r in rb.flat_regs: + rand ${gen_dv.rcname(esc_if_name, r)} ${r.name.lower()}; +% endfor +% endif +% if rb.windows: + // memories +% for window in rb.windows: + rand ${gen_dv.mcname(esc_if_name, window)} ${gen_dv.miname(window)}; +% endfor +% endif + + `uvm_object_utils(${reg_block_name}) + + function new(string name = "${reg_block_name}", + int has_coverage = UVM_NO_COVERAGE); + super.new(name, has_coverage); + endfunction : new + + virtual function void build(uvm_reg_addr_t base_addr, + csr_excl_item csr_excl = null); + // create default map + this.default_map = create_map(.name("default_map"), + .base_addr(base_addr), + .n_bytes(${reg_width//8}), + .endian(UVM_LITTLE_ENDIAN)); + if (csr_excl == null) begin + csr_excl = csr_excl_item::type_id::create("csr_excl"); + this.csr_excl = csr_excl; + end +% if rb.flat_regs: + set_hdl_path_root("tb.dut", "BkdrRegPathRtl"); + set_hdl_path_root("tb.dut", "BkdrRegPathRtlCommitted"); + set_hdl_path_root("tb.dut", "BkdrRegPathRtlShadow"); + // create registers +% for r in rb.flat_regs: +<% + reg_name = r.name.lower() + reg_right = r.dv_rights() + reg_offset = "{}'h{:x}".format(reg_width, r.offset) + reg_tags = r.tags + reg_shadowed = r.shadowed + + type_id_indent = ' ' * (len(reg_name) + 4) +%>\ + ${reg_name} = (${gen_dv.rcname(esc_if_name, r)}:: + ${type_id_indent}type_id::create("${reg_name}")); + ${reg_name}.configure(.blk_parent(this)); + ${reg_name}.build(csr_excl); + default_map.add_reg(.rg(${reg_name}), + .offset(${reg_offset}), + .rights("${reg_right}")); +% if reg_shadowed: + ${reg_name}.set_is_shadowed(); +% endif +% if reg_tags: + // create register tags +% for 
reg_tag in reg_tags: +<% + tag = reg_tag.split(":") +%>\ +% if tag[0] == "excl": + csr_excl.add_excl(${reg_name}.get_full_name(), ${tag[2]}, ${tag[1]}); +% endif +% endfor +% endif +% endfor +<% + any_regwen = False + for r in rb.flat_regs: + if r.regwen: + any_regwen = True + break +%>\ +% if any_regwen: + // assign locked reg to its regwen reg +% for r in rb.flat_regs: +% if r.regwen: +% for reg in rb.flat_regs: +% if r.regwen.lower() == reg.name.lower(): + ${r.regwen.lower()}.add_lockable_reg_or_fld(${r.name.lower()}); +<% break %>\ +% elif reg.name.lower() in r.regwen.lower(): +% for field in reg.get_field_list(): +% if r.regwen.lower() == (reg.name.lower() + "_" + field.name.lower()): + ${r.regwen.lower()}.${field.name.lower()}.add_lockable_reg_or_fld(${r.name.lower()}); +<% break %>\ +% endif +% endfor +% endif +% endfor +% endif +% endfor +% endif +% endif +${make_ral_pkg_window_instances(reg_width, esc_if_name, rb)} + endfunction : build + endclass : ${reg_block_name} + +endpackage +\ +## +## +## make_ral_pkg_hdr +## ================ +## +## Generate the header for a RAL package +## +## dv_base_prefix as for make_ral_pkg +## +## deps a list of names for packages that should be explicitly +## imported +## +<%def name="make_ral_pkg_hdr(dv_base_prefix, deps)">\ + // dep packages + import uvm_pkg::*; + import dv_base_reg_pkg::*; +% if dv_base_prefix != "dv_base": + import ${dv_base_prefix}_reg_pkg::*; +% endif +% for dep in deps: + import ${dep}::*; +% endfor + + // macro includes + `include "uvm_macros.svh"\ +\ +## +## +## make_ral_pkg_fwd_decls +## ====================== +## +## Generate the forward declarations for a RAL package +## +## esc_if_name as for make_ral_pkg +## +## flat_regs a list of Register objects (expanding multiregs) +## +## windows a list of Window objects +## +<%def name="make_ral_pkg_fwd_decls(esc_if_name, flat_regs, windows)">\ + // Forward declare all register/memory/block classes +% for r in flat_regs: + typedef class ${gen_dv.rcname(esc_if_name, r)}; +% endfor +% for w in windows: + typedef class ${gen_dv.mcname(esc_if_name, w)}; +% endfor + typedef class ${gen_dv.bcname(esc_if_name)};\ +\ +## +## +## make_ral_pkg_reg_class +## ====================== +## +## Generate the classes for a register inside a RAL package +## +## dv_base_prefix as for make_ral_pkg +## +## reg_width as for make_ral_pkg +## +## esc_if_name as for make_ral_pkg +## +## reg_block_path as for make_ral_pkg +## +## reg a Register object +<%def name="make_ral_pkg_reg_class(dv_base_prefix, reg_width, esc_if_name, reg_block_path, reg)">\ +<% + reg_name = reg.name.lower() + + is_ext = reg.hwext + for field in reg.fields: + if (field.hwaccess.value[1] == HwAccess.NONE and + field.swaccess.swrd() == SwRdAccess.RD and + not field.swaccess.allows_write()): + is_ext = 1 + + class_name = gen_dv.rcname(esc_if_name, reg) +%>\ + class ${class_name} extends ${dv_base_prefix}_reg; + // fields +% for f in reg.fields: + rand ${dv_base_prefix}_reg_field ${f.name.lower()}; +% endfor + + `uvm_object_utils(${class_name}) + + function new(string name = "${class_name}", + int unsigned n_bits = ${reg_width}, + int has_coverage = UVM_NO_COVERAGE); + super.new(name, n_bits, has_coverage); + endfunction : new + + virtual function void build(csr_excl_item csr_excl = null); + // create fields +% for field in reg.fields: +<% + if len(reg.fields) == 1: + reg_field_name = reg_name + else: + reg_field_name = reg_name + "_" + field.name.lower() +%>\ +${_create_reg_field(dv_base_prefix, reg_width, reg_block_path, 
reg.shadowed, reg.hwext, reg_field_name, field)} +% endfor +% if reg.shadowed and reg.hwext: +<% + shadowed_reg_path = '' + for tag in reg.tags: + parts = tag.split(':') + if parts[0] == 'shadowed_reg_path': + shadowed_reg_path = parts[1] + + if not shadowed_reg_path: + print("ERROR: ext shadow_reg does not have tags for shadowed_reg_path!") + assert 0 + + bit_idx = reg.fields[-1].bits.msb + 1 + +%>\ + add_update_err_alert("${reg.update_err_alert}"); + add_storage_err_alert("${reg.storage_err_alert}"); + add_hdl_path_slice("${shadowed_reg_path}.committed_reg.q", + 0, ${bit_idx}, 0, "BkdrRegPathRtlCommitted"); + add_hdl_path_slice("${shadowed_reg_path}.shadow_reg.q", + 0, ${bit_idx}, 0, "BkdrRegPathRtlShadow"); +% endif +% if is_ext: + set_is_ext_reg(1); +% endif + endfunction : build + endclass : ${class_name}\ +\ +## +## +## _create_reg_field +## ================= +## +## Generate the code that creates a uvm_reg_field object for a field +## in a register. +## +## dv_base_prefix as for make_ral_pkg +## +## reg_width as for make_ral_pkg +## +## reg_block_path as for make_ral_pkg +## +## shadowed true if the field's register is shadowed +## +## hwext true if the field's register is hwext +## +## reg_field_name a string with the name to give the field in the HDL +## +## field a Field object +<%def name="_create_reg_field(dv_base_prefix, reg_width, reg_block_path, shadowed, hwext, reg_field_name, field)">\ +<% + field_size = field.bits.width() + if field.swaccess.key == "r0w1c": + field_access = "W1C" + else: + field_access = field.swaccess.value[1].name + + if not field.hwaccess.allows_write(): + field_volatile = 0 + else: + field_volatile = 1 + field_tags = field.tags + + fname = field.name.lower() + type_id_indent = ' ' * (len(fname) + 4) +%>\ + ${fname} = (${dv_base_prefix}_reg_field:: + ${type_id_indent}type_id::create("${fname}")); + ${fname}.configure( + .parent(this), + .size(${field_size}), + .lsb_pos(${field.bits.lsb}), + .access("${field_access}"), + .volatile(${field_volatile}), + .reset(${reg_width}'h${format(field.resval or 0, 'x')}), + .has_reset(1), + .is_rand(1), + .individually_accessible(1)); + ${fname}.set_original_access("${field_access}"); +% if ((field.hwaccess.value[1] == HwAccess.NONE and\ + field.swaccess.swrd() == SwRdAccess.RD and\ + not field.swaccess.allows_write())): + // constant reg + add_hdl_path_slice("${reg_block_path}.${reg_field_name}_qs", + ${field.bits.lsb}, ${field_size}, 0, "BkdrRegPathRtl"); +% else: + add_hdl_path_slice("${reg_block_path}.u_${reg_field_name}.q${"s" if hwext else ""}", + ${field.bits.lsb}, ${field_size}, 0, "BkdrRegPathRtl"); +% endif +% if shadowed and not hwext: + add_hdl_path_slice("${reg_block_path}.u_${reg_field_name}.committed_reg.q", + ${field.bits.lsb}, ${field_size}, 0, "BkdrRegPathRtlCommitted"); + add_hdl_path_slice("${reg_block_path}.u_${reg_field_name}.shadow_reg.q", + ${field.bits.lsb}, ${field_size}, 0, "BkdrRegPathRtlShadow"); +% endif +% if field_tags: + // create field tags +% for field_tag in field_tags: +<% + tag = field_tag.split(":") +%>\ +% if tag[0] == "excl": + csr_excl.add_excl(${field.name.lower()}.get_full_name(), ${tag[2]}, ${tag[1]}); +% endif +% endfor +% endif +\ +## +## +## make_ral_pkg_window_class +## ========================= +## +## Generate the classes for a window inside a RAL package +## +## dv_base_prefix as for make_ral_pkg +## +## esc_if_name as for make_ral_pkg +## +## window a Window object +<%def name="make_ral_pkg_window_class(dv_base_prefix, esc_if_name, window)">\ +<% + mem_name = 
window.name.lower() + mem_right = window.swaccess.dv_rights() + mem_n_bits = window.validbits + mem_size = window.items + + class_name = gen_dv.mcname(esc_if_name, window) +%>\ + class ${class_name} extends ${dv_base_prefix}_mem; + + `uvm_object_utils(${class_name}) + + function new(string name = "${class_name}", + longint unsigned size = ${mem_size}, + int unsigned n_bits = ${mem_n_bits}, + string access = "${mem_right}", + int has_coverage = UVM_NO_COVERAGE); + super.new(name, size, n_bits, access, has_coverage); +% if window.byte_write: + set_mem_partial_write_support(1); +% endif + endfunction : new + + endclass : ${class_name} +\ +## +## +## make_ral_pkg_window_instances +## ============================= +## +## Generate the classes for a window inside a RAL package +## +## reg_width as for make_ral_pkg +## +## esc_if_name as for make_ral_pkg +## +## rb a RegBlock object +## +<%def name="make_ral_pkg_window_instances(reg_width, esc_if_name, rb)">\ +% if rb.windows: + + // create memories +% for w in rb.windows: +<% + mem_name = w.name.lower() + mem_right = w.swaccess.dv_rights() + mem_offset = "{}'h{:x}".format(reg_width, w.offset) + mem_n_bits = w.validbits + mem_size = w.items +%>\ + ${mem_name} = ${gen_dv.mcname(esc_if_name, w)}::type_id::create("${mem_name}"); + ${mem_name}.configure(.parent(this)); + default_map.add_mem(.mem(${mem_name}), + .offset(${mem_offset}), + .rights("${mem_right}")); +% endfor +% endif +\ diff --git a/utils/reggen/reggen/validate.py b/utils/reggen/reggen/validate.py new file mode 100644 index 0000000..e1cea7f --- /dev/null +++ b/utils/reggen/reggen/validate.py @@ -0,0 +1,155 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 +""" +Register JSON validation +""" + +import logging as log + + +# validating version of int(x, 0) +# returns int value, error flag +# if error flag is True value will be zero +def check_int(x, err_prefix, suppress_err_msg=False): + if isinstance(x, int): + return x, False + if x[0] == '0' and len(x) > 2: + if x[1] in 'bB': + validch = '01' + elif x[1] in 'oO': + validch = '01234567' + elif x[1] in 'xX': + validch = '0123456789abcdefABCDEF' + else: + if not suppress_err_msg: + log.error(err_prefix + + ": int must start digit, 0b, 0B, 0o, 0O, 0x or 0X") + return 0, True + for c in x[2:]: + if c not in validch: + if not suppress_err_msg: + log.error(err_prefix + ": Bad character " + c + " in " + x) + return 0, True + else: + if not x.isdecimal(): + if not suppress_err_msg: + log.error(err_prefix + ": Number not valid int " + x) + return 0, True + return int(x, 0), False + + +def check_bool(x, err_prefix): + """check_bool checks if input 'x' is one of the list: + "true", "false" + + It returns value as Bool type and Error condition. 
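+    Illustrative usage (the error-prefix string is arbitrary):
+        check_bool("true", "hwext flag")  returns (True, False)
+        check_bool("maybe", "hwext flag") returns (False, True) and logs an error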
+ """ + if isinstance(x, bool): + # if Bool returns as it is + return x, False + if not x.lower() in ["true", "false"]: + log.error(err_prefix + ": Bad field value " + x) + return False, True + else: + return (x.lower() == "true"), False + + +def check_ln(obj, x, withwidth, err_prefix): + error = 0 + if not isinstance(obj[x], list): + log.error(err_prefix + ' element ' + x + ' not a list') + return 1 + for y in obj[x]: + error += check_keys(y, ln_required, ln_optional if withwidth else {}, + {}, err_prefix + ' element ' + x) + if withwidth: + if 'width' in y: + w, err = check_int(y['width'], err_prefix + ' width in ' + x) + if err: + error += 1 + w = 1 + else: + w = 1 + y['width'] = str(w) + + return error + + +def check_keys(obj, required_keys, optional_keys, added_keys, err_prefix): + error = 0 + for x in required_keys: + if x not in obj: + error += 1 + log.error(err_prefix + " missing required key " + x) + for x in obj: + type = None + if x in required_keys: + type = required_keys[x][0] + elif x in optional_keys: + type = optional_keys[x][0] + elif x not in added_keys: + log.warning(err_prefix + " contains extra key " + x) + if type is not None: + if type[:2] == 'ln': + error += check_ln(obj, x, type == 'lnw', err_prefix) + + return error + + +val_types = { + 'd': ["int", "integer (binary 0b, octal 0o, decimal, hex 0x)"], + 'x': ["xint", "x for undefined otherwise int"], + 'b': [ + "bitrange", "bit number as decimal integer, " + "or bit-range as decimal integers msb:lsb" + ], + 'l': ["list", "comma separated list enclosed in `[]`"], + 'ln': [ + "name list", 'comma separated list enclosed in `[]` of ' + 'one or more groups that have just name and dscr keys.' + ' e.g. `{ name: "name", desc: "description"}`' + ], + 'lnw': ["name list+", 'name list that optionally contains a width'], + 'lp': ["parameter list", 'parameter list having default value optionally'], + 'g': ["group", "comma separated group of key:value enclosed in `{}`"], + 'lg': [ + "list of group", "comma separated group of key:value enclosed in `{}`" + " the second entry of the list is the sub group format" + ], + 's': ["string", "string, typically short"], + 't': [ + "text", "string, may be multi-line enclosed in `'''` " + "may use `**bold**`, `*italic*` or `!!Reg` markup" + ], + 'T': ["tuple", "tuple enclosed in ()"], + 'pi': ["python int", "Native Python type int (generated)"], + 'pb': ["python Bool", "Native Python type Bool (generated)"], + 'pl': ["python list", "Native Python type list (generated)"], + 'pe': ["python enum", "Native Python type enum (generated)"] +} + +# ln type has list of groups with only name and description +# (was called "subunit" in cfg_validate) +ln_required = { + 'name': ['s', "name of the item"], + 'desc': ['s', "description of the item"], +} +ln_optional = { + 'width': ['d', "bit width of the item (if not 1)"], +} + +# Registers list may have embedded keys +list_optone = { + 'reserved': ['d', "number of registers to reserve space for"], + 'skipto': ['d', "set next register offset to value"], + 'window': [ + 'g', "group defining an address range " + "for something other than standard registers" + ], + 'multireg': + ['g', "group defining registers generated " + "from a base instance."] +} + +key_use = {'r': "required", 'o': "optional", 'a': "added by tool"} diff --git a/utils/reggen/reggen/version.py b/utils/reggen/reggen/version.py new file mode 100644 index 0000000..3539c46 --- /dev/null +++ b/utils/reggen/reggen/version.py @@ -0,0 +1,24 @@ +# Copyright lowRISC contributors. 
+# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 +r"""Standard version printing +""" +import os +import subprocess +import sys + +import pkg_resources # part of setuptools + + +def show_and_exit(clitool, packages): + util_path = os.path.dirname(os.path.realpath(clitool)) + os.chdir(util_path) + ver = subprocess.run( + ["git", "describe", "--always", "--dirty", "--broken"], + stdout=subprocess.PIPE).stdout.strip().decode('ascii') + if (ver == ''): + ver = 'not found (not in Git repository?)' + sys.stderr.write(clitool + " Git version " + ver + '\n') + for p in packages: + sys.stderr.write(p + ' ' + pkg_resources.require(p)[0].version + '\n') + exit(0) diff --git a/utils/reggen/reggen/window.py b/utils/reggen/reggen/window.py new file mode 100644 index 0000000..d4355c8 --- /dev/null +++ b/utils/reggen/reggen/window.py @@ -0,0 +1,169 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 + +from typing import Dict + +from .access import SWAccess +from .lib import check_keys, check_str, check_bool, check_int +from .params import ReggenParams + + +REQUIRED_FIELDS = { + 'name': ['s', "name of the window"], + 'desc': ['t', "description of the window"], + 'items': ['d', "size in fieldaccess width words of the window"], + 'swaccess': ['s', "software access permitted"], +} + +# TODO potential for additional optional to give more type info? +# eg sram-hw-port: "none", "sync", "async" +OPTIONAL_FIELDS = { + 'data-intg-passthru': [ + 's', "True if the window has data integrity pass through. " + "Defaults to false if not present." + ], + 'byte-write': [ + 's', "True if byte writes are supported. " + "Defaults to false if not present." + ], + 'validbits': [ + 'd', "Number of valid data bits within " + "regwidth sized word. " + "Defaults to regwidth. If " + "smaller than the regwidth then in each " + "word of the window bits " + "[regwidth-1:validbits] are unused and " + "bits [validbits-1:0] are valid." + ], + 'unusual': [ + 's', "True if window has unusual parameters " + "(set to prevent Unusual: errors)." + "Defaults to false if not present." + ] +} + + +class Window: + '''A class representing a memory window''' + def __init__(self, + name: str, + desc: str, + unusual: bool, + byte_write: bool, + data_intg_passthru: bool, + validbits: int, + items: int, + size_in_bytes: int, + offset: int, + swaccess: SWAccess): + assert 0 < validbits + assert 0 < items <= size_in_bytes + + self.name = name + self.desc = desc + self.unusual = unusual + self.byte_write = byte_write + self.data_intg_passthru = data_intg_passthru + self.validbits = validbits + self.items = items + self.size_in_bytes = size_in_bytes + self.offset = offset + self.swaccess = swaccess + + # Check that offset has been adjusted so that the first item in the + # window has all zeros in the low bits. 
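+        # Worked example (hypothetical numbers): a window of 4 items with a
+        # 32-bit regwidth is 16 bytes, so its offset must be 16-byte aligned;
+        # from_raw() below would bump an offset of 0x14 up to 0x20 before
+        # calling this constructor, which is what this assertion re-checks.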
+ po2_size = 1 << (self.size_in_bytes - 1).bit_length() + assert not (offset & (po2_size - 1)) + + @staticmethod + def from_raw(offset: int, + reg_width: int, + params: ReggenParams, + raw: object) -> 'Window': + rd = check_keys(raw, 'window', + list(REQUIRED_FIELDS.keys()), + list(OPTIONAL_FIELDS.keys())) + + wind_desc = 'window at offset {:#x}'.format(offset) + name = check_str(rd['name'], wind_desc) + wind_desc = '{!r} {}'.format(name, wind_desc) + + desc = check_str(rd['desc'], 'desc field for ' + wind_desc) + + unusual = check_bool(rd.get('unusual', False), + 'unusual field for ' + wind_desc) + byte_write = check_bool(rd.get('byte-write', False), + 'byte-write field for ' + wind_desc) + data_intg_passthru = check_bool(rd.get('data-intg-passthru', False), + 'data-intg-passthru field for ' + wind_desc) + + validbits = check_int(rd.get('validbits', reg_width), + 'validbits field for ' + wind_desc) + if validbits <= 0: + raise ValueError('validbits field for {} is not positive.' + .format(wind_desc)) + if validbits > reg_width: + raise ValueError('validbits field for {} is {}, ' + 'which is greater than {}, the register width.' + .format(wind_desc, validbits, reg_width)) + + r_items = check_str(rd['items'], 'items field for ' + wind_desc) + items = params.expand(r_items, 'items field for ' + wind_desc) + if items <= 0: + raise ValueError("Items field for {} is {}, " + "which isn't positive." + .format(wind_desc, items)) + + assert reg_width % 8 == 0 + size_in_bytes = items * (reg_width // 8) + + # Round size_in_bytes up to the next power of 2. The calculation is + # like clog2 calculations in SystemVerilog, where we start with the + # last index, rather than the number of elements. + assert size_in_bytes > 0 + po2_size = 1 << (size_in_bytes - 1).bit_length() + + # A size that isn't a power of 2 is not allowed unless the unusual flag + # is set. + if po2_size != size_in_bytes and not unusual: + raise ValueError('Items field for {} is {}, which gives a size of ' + '{} bytes. This is not a power of 2 (next power ' + 'of 2 is {}). If you want to do this even so, ' + 'set the "unusual" flag.' + .format(wind_desc, items, + size_in_bytes, po2_size)) + + # Adjust offset if necessary to make sure the base address of the first + # item in the window has all zeros in the low bits. + addr_mask = po2_size - 1 + if offset & addr_mask: + offset = (offset | addr_mask) + 1 + offset = offset + + swaccess = SWAccess(wind_desc, rd['swaccess']) + if not (swaccess.value[4] or unusual): + raise ValueError('swaccess field for {} is {}, which is an ' + 'unusual access type for a window. If you want ' + 'to do this, set the "unusual" flag.' + .format(wind_desc, swaccess.key)) + + return Window(name, desc, unusual, byte_write, data_intg_passthru, + validbits, items, size_in_bytes, offset, swaccess) + + def next_offset(self, addrsep: int) -> int: + return self.offset + self.size_in_bytes + + def _asdict(self) -> Dict[str, object]: + rd = { + 'desc': self.desc, + 'items': self.items, + 'swaccess': self.swaccess.key, + 'byte-write': self.byte_write, + 'validbits': self.validbits, + 'unusual': self.unusual + } + if self.name is not None: + rd['name'] = self.name + + return {'window': rd} diff --git a/utils/reggen/regtool.py b/utils/reggen/regtool.py new file mode 100755 index 0000000..76268c9 --- /dev/null +++ b/utils/reggen/regtool.py @@ -0,0 +1,238 @@ +#!/usr/bin/env python3 +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+# SPDX-License-Identifier: Apache-2.0 +r"""Command-line tool to validate and convert register hjson + +""" +import argparse +import logging as log +import re +import sys +from pathlib import PurePath + +from reggen import (gen_cheader, gen_dv, gen_fpv, gen_html, + gen_json, gen_rtl, gen_selfdoc, version) +from reggen.ip_block import IpBlock + +DESC = """regtool, generate register info from Hjson source""" + +USAGE = ''' + regtool [options] + regtool [options] + regtool (-h | --help) + regtool (-V | --version) +''' + + +def main(): + verbose = 0 + + parser = argparse.ArgumentParser( + prog="regtool", + formatter_class=argparse.RawDescriptionHelpFormatter, + usage=USAGE, + description=DESC) + parser.add_argument('input', + nargs='?', + metavar='file', + type=argparse.FileType('r'), + default=sys.stdin, + help='input file in Hjson type') + parser.add_argument('-d', + action='store_true', + help='Output register documentation (html)') + parser.add_argument('--cdefines', + '-D', + action='store_true', + help='Output C defines header') + parser.add_argument('--doc', + action='store_true', + help='Output source file documentation (gfm)') + parser.add_argument('-j', + action='store_true', + help='Output as formatted JSON') + parser.add_argument('-c', action='store_true', help='Output as JSON') + parser.add_argument('-r', + action='store_true', + help='Output as SystemVerilog RTL') + parser.add_argument('-s', + action='store_true', + help='Output as UVM Register class') + parser.add_argument('-f', + action='store_true', + help='Output as FPV CSR rw assertion module') + parser.add_argument('--outdir', + '-t', + help='Target directory for generated RTL; ' + 'tool uses ../rtl if blank.') + parser.add_argument('--dv-base-prefix', + default='dv_base', + help='Prefix for the DV register classes from which ' + 'the register models are derived.') + parser.add_argument('--outfile', + '-o', + type=argparse.FileType('w'), + default=sys.stdout, + help='Target filename for json, html, gfm.') + parser.add_argument('--verbose', + '-v', + action='store_true', + help='Verbose and run validate twice') + parser.add_argument('--param', + '-p', + type=str, + default="", + help='''Change the Parameter values. + Only integer value is supported. + You can add multiple param arguments. + + Format: ParamA=ValA;ParamB=ValB + ''') + parser.add_argument('--version', + '-V', + action='store_true', + help='Show version') + parser.add_argument('--novalidate', + action='store_true', + help='Skip validate, just output json') + + args = parser.parse_args() + + if args.version: + version.show_and_exit(__file__, ["Hjson", "Mako"]) + + verbose = args.verbose + if (verbose): + log.basicConfig(format="%(levelname)s: %(message)s", level=log.DEBUG) + else: + log.basicConfig(format="%(levelname)s: %(message)s") + + # Entries are triples of the form (arg, (format, dirspec)). + # + # arg is the name of the argument that selects the format. format is the + # name of the format. dirspec is None if the output is a single file; if + # the output needs a directory, it is a default path relative to the source + # file (used when --outdir is not given). 
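+    # For example (paths illustrative), `regtool.py -r foo/data/ip.hjson`
+    # selects the 'rtl' format and, with no --outdir given, the generated RTL
+    # is written to foo/rtl/.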
+ arg_to_format = [('j', ('json', None)), ('c', ('compact', None)), + ('d', ('html', None)), ('doc', ('doc', None)), + ('r', ('rtl', 'rtl')), ('s', ('dv', 'dv')), + ('f', ('fpv', 'fpv/vip')), ('cdefines', ('cdh', None))] + format = None + dirspec = None + for arg_name, spec in arg_to_format: + if getattr(args, arg_name): + if format is not None: + log.error('Multiple output formats specified on ' + 'command line ({} and {}).'.format(format, spec[0])) + sys.exit(1) + format, dirspec = spec + if format is None: + format = 'hjson' + + infile = args.input + + # Split parameters into key=value pairs. + raw_params = args.param.split(';') if args.param else [] + params = [] + for idx, raw_param in enumerate(raw_params): + tokens = raw_param.split('=') + if len(tokens) != 2: + raise ValueError('Entry {} in list of parameter defaults to ' + 'apply is {!r}, which is not of the form ' + 'param=value.' + .format(idx, raw_param)) + params.append((tokens[0], tokens[1])) + + # Define either outfile or outdir (but not both), depending on the output + # format. + outfile = None + outdir = None + if dirspec is None: + if args.outdir is not None: + log.error('The {} format expects an output file, ' + 'not an output directory.'.format(format)) + sys.exit(1) + + outfile = args.outfile + else: + if args.outfile is not sys.stdout: + log.error('The {} format expects an output directory, ' + 'not an output file.'.format(format)) + sys.exit(1) + + if args.outdir is not None: + outdir = args.outdir + elif infile is not sys.stdin: + outdir = str(PurePath(infile.name).parents[1].joinpath(dirspec)) + else: + # We're using sys.stdin, so can't infer an output directory name + log.error( + 'The {} format writes to an output directory, which ' + 'cannot be inferred automatically if the input comes ' + 'from stdin. 
Use --outdir to specify it manually.'.format( + format)) + sys.exit(1) + + if format == 'doc': + with outfile: + gen_selfdoc.document(outfile) + exit(0) + + srcfull = infile.read() + + try: + obj = IpBlock.from_text(srcfull, params, infile.name) + except ValueError as err: + log.error(str(err)) + exit(1) + + if args.novalidate: + with outfile: + gen_json.gen_json(obj, outfile, format) + outfile.write('\n') + else: + if format == 'rtl': + return gen_rtl.gen_rtl(obj, outdir) + if format == 'dv': + return gen_dv.gen_dv(obj, args.dv_base_prefix, outdir) + if format == 'fpv': + return gen_fpv.gen_fpv(obj, outdir) + src_lic = None + src_copy = '' + found_spdx = None + found_lunder = None + copy = re.compile(r'.*(copyright.*)|(.*\(c\).*)', re.IGNORECASE) + spdx = re.compile(r'.*(SPDX-License-Identifier:.+)') + lunder = re.compile(r'.*(Licensed under.+)', re.IGNORECASE) + for line in srcfull.splitlines(): + mat = copy.match(line) + if mat is not None: + src_copy += mat.group(1) + mat = spdx.match(line) + if mat is not None: + found_spdx = mat.group(1) + mat = lunder.match(line) + if mat is not None: + found_lunder = mat.group(1) + if found_lunder: + src_lic = found_lunder + if found_spdx: + if src_lic is None: + src_lic = '\n' + found_spdx + else: + src_lic += '\n' + found_spdx + + with outfile: + if format == 'html': + return gen_html.gen_html(obj, outfile) + elif format == 'cdh': + return gen_cheader.gen_cdefines(obj, outfile, src_lic, src_copy) + else: + return gen_json.gen_json(obj, outfile, format) + + outfile.write('\n') + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/utils/reggen/topgen/__init__.py b/utils/reggen/topgen/__init__.py new file mode 100644 index 0000000..d8a3400 --- /dev/null +++ b/utils/reggen/topgen/__init__.py @@ -0,0 +1,8 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 + +from .lib import get_hjsonobj_xbars, search_ips # noqa: F401 +# noqa: F401 These functions are used in topgen.py +from .merge import amend_clocks, merge_top # noqa: F401 +from .validate import validate_top, check_flash # noqa: F401 diff --git a/utils/reggen/topgen/c.py b/utils/reggen/topgen/c.py new file mode 100644 index 0000000..58760a3 --- /dev/null +++ b/utils/reggen/topgen/c.py @@ -0,0 +1,444 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 +"""This contains a class which is used to help generate `top_{name}.h` and +`top_{name}.h`. 
+""" +from collections import OrderedDict +from typing import Dict, List, Optional, Tuple + +from mako.template import Template + +from .lib import get_base_and_size, Name + +from reggen.ip_block import IpBlock + + +class MemoryRegion(object): + def __init__(self, name: Name, base_addr: int, size_bytes: int): + assert isinstance(base_addr, int) + self.name = name + self.base_addr = base_addr + self.size_bytes = size_bytes + self.size_words = (size_bytes + 3) // 4 + + def base_addr_name(self): + return self.name + Name(["base", "addr"]) + + def offset_name(self): + return self.name + Name(["offset"]) + + def size_bytes_name(self): + return self.name + Name(["size", "bytes"]) + + def size_words_name(self): + return self.name + Name(["size", "words"]) + + +class CEnum(object): + def __init__(self, name): + self.name = name + self.enum_counter = 0 + self.finalized = False + + self.constants = [] + + def add_constant(self, constant_name, docstring=""): + assert not self.finalized + + full_name = self.name + constant_name + + value = self.enum_counter + self.enum_counter += 1 + + self.constants.append((full_name, value, docstring)) + + return full_name + + def add_last_constant(self, docstring=""): + assert not self.finalized + + full_name = self.name + Name(["last"]) + + _, last_val, _ = self.constants[-1] + + self.constants.append((full_name, last_val, r"\internal " + docstring)) + self.finalized = True + + def render(self): + template = ("typedef enum ${enum.name.as_snake_case()} {\n" + "% for name, value, docstring in enum.constants:\n" + " ${name.as_c_enum()} = ${value}, /**< ${docstring} */\n" + "% endfor\n" + "} ${enum.name.as_c_type()};") + return Template(template).render(enum=self) + + +class CArrayMapping(object): + def __init__(self, name, output_type_name): + self.name = name + self.output_type_name = output_type_name + + self.mapping = OrderedDict() + + def add_entry(self, in_name, out_name): + self.mapping[in_name] = out_name + + def render_declaration(self): + template = ( + "extern const ${mapping.output_type_name.as_c_type()}\n" + " ${mapping.name.as_snake_case()}[${len(mapping.mapping)}];") + return Template(template).render(mapping=self) + + def render_definition(self): + template = ( + "const ${mapping.output_type_name.as_c_type()}\n" + " ${mapping.name.as_snake_case()}[${len(mapping.mapping)}] = {\n" + "% for in_name, out_name in mapping.mapping.items():\n" + " [${in_name.as_c_enum()}] = ${out_name.as_c_enum()},\n" + "% endfor\n" + "};\n") + return Template(template).render(mapping=self) + + +class TopGenC: + def __init__(self, top_info, name_to_block: Dict[str, IpBlock]): + self.top = top_info + self._top_name = Name(["top"]) + Name.from_snake_case(top_info["name"]) + self._name_to_block = name_to_block + + # The .c file needs the .h file's relative path, store it here + self.header_path = None + + self._init_plic_targets() + self._init_plic_mapping() + self._init_alert_mapping() + self._init_pinmux_mapping() + self._init_pwrmgr_wakeups() + self._init_rstmgr_sw_rsts() + self._init_pwrmgr_reset_requests() + self._init_clkmgr_clocks() + + def devices(self) -> List[Tuple[Tuple[str, Optional[str]], MemoryRegion]]: + '''Return a list of MemoryRegion objects for devices on the bus + + The list returned is pairs (full_if, region) where full_if is itself a + pair (inst_name, if_name). inst_name is the name of some IP block + instantiation. if_name is the name of the interface (may be None). + region is a MemoryRegion object representing the device. 
+ + ''' + ret = [] # type: List[Tuple[Tuple[str, Optional[str]], MemoryRegion]] + for inst in self.top['module']: + block = self._name_to_block[inst['type']] + for if_name, rb in block.reg_blocks.items(): + full_if = (inst['name'], if_name) + full_if_name = Name.from_snake_case(full_if[0]) + if if_name is not None: + full_if_name += Name.from_snake_case(if_name) + + name = self._top_name + full_if_name + base, size = get_base_and_size(self._name_to_block, + inst, if_name) + + region = MemoryRegion(name, base, size) + ret.append((full_if, region)) + + return ret + + def memories(self): + return [(m["name"], + MemoryRegion(self._top_name + Name.from_snake_case(m["name"]), + int(m["base_addr"], 0), + int(m["size"], 0))) + for m in self.top["memory"]] + + def _init_plic_targets(self): + enum = CEnum(self._top_name + Name(["plic", "target"])) + + for core_id in range(int(self.top["num_cores"])): + enum.add_constant(Name(["ibex", str(core_id)]), + docstring="Ibex Core {}".format(core_id)) + + enum.add_last_constant("Final PLIC target") + + self.plic_targets = enum + + def _init_plic_mapping(self): + """We eventually want to generate a mapping from interrupt id to the + source peripheral. + + In order to do so, we generate two enums (one for interrupts, one for + sources), and store the generated names in a dictionary that represents + the mapping. + + PLIC Interrupt ID 0 corresponds to no interrupt, and so no peripheral, + so we encode that in the enum as "unknown". + + The interrupts have to be added in order, with "none" first, to ensure + that they get the correct mapping to their PLIC id, which is used for + addressing the right registers and bits. + """ + sources = CEnum(self._top_name + Name(["plic", "peripheral"])) + interrupts = CEnum(self._top_name + Name(["plic", "irq", "id"])) + plic_mapping = CArrayMapping( + self._top_name + Name(["plic", "interrupt", "for", "peripheral"]), + sources.name) + + unknown_source = sources.add_constant(Name(["unknown"]), + docstring="Unknown Peripheral") + none_irq_id = interrupts.add_constant(Name(["none"]), + docstring="No Interrupt") + plic_mapping.add_entry(none_irq_id, unknown_source) + + # When we generate the `interrupts` enum, the only info we have about + # the source is the module name. We'll use `source_name_map` to map a + # short module name to the full name object used for the enum constant. + source_name_map = {} + + for name in self.top["interrupt_module"]: + source_name = sources.add_constant(Name.from_snake_case(name), + docstring=name) + source_name_map[name] = source_name + + sources.add_last_constant("Final PLIC peripheral") + + for intr in self.top["interrupt"]: + # Some interrupts are multiple bits wide. 
Here we deal with that by + # adding a bit-index suffix + if "width" in intr and int(intr["width"]) != 1: + for i in range(int(intr["width"])): + name = Name.from_snake_case(intr["name"]) + Name([str(i)]) + irq_id = interrupts.add_constant(name, + docstring="{} {}".format( + intr["name"], i)) + source_name = source_name_map[intr["module_name"]] + plic_mapping.add_entry(irq_id, source_name) + else: + name = Name.from_snake_case(intr["name"]) + irq_id = interrupts.add_constant(name, docstring=intr["name"]) + source_name = source_name_map[intr["module_name"]] + plic_mapping.add_entry(irq_id, source_name) + + interrupts.add_last_constant("The Last Valid Interrupt ID.") + + self.plic_sources = sources + self.plic_interrupts = interrupts + self.plic_mapping = plic_mapping + + def _init_alert_mapping(self): + """We eventually want to generate a mapping from alert id to the source + peripheral. + + In order to do so, we generate two enums (one for alerts, one for + sources), and store the generated names in a dictionary that represents + the mapping. + + Alert Handler has no concept of "no alert", unlike the PLIC. + + The alerts have to be added in order, to ensure that they get the + correct mapping to their alert id, which is used for addressing the + right registers and bits. + """ + sources = CEnum(self._top_name + Name(["alert", "peripheral"])) + alerts = CEnum(self._top_name + Name(["alert", "id"])) + alert_mapping = CArrayMapping( + self._top_name + Name(["alert", "for", "peripheral"]), + sources.name) + + # When we generate the `alerts` enum, the only info we have about the + # source is the module name. We'll use `source_name_map` to map a short + # module name to the full name object used for the enum constant. + source_name_map = {} + + for name in self.top["alert_module"]: + source_name = sources.add_constant(Name.from_snake_case(name), + docstring=name) + source_name_map[name] = source_name + + sources.add_last_constant("Final Alert peripheral") + + for alert in self.top["alert"]: + if "width" in alert and int(alert["width"]) != 1: + for i in range(int(alert["width"])): + name = Name.from_snake_case(alert["name"]) + Name([str(i)]) + irq_id = alerts.add_constant(name, + docstring="{} {}".format( + alert["name"], i)) + source_name = source_name_map[alert["module_name"]] + alert_mapping.add_entry(irq_id, source_name) + else: + name = Name.from_snake_case(alert["name"]) + alert_id = alerts.add_constant(name, docstring=alert["name"]) + source_name = source_name_map[alert["module_name"]] + alert_mapping.add_entry(alert_id, source_name) + + alerts.add_last_constant("The Last Valid Alert ID.") + + self.alert_sources = sources + self.alert_alerts = alerts + self.alert_mapping = alert_mapping + + def _init_pinmux_mapping(self): + """Generate C enums for addressing pinmux registers and in/out selects. + + Inputs/outputs are connected in the order the modules are listed in + the hjson under the "mio_modules" key. For each module, the corresponding + inouts are connected first, followed by either the inputs or the outputs. + + Inputs: + - Peripheral chooses register field (pinmux_peripheral_in) + - Insel chooses MIO input (pinmux_insel) + + Outputs: + - MIO chooses register field (pinmux_mio_out) + - Outsel chooses peripheral output (pinmux_outsel) + + Insel and outsel have some special values which are captured here too. 
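+
+        For a hypothetical top named "chimera", the generated insel enum
+        would begin with the two special values, rendered roughly as:
+
+            kTopChimeraPinmuxInselConstantZero = 0, /**< Tie constantly to zero */
+            kTopChimeraPinmuxInselConstantOne = 1, /**< Tie constantly to one */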
+ """ + pinmux_info = self.top['pinmux'] + pinout_info = self.top['pinout'] + + # Peripheral Inputs + peripheral_in = CEnum(self._top_name + + Name(['pinmux', 'peripheral', 'in'])) + i = 0 + for sig in pinmux_info['ios']: + if sig['connection'] == 'muxed' and sig['type'] in ['inout', 'input']: + index = Name([str(sig['idx'])]) if sig['idx'] != -1 else Name([]) + name = Name.from_snake_case(sig['name']) + index + peripheral_in.add_constant(name, docstring='Peripheral Input {}'.format(i)) + i += 1 + + peripheral_in.add_last_constant('Last valid peripheral input') + + # Pinmux Input Selects + insel = CEnum(self._top_name + Name(['pinmux', 'insel'])) + insel.add_constant(Name(['constant', 'zero']), + docstring='Tie constantly to zero') + insel.add_constant(Name(['constant', 'one']), + docstring='Tie constantly to one') + i = 0 + for pad in pinout_info['pads']: + if pad['connection'] == 'muxed': + insel.add_constant(Name([pad['name']]), + docstring='MIO Pad {}'.format(i)) + i += 1 + insel.add_last_constant('Last valid insel value') + + # MIO Outputs + mio_out = CEnum(self._top_name + Name(['pinmux', 'mio', 'out'])) + i = 0 + for pad in pinout_info['pads']: + if pad['connection'] == 'muxed': + mio_out.add_constant(Name.from_snake_case(pad['name']), + docstring='MIO Pad {}'.format(i)) + i += 1 + mio_out.add_last_constant('Last valid mio output') + + # Pinmux Output Selects + outsel = CEnum(self._top_name + Name(['pinmux', 'outsel'])) + outsel.add_constant(Name(['constant', 'zero']), + docstring='Tie constantly to zero') + outsel.add_constant(Name(['constant', 'one']), + docstring='Tie constantly to one') + outsel.add_constant(Name(['constant', 'high', 'z']), + docstring='Tie constantly to high-Z') + i = 0 + for sig in pinmux_info['ios']: + if sig['connection'] == 'muxed' and sig['type'] in ['inout', 'output']: + index = Name([str(sig['idx'])]) if sig['idx'] != -1 else Name([]) + name = Name.from_snake_case(sig['name']) + index + outsel.add_constant(name, docstring='Peripheral Output {}'.format(i)) + i += 1 + + outsel.add_last_constant('Last valid outsel value') + + self.pinmux_peripheral_in = peripheral_in + self.pinmux_insel = insel + self.pinmux_mio_out = mio_out + self.pinmux_outsel = outsel + + def _init_pwrmgr_wakeups(self): + enum = CEnum(self._top_name + + Name(["power", "manager", "wake", "ups"])) + + for signal in self.top["wakeups"]: + enum.add_constant( + Name.from_snake_case(signal["module"]) + + Name.from_snake_case(signal["name"])) + + enum.add_last_constant("Last valid pwrmgr wakeup signal") + + self.pwrmgr_wakeups = enum + + # Enumerates the positions of all software controllable resets + def _init_rstmgr_sw_rsts(self): + sw_rsts = [ + rst for rst in self.top["resets"]["nodes"] + if 'sw' in rst and rst['sw'] == 1 + ] + + enum = CEnum(self._top_name + + Name(["reset", "manager", "sw", "resets"])) + + for rst in sw_rsts: + enum.add_constant(Name.from_snake_case(rst["name"])) + + enum.add_last_constant("Last valid rstmgr software reset request") + + self.rstmgr_sw_rsts = enum + + def _init_pwrmgr_reset_requests(self): + enum = CEnum(self._top_name + + Name(["power", "manager", "reset", "requests"])) + + for signal in self.top["reset_requests"]: + enum.add_constant( + Name.from_snake_case(signal["module"]) + + Name.from_snake_case(signal["name"])) + + enum.add_last_constant("Last valid pwrmgr reset_request signal") + + self.pwrmgr_reset_requests = enum + + def _init_clkmgr_clocks(self): + """ + Creates CEnums for accessing the software-controlled clocks in the + design. 
+ + The logic here matches the logic in topgen.py in how it instantiates the + clock manager with the described clocks. + + We differentiate "gateable" clocks and "hintable" clocks because the + clock manager has separate register interfaces for each group. + """ + + aon_clocks = set() + for src in self.top['clocks']['srcs'] + self.top['clocks'][ + 'derived_srcs']: + if src['aon'] == 'yes': + aon_clocks.add(src['name']) + + gateable_clocks = CEnum(self._top_name + Name(["gateable", "clocks"])) + hintable_clocks = CEnum(self._top_name + Name(["hintable", "clocks"])) + + # This replicates the behaviour in `topgen.py` in deriving `hints` and + # `sw_clocks`. + for group in self.top['clocks']['groups']: + for (name, source) in group['clocks'].items(): + if source not in aon_clocks: + # All these clocks start with `clk_` which is redundant. + clock_name = Name.from_snake_case(name).remove_part("clk") + docstring = "Clock {} in group {}".format( + name, group['name']) + if group["sw_cg"] == "yes": + gateable_clocks.add_constant(clock_name, docstring) + elif group["sw_cg"] == "hint": + hintable_clocks.add_constant(clock_name, docstring) + + gateable_clocks.add_last_constant("Last Valid Gateable Clock") + hintable_clocks.add_last_constant("Last Valid Hintable Clock") + + self.clkmgr_gateable_clocks = gateable_clocks + self.clkmgr_hintable_clocks = hintable_clocks diff --git a/utils/reggen/topgen/gen_dv.py b/utils/reggen/topgen/gen_dv.py new file mode 100644 index 0000000..5e2beaa --- /dev/null +++ b/utils/reggen/topgen/gen_dv.py @@ -0,0 +1,46 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 + +import logging as log +from typing import Optional, Tuple + +from mako import exceptions # type: ignore +from mako.lookup import TemplateLookup # type: ignore +from pkg_resources import resource_filename + +from reggen.gen_dv import gen_core_file + +from .top import Top + + +def sv_base_addr(top: Top, if_name: Tuple[str, Optional[str]]) -> str: + '''Get the base address of a device interface in SV syntax''' + return "{}'h{:x}".format(top.regwidth, top.if_addrs[if_name]) + + +def gen_dv(top: Top, + dv_base_prefix: str, + outdir: str) -> int: + '''Generate DV RAL model for a Top''' + # Read template + lookup = TemplateLookup(directories=[resource_filename('topgen', '.'), + resource_filename('reggen', '.')]) + uvm_reg_tpl = lookup.get_template('top_uvm_reg.sv.tpl') + + # Expand template + try: + to_write = uvm_reg_tpl.render(top=top, + dv_base_prefix=dv_base_prefix) + except: # noqa: E722 + log.error(exceptions.text_error_template().render()) + return 1 + + # Dump to output file + dest_path = '{}/chip_ral_pkg.sv'.format(outdir) + with open(dest_path, 'w') as fout: + fout.write(to_write) + + gen_core_file(outdir, 'chip', dv_base_prefix, ['chip_ral_pkg.sv']) + + return 0 diff --git a/utils/reggen/topgen/intermodule.py b/utils/reggen/topgen/intermodule.py new file mode 100644 index 0000000..d1048c1 --- /dev/null +++ b/utils/reggen/topgen/intermodule.py @@ -0,0 +1,1005 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+# SPDX-License-Identifier: Apache-2.0 + +import logging as log +import re +from collections import OrderedDict +from enum import Enum +from typing import Dict, List, Tuple + +from reggen.ip_block import IpBlock +from reggen.inter_signal import InterSignal +from reggen.validate import check_int +from topgen import lib + +IM_TYPES = ['uni', 'req_rsp'] +IM_ACTS = ['req', 'rsp', 'rcv'] +IM_VALID_TYPEACT = {'uni': ['req', 'rcv'], 'req_rsp': ['req', 'rsp']} +IM_CONN_TYPE = ['1-to-1', '1-to-N', 'broadcast'] + + +class ImType(Enum): + Uni = 1 + ReqRsp = 2 + + +class ImAct(Enum): + Req = 1 + Rsp = 2 + Rcv = 3 + + +class ImConn(Enum): + OneToOne = 1 # req <-> {rsp,rcv} with same width + OneToN = 2 # req width N <-> N x {rsp,rcv}s width 1 + Broadcast = 3 # req width 1 <-> N x rcvs width 1 + + +def intersignal_format(req: Dict) -> str: + """Determine the signal format of the inter-module connections + + @param[req] Request struct. It has instance name, package format + and etc. + """ + + # TODO: Handle array signal + result = "{req}_{struct}".format(req=req["inst_name"], struct=req["name"]) + + # check signal length if exceeds 100 + + # 7 : space + . + # 3 : _{i|o}( + # 6 : _{req|rsp}), + req_length = 7 + len(req["name"]) + 3 + len(result) + 6 + + if req_length > 100: + logmsg = "signal {0} length cannot be greater than 100" + log.warning(logmsg.format(result)) + log.warning("Please consider shorten the instance name") + return result + + +def get_suffixes(ims: OrderedDict) -> Tuple[str, str]: + """Get suffixes of the struct. + + TL-UL struct uses `h2d`, `d2h` suffixes for req, rsp pair. + """ + if ims["package"] == "tlul_pkg" and ims["struct"] == "tl": + return ("_h2d", "_d2h") + + return ("_req", "_rsp") + + +def add_intermodule_connection(obj: OrderedDict, req_m: str, req_s: str, + rsp_m: str, rsp_s: str): + """Add if doesn't exist the connection + + Add a connection into obj['inter_module']['connect'] dictionary if doesn't exist. + + Parameters: + obj: Top dictionary object + req_m: Requester module name + req_s: Requester signal name + rsp_m: Responder module name + rsp_s: Responder signal name + + Returns: + No return type for this function + """ + req_key = "{}.{}".format(req_m, req_s) + rsp_key = "{}.{}".format(rsp_m, rsp_s) + + connect = obj["inter_module"]["connect"] + if req_key in connect: + # check if rsp has data + if rsp_key in connect[req_key]: + return + req_key.append(rsp_key) + return + + # req_key is not in connect: + # check if rsp_key + if rsp_key in connect: + # check if rsp has data + if req_key in connect[rsp_key]: + return + rsp_key.append(req_key) + return + + # Add new key and connect + connect[req_key] = [rsp_key] + + +def autoconnect_xbar(topcfg: OrderedDict, + name_to_block: Dict[str, IpBlock], + xbar: OrderedDict) -> None: + # The crossbar is connecting to modules and memories in topcfg, plus + # possible external connections. Make indices for the modules and memories + # for quick lookup and add some assertions to make sure no name appears in + # multiple places. 
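+    # For example (hypothetical instance names), name_to_module could end up
+    # mapping 'rv_plic' -> its module dict while name_to_memory maps
+    # 'ram_main' -> its memory dict; the assertions below keep the two key
+    # sets free of duplicates and disjoint from each other.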
+ name_to_module = {} + for mod in topcfg['module']: + assert mod['name'] not in name_to_module + if lib.is_inst(mod): + name_to_module[mod['name']] = mod + + name_to_memory = {} + for mem in topcfg['memory']: + assert mem['name'] not in name_to_memory + if lib.is_inst(mem): + name_to_memory[mem['name']] = mem + + # The names of modules and memories should be disjoint + assert not (set(name_to_module.keys()) & set(name_to_memory.keys())) + + external_names = (set(topcfg['inter_module']['top']) | + set(topcfg["inter_module"]["external"].keys())) + + ports = [x for x in xbar["nodes"] if x["type"] in ["host", "device"]] + for port in ports: + # Here, we expect port_name to either be a single identifier (in which + # case, it's taken as the name of some module or memory) to be a dotted + # pair MOD.INAME where MOD is the name of some module and INAME is the + # associated interface name. + name_parts = port['name'].split('.', 1) + port_base = name_parts[0] + port_iname = name_parts[1] if len(name_parts) > 1 else None + esc_name = port['name'].replace('.', '__') + + if port["xbar"]: + if port_iname is not None: + log.error('A crossbar connection may not ' + 'have a target of the form MOD.INAME (saw {!r})' + .format(port['name'])) + continue + + if port["type"] == "host": + # Skip as device will add connection + continue + + # Device port adds signal + add_intermodule_connection(obj=topcfg, + req_m=xbar["name"], + req_s="tl_" + esc_name, + rsp_m=esc_name, + rsp_s="tl_" + xbar["name"]) + continue # xbar port case + + port_mod = name_to_module.get(port_base) + port_mem = name_to_memory.get(port_base) + assert port_mod is None or port_mem is None + + if not (port_mod or port_mem): + # if not in module, memory, should be existed in top or ext field + module_key = "{}.tl_{}".format(xbar["name"], esc_name) + if module_key not in external_names: + log.error("Inter-module key {} cannot be found in module, " + "memory, top, or external lists.".format(module_key)) + + continue + + if port_iname is not None and port_mem is not None: + log.error('Cannot make connection for {!r}: the base of the name ' + 'points to a memory but memories do not support ' + 'interface names.' + .format(port['name'])) + + is_host = port['type'] == 'host' + + # If the hit is a module, it originally came from reggen (via + # merge.py's amend_ip() function). In this case, we should have a + # BusInterfaces object as well as a list of InterSignal objects. + # + # If not, this is a memory that will just have a dictionary of inter + # signals. + if port_mod is not None: + block = name_to_block[port_mod['type']] + try: + sig_name = block.bus_interfaces.find_port_name(is_host, + port_iname) + except KeyError: + log.error('Cannot make {} connection for {!r}: the base of ' + 'the target module has no matching bus interface.' + .format('host' if is_host else 'device', + port['name'])) + continue + else: + inter_signal_list = port_mem['inter_signal_list'] + act = 'req' if is_host else 'rsp' + matches = [ + x for x in inter_signal_list + if (x.get('package') == 'tlul_pkg' and + x['struct'] == 'tl' and + x['act'] == act) + ] + if not matches: + log.error('Cannot make {} connection for {!r}: the memory ' + 'has no signal with an action of {}.' 
+ .format('host' if is_host else 'device', + port['name'], + act)) + continue + + assert len(matches) == 1 + sig_name = matches[0]['name'] + + add_intermodule_connection(obj=topcfg, + req_m=port_base, + req_s=sig_name, + rsp_m=xbar["name"], + rsp_s="tl_" + esc_name) + + +def autoconnect(topcfg: OrderedDict, name_to_block: Dict[str, IpBlock]): + """Matching the connection based on the naming rule + between {memory, module} <-> Xbar. + """ + + # Add xbar connection to the modules, memories + for xbar in topcfg["xbar"]: + autoconnect_xbar(topcfg, name_to_block, xbar) + + +def _get_default_name(sig, suffix): + """Generate default for a net if one does not already exist. + """ + + # The else case covers the scenario where neither package nor default is provided. + # Specifically, the interface is 'logic' and has no default value. + # In this situation, just return 0's + if sig['default']: + return sig['default'] + elif sig['package']: + return "{}::{}_DEFAULT".format(sig['package'], (sig["struct"] + suffix).upper()) + else: + return "'0" + + +def elab_intermodule(topcfg: OrderedDict): + """Check the connection of inter-module and categorize them + + In the top template, it uses updated inter_module fields to create + connections between the modules (incl. memories). This function is to + create and check the validity of the connections `inter_module` using IPs' + `inter_signal_list`. + """ + + list_of_intersignals = [] + + if "inter_signal" not in topcfg: + topcfg["inter_signal"] = OrderedDict() + + # Gather the inter_signal_list + instances = topcfg["module"] + topcfg["memory"] + topcfg["xbar"] + \ + topcfg["host"] + topcfg["port"] + + for x in instances: + old_isl = x.get('inter_signal_list') + if old_isl is None: + continue + + new_isl = [] + for entry in old_isl: + # Convert any InterSignal objects to the expected dictionary format. + sig = (entry.as_dict() + if isinstance(entry, InterSignal) + else entry.copy()) + + # Add instance name to the entry and add to list_of_intersignals + sig["inst_name"] = x["name"] + list_of_intersignals.append(sig) + new_isl.append(sig) + + x['inter_signal_list'] = new_isl + + # Add field to the topcfg + topcfg["inter_signal"]["signals"] = list_of_intersignals + + # TODO: Cross check Can be done here not in validate as ipobj is not + # available in validate + error = check_intermodule(topcfg, "Inter-module Check") + assert error == 0, "Inter-module validation is failed cannot move forward." + + # intermodule + definitions = [] + + # Check the originator + # As inter-module connection allow only 1:1, 1:N, or N:1, pick the most + # common signals. If a requester connects to multiple responders/receivers, + # the requester is main connector so that the definition becomes array. + # + # For example: + # inter_module: { + # 'connect': { + # 'pwr_mgr.pwrup': ['lc.pwrup', 'otp.pwrup'] + # } + # } + # The tool adds `struct [1:0] pwr_mgr_pwrup` + # It doesn't matter whether `pwr_mgr.pwrup` is requester or responder. 
+ # If the key is responder type, then the connection is made in reverse, + # such that `lc.pwrup --> pwr_mgr.pwrup[0]` and + # `otp.pwrup --> pwr_mgr.pwrup[1]` + + uid = 0 # Unique connection ID across the top + + for req, rsps in topcfg["inter_module"]["connect"].items(): + log.info("{req} --> {rsps}".format(req=req, rsps=rsps)) + + # Split index + req_module, req_signal, req_index = filter_index(req) + + # get the module signal + req_struct = find_intermodule_signal(list_of_intersignals, req_module, + req_signal) + + # decide signal format based on the `key` + sig_name = intersignal_format(req_struct) + req_struct["top_signame"] = sig_name + + # Find package in req, rsps + if "package" in req_struct: + package = req_struct["package"] + else: + for rsp in rsps: + rsp_module, rsp_signal, rsp_index = filter_index(rsp) + rsp_struct = find_intermodule_signal(list_of_intersignals, + rsp_module, rsp_signal) + if "package" in rsp_struct: + package = rsp_struct["package"] + break + if not package: + package = "" + + # Add to definition + if req_struct["type"] == "req_rsp": + req_suffix, rsp_suffix = get_suffixes(req_struct) + req_default = _get_default_name(req_struct, req_suffix) + rsp_default = _get_default_name(req_struct, rsp_suffix) + + # based on the active direction of the req_struct, one of the directions does not + # need a default since it will be an output + if (req_struct["act"] == 'req'): + req_default = '' + else: + rsp_default = '' + + # Add two definitions + definitions.append( + OrderedDict([('package', package), + ('struct', req_struct["struct"] + req_suffix), + ('signame', sig_name + "_req"), + ('width', req_struct["width"]), + ('type', req_struct["type"]), + ('end_idx', req_struct["end_idx"]), + ('act', req_struct["act"]), + ('suffix', "req"), + ('default', req_default)])) + definitions.append( + OrderedDict([('package', package), + ('struct', req_struct["struct"] + rsp_suffix), + ('signame', sig_name + "_rsp"), + ('width', req_struct["width"]), + ('type', req_struct["type"]), + ('end_idx', req_struct["end_idx"]), + ('act', req_struct["act"]), + ('suffix', "rsp"), + ('default', rsp_default)])) + else: + # unidirection + default = _get_default_name(req_struct, "") + definitions.append( + OrderedDict([('package', package), + ('struct', req_struct["struct"]), + ('signame', sig_name), + ('width', req_struct["width"]), + ('type', req_struct["type"]), + ('end_idx', req_struct["end_idx"]), + ('act', req_struct["act"]), + ('suffix', ""), + ('default', default)])) + + req_struct["index"] = -1 + + for i, rsp in enumerate(rsps): + # Split index + rsp_module, rsp_signal, rsp_index = filter_index(rsp) + + rsp_struct = find_intermodule_signal(list_of_intersignals, + rsp_module, rsp_signal) + + # determine the signal name + + rsp_struct["top_signame"] = sig_name + if req_struct["type"] == "uni" and req_struct[ + "top_type"] == "broadcast": + rsp_struct["index"] = -1 + elif rsp_struct["width"] == req_struct["width"] and len(rsps) == 1: + rsp_struct["index"] = -1 + else: + rsp_struct["index"] = -1 if req_struct["width"] == 1 else i + + # Assume it is logic + # req_rsp doesn't allow logic + if req_struct["struct"] == "logic": + assert req_struct[ + "type"] != "req_rsp", "logic signal cannot have req_rsp type" + + # increase Unique ID + uid += 1 + + # TODO: Check unconnected port + if "top" not in topcfg["inter_module"]: + topcfg["inter_module"]["top"] = [] + + for s in topcfg["inter_module"]["top"]: + sig_m, sig_s, sig_i = filter_index(s) + assert sig_i == -1, 'top net connection should not 
use bit index' + sig = find_intermodule_signal(list_of_intersignals, sig_m, sig_s) + sig_name = intersignal_format(sig) + sig["top_signame"] = sig_name + if "index" not in sig: + sig["index"] = -1 + + if sig["type"] == "req_rsp": + req_suffix, rsp_suffix = get_suffixes(sig) + # Add two definitions + definitions.append( + OrderedDict([('package', sig["package"]), + ('struct', sig["struct"] + req_suffix), + ('signame', sig_name + "_req"), + ('width', sig["width"]), ('type', sig["type"]), + ('end_idx', -1), + ('default', sig["default"])])) + definitions.append( + OrderedDict([('package', sig["package"]), + ('struct', sig["struct"] + rsp_suffix), + ('signame', sig_name + "_rsp"), + ('width', sig["width"]), ('type', sig["type"]), + ('end_idx', -1), + ('default', sig["default"])])) + else: # if sig["type"] == "uni": + definitions.append( + OrderedDict([('package', sig["package"]), + ('struct', sig["struct"]), ('signame', sig_name), + ('width', sig["width"]), ('type', sig["type"]), + ('end_idx', -1), + ('default', sig["default"])])) + + topcfg["inter_module"].setdefault('external', []) + topcfg["inter_signal"].setdefault('external', []) + + for s, port in topcfg["inter_module"]["external"].items(): + sig_m, sig_s, sig_i = filter_index(s) + assert sig_i == -1, 'top net connection should not use bit index' + sig = find_intermodule_signal(list_of_intersignals, sig_m, sig_s) + + # To make netname `_o` or `_i` + sig['external'] = True + + sig_name = port if port != "" else intersignal_format(sig) + + # if top signame already defined, then a previous connection category + # is already connected to external pin. Sig name is only used for + # port definition + conn_type = False + if "top_signame" not in sig: + sig["top_signame"] = sig_name + else: + conn_type = True + + if "index" not in sig: + sig["index"] = -1 + + # Add the port definition to top external ports + index = sig["index"] + netname = sig["top_signame"] + if sig["type"] == "req_rsp": + req_suffix, rsp_suffix = get_suffixes(sig) + if sig["act"] == "req": + req_sigsuffix, rsp_sigsuffix = ("_o", "_i") + + else: + req_sigsuffix, rsp_sigsuffix = ("_i", "_o") + + topcfg["inter_signal"]["external"].append( + OrderedDict([('package', sig["package"]), + ('struct', sig["struct"] + req_suffix), + ('signame', sig_name + "_req" + req_sigsuffix), + ('width', sig["width"]), ('type', sig["type"]), + ('default', sig["default"]), + ('direction', + 'out' if sig['act'] == "req" else 'in'), + ('conn_type', conn_type), + ('index', index), + ('netname', netname + req_suffix)])) + topcfg["inter_signal"]["external"].append( + OrderedDict([('package', sig["package"]), + ('struct', sig["struct"] + rsp_suffix), + ('signame', sig_name + "_rsp" + rsp_sigsuffix), + ('width', sig["width"]), ('type', sig["type"]), + ('default', sig["default"]), + ('direction', + 'in' if sig['act'] == "req" else 'out'), + ('conn_type', conn_type), + ('index', index), + ('netname', netname + rsp_suffix)])) + else: # uni + if sig["act"] == "req": + sigsuffix = "_o" + else: + sigsuffix = "_i" + topcfg["inter_signal"]["external"].append( + OrderedDict([('package', sig.get("package", "")), + ('struct', sig["struct"]), + ('signame', sig_name + sigsuffix), + ('width', sig["width"]), ('type', sig["type"]), + ('default', sig["default"]), + ('direction', + 'out' if sig['act'] == "req" else 'in'), + ('conn_type', conn_type), + ('index', index), + ('netname', netname)])) + + for sig in topcfg["inter_signal"]["signals"]: + # Check if it exist in definitions + if "top_signame" in sig: + continue + + # Set 
index to -1 + sig["index"] = -1 + + # TODO: Handle the unconnected port rule + + if "definitions" not in topcfg["inter_signal"]: + topcfg["inter_signal"]["definitions"] = definitions + + +def filter_index(signame: str) -> Tuple[str, str, int]: + """If the signal has array indicator `[N]` then split and return name and + array index. If not, array index is -1. + + param signame module.sig{[N]} + + result (module_name, signal_name, array_index) + """ + m = re.match(r'(\w+)\.(\w+)(\[(\d+)\])*', signame) + + if not m: + # Cannot match the pattern + return "", "", -1 + + if m.group(3): + # array index is not None + return m.group(1), m.group(2), m.group(4) + + return m.group(1), m.group(2), -1 + + +def find_intermodule_signal(sig_list, m_name, s_name) -> Dict: + """Return the intermodule signal structure + """ + + filtered = [ + x for x in sig_list if x["name"] == s_name and x["inst_name"] == m_name + ] + + if len(filtered) == 1: + return filtered[0] + + log.error("Found {num} entry/entries for {m_name}.{s_name}:".format( + num=len(filtered), m_name=m_name, s_name=s_name)) + return None + + +# Validation +def check_intermodule_field(sig: OrderedDict, + prefix: str = "") -> Tuple[int, OrderedDict]: + error = 0 + + # type check + if sig["type"] not in IM_TYPES: + log.error("{prefix} Inter_signal {name} " + "type {type} is incorrect.".format(prefix=prefix, + name=sig["name"], + type=sig["type"])) + error += 1 + + if sig["act"] not in IM_ACTS: + log.error("{prefix} Inter_signal {name} " + "act {act} is incorrect.".format(prefix=prefix, + name=sig["name"], + act=sig["act"])) + error += 1 + + # Check if type and act are matched + if error == 0: + if sig["act"] not in IM_VALID_TYPEACT[sig['type']]: + log.error("{type} and {act} of {name} are not a valid pair." + "".format(type=sig['type'], + act=sig['act'], + name=sig['name'])) + error += 1 + # Check 'width' field + width = 1 + if "width" not in sig: + sig["width"] = 1 + elif not isinstance(sig["width"], int): + width, err = check_int(sig["width"], sig["name"]) + if err: + log.error("{prefix} Inter-module {inst}.{sig} 'width' " + "should be int type.".format(prefix=prefix, + inst=sig["inst_name"], + sig=sig["name"])) + error += 1 + else: + # convert to int value + sig["width"] = width + + # Add empty string if no explicit default for dangling pins is given. + # In that case, dangling pins of type struct will be tied to the default + # parameter in the corresponding package, and dangling pins of type logic + # will be tied off to '0. 
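+    # For instance, a dangling TL-UL struct is later resolved (in im_netname
+    # below) to tlul_pkg::TL_H2D_DEFAULT / TL_D2H_DEFAULT, while a dangling
+    # plain logic signal resolves to '0.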
+ if "default" not in sig: + sig["default"] = "" + + if "package" not in sig: + sig["package"] = "" + + return error, sig + + +def find_otherside_modules(topcfg: OrderedDict, m, + s) -> List[Tuple[str, str, str]]: + """Find far-end port based on given module and signal name + """ + # TODO: handle special cases + special_inst_names = { + ('main', 'tl_rom'): ('tl_adapter_rom', 'tl'), + ('main', 'tl_ram_main'): ('tl_adapter_ram_main', 'tl'), + ('main', 'tl_eflash'): ('tl_adapter_eflash', 'tl'), + ('peri', 'tl_ram_ret_aon'): ('tl_adapter_ram_ret_aon', 'tl'), + ('main', 'tl_corei'): ('rv_core_ibex', 'tl_i'), + ('main', 'tl_cored'): ('rv_core_ibex', 'tl_d'), + ('main', 'tl_dm_sba'): ('dm_top', 'tl_h'), + ('main', 'tl_debug_mem'): ('dm_top', 'tl_d'), + ('peri', 'tl_ast'): ('ast', 'tl') + } + special_result = special_inst_names.get((m, s)) + if special_result is not None: + return [('top', special_result[0], special_result[1])] + + signame = "{}.{}".format(m, s) + for req, rsps in topcfg["inter_module"]["connect"].items(): + if req.startswith(signame): + # return rsps after splitting module instance name and the port + result = [] + for rsp in rsps: + rsp_m, rsp_s, rsp_i = filter_index(rsp) + result.append(('connect', rsp_m, rsp_s)) + return result + + for rsp in rsps: + if signame == rsp: + req_m, req_s, req_i = filter_index(req) + return [('connect', req_m, req_s)] + + # if reaches here, it means either the format is wrong, or floating port. + log.error("`find_otherside_modules()`: " + "No such signal {}.{} exists.".format(m, s)) + return [] + + +def check_intermodule(topcfg: Dict, prefix: str) -> int: + if "inter_module" not in topcfg: + return 0 + + total_error = 0 + + for req, rsps in topcfg["inter_module"]["connect"].items(): + error = 0 + # checking the key, value are in correct format + # Allowed format + # 1. module.signal + # 2. module.signal[index] // Remember array is not yet supported + # // But allow in format checker + # + # Example: + # inter_module: { + # 'connect': { + # 'flash_ctrl.flash': ['eflash.flash_ctrl'], + # 'life_cycle.provision': ['debug_tap.dbg_en', 'dft_ctrl.en'], + # 'otp.pwr_hold': ['pwrmgr.peri[0]'], + # 'flash_ctrl.pwr_hold': ['pwrmgr.peri[1]'], + # } + # } + # + # If length of value list is > 1, then key should be array (width need to match) + # If key is format #2, then length of value list shall be 1 + # If one of the value is format #2, then the key should be 1 bit width and + # entries of value list should be 1 + req_m, req_s, req_i = filter_index(req) + + if req_s == "": + log.error( + "Cannot parse the inter-module signal key '{req}'".format( + req=req)) + error += 1 + + # Check rsps signal format is list + if not isinstance(rsps, list): + log.error("Value of key '{req}' should be a list".format(req=req)) + error += 1 + continue + + req_struct = find_intermodule_signal(topcfg["inter_signal"]["signals"], + req_m, req_s) + + err, req_struct = check_intermodule_field(req_struct) + error += err + + if req_i != -1 and len(rsps) != 1: + # Array format should have one entry + log.error( + "If key {req} has index, only one entry is allowed.".format( + req=req)) + error += 1 + + total_width = 0 + widths = [] + + # Check rsp format + for i, rsp in enumerate(rsps): + rsp_m, rsp_s, rsp_i = filter_index(rsp) + if rsp_s == "": + log.error( + "Cannot parse the inter-module signal key '{req}->{rsp}'". 
+ format(req=req, rsp=rsp)) + error += 1 + + rsp_struct = find_intermodule_signal( + topcfg["inter_signal"]["signals"], rsp_m, rsp_s) + + err, rsp_struct = check_intermodule_field(rsp_struct) + error += err + + total_width += rsp_struct["width"] + widths.append(rsp_struct["width"]) + + # Type check + # If no package was declared, it is declared with an empty string + if not rsp_struct["package"]: + rsp_struct["package"] = req_struct.get("package", "") + elif req_struct["package"] != rsp_struct["package"]: + log.error( + "Inter-module package should be matched: " + "{req}->{rsp} exp({expected}), actual({actual})".format( + req=req_struct["name"], + rsp=rsp_struct["name"], + expected=req_struct["package"], + actual=rsp_struct["package"])) + error += 1 + if req_struct["type"] != rsp_struct["type"]: + log.error( + "Inter-module type should be matched: " + "{req}->{rsp} exp({expected}), actual({actual})".format( + req=req_struct["name"], + rsp=rsp_struct["name"], + expected=req_struct["type"], + actual=rsp_struct["type"])) + error += 1 + + # If len(rsps) is 1, then the width should be matched to req + if req_struct["width"] != 1: + if rsp_struct["width"] not in [1, req_struct["width"]]: + log.error( + "If req {req} is an array, " + "rsp {rsp} shall be non-array or array with same width" + .format(req=req, rsp=rsp)) + error += 1 + + elif rsp_i != -1: + # If rsp has index, req should be width 1 + log.error( + "If rsp {rsp} has an array index, only one-to-one map is allowed." + .format(rsp=rsp)) + error += 1 + + # Determine if broadcast or one-to-N + log.debug("Handling inter-sig {} {}".format(req_struct['name'], total_width)) + req_struct["end_idx"] = -1 + if req_struct["width"] > 1 or len(rsps) != 1: + # If req width is same to the every width of rsps ==> broadcast + if len(rsps) * [req_struct["width"]] == widths: + log.debug("broadcast type") + req_struct["top_type"] = "broadcast" + + # If req width is same as total width of rsps ==> one-to-N + elif req_struct["width"] == total_width: + log.debug("one-to-N type") + req_struct["top_type"] = "one-to-N" + + # one-to-N connection is not fully populated + elif req_struct["width"] > total_width: + log.debug("partial one-to-N type") + req_struct["top_type"] = "partial-one-to-N" + req_struct["end_idx"] = len(rsps) + + # If not, error + else: + log.error("'uni' type connection {req} should be either " + "OneToN or Broadcast".format(req=req)) + error += 1 + + elif req_struct["type"] == "uni": + # one-to-one connection + req_struct["top_type"] = "broadcast" + + # If req is array, it is not allowed to have partial connections. + # Doing for loop again here: Make code separate from other checker + # for easier maintenance + total_error += error + + if error != 0: + # Skip the check + continue + + for item in topcfg["inter_module"]["top"] + list( + topcfg["inter_module"]["external"].keys()): + sig_m, sig_s, sig_i = filter_index(item) + if sig_i != -1: + log.error("{item} cannot have index".format(item=item)) + total_error += 1 + + sig_struct = find_intermodule_signal(topcfg["inter_signal"]["signals"], + sig_m, sig_s) + err, sig_struct = check_intermodule_field(sig_struct) + total_error += err + + return total_error + + +# Template functions +def im_defname(obj: OrderedDict) -> str: + """return definition struct name + + e.g. 
flash_ctrl::flash_req_t + """ + if obj["package"] == "": + # should be logic + return "logic" + + return "{package}::{struct}_t".format(package=obj["package"], + struct=obj["struct"]) + + +def im_netname(sig: OrderedDict, + suffix: str = "", default_name=False) -> str: + """return top signal name with index + + It also adds suffix for external signal. + + The default name input forces function to return default name, even if object + has a connection. + """ + + # Basic check and add missing fields + err, obj = check_intermodule_field(sig) + assert not err + + # Floating signals + # TODO: Find smarter way to assign default? + if "top_signame" not in obj or default_name: + if obj["act"] == "req" and suffix == "req": + return "" + if obj["act"] == "rsp" and suffix == "rsp": + return "" + if obj["act"] == "req" and suffix == "rsp": + # custom default has been specified + if obj["default"]: + return obj["default"] + if obj["package"] == "tlul_pkg" and obj["struct"] == "tl": + return "{package}::{struct}_D2H_DEFAULT".format( + package=obj["package"], struct=obj["struct"].upper()) + return "{package}::{struct}_RSP_DEFAULT".format( + package=obj["package"], struct=obj["struct"].upper()) + if obj["act"] == "rsp" and suffix == "req": + # custom default has been specified + if obj["default"]: + return obj["default"] + if obj.get("package") == "tlul_pkg" and obj["struct"] == "tl": + return "{package}::{struct}_H2D_DEFAULT".format( + package=obj["package"], struct=obj["struct"].upper()) + # default is used for dangling ports in definitions. + # the struct name already has `_req` suffix + return "{package}::{struct}_REQ_DEFAULT".format( + package=obj.get("package", ''), struct=obj["struct"].upper()) + if obj["act"] == "rcv" and suffix == "" and obj["struct"] == "logic": + # custom default has been specified + if obj["default"]: + return obj["default"] + return "'0" + if obj["act"] == "rcv" and suffix == "": + # custom default has been specified + if obj["default"]: + return obj["default"] + return "{package}::{struct}_DEFAULT".format( + package=obj["package"], struct=obj["struct"].upper()) + + return "" + + # Connected signals + assert suffix in ["", "req", "rsp"] + + suffix_s = "_{suffix}".format(suffix=suffix) if suffix != "" else suffix + + # External signal handling + if "external" in obj and obj["external"]: + pairs = { + # act , suffix: additional suffix + ("req", "req"): "_o", + ("req", "rsp"): "_i", + ("rsp", "req"): "_i", + ("rsp", "rsp"): "_o", + ("req", ""): "_o", + ("rcv", ""): "_i" + } + suffix_s += pairs[(obj['act'], suffix)] + + return "{top_signame}{suffix}{index}".format( + top_signame=obj["top_signame"], + suffix=suffix_s, + index=lib.index(obj["index"])) + + +def im_portname(obj: OrderedDict, suffix: str = "") -> str: + """return IP's port name + + e.g signame_o for requester req signal + """ + act = obj['act'] + name = obj['name'] + + if suffix == "": + suffix_s = "_o" if act == "req" else "_i" + elif suffix == "req": + suffix_s = "_o" if act == "req" else "_i" + else: + suffix_s = "_o" if act == "rsp" else "_i" + + return name + suffix_s + + +def get_dangling_im_def(objs: OrderedDict) -> str: + """return partial inter-module definitions + + Dangling intermodule connections happen when a one-to-N assignment + is not fully populated. + + This can result in two types of dangling: + - outgoing requests not used + - incoming responses not driven + + The determination of which category we fall into follows similar rules + as those used by im_netname. 
+ + When the direction of the net is the same as the active direction of the + the connecting module, it is "unused". + + When the direction of the net is opposite of the active direction of the + the connecting module, it is "undriven". + + As an example, edn is defined as "rsp" of a "req_rsp" pair. It is also used + as the "active" module in inter-module connection. If there are not enough + connecting modules, the 'req' line is undriven, while the 'rsp' line is + unused. + + """ + unused_def = [obj for obj in objs if obj['end_idx'] > 0 and + obj['act'] == obj['suffix']] + + undriven_def = [obj for obj in objs if obj['end_idx'] > 0 and + (obj['act'] == 'req' and obj['suffix'] == 'rsp' or + obj['act'] == 'rsp' and obj['suffix'] == 'req')] + + return unused_def, undriven_def diff --git a/utils/reggen/topgen/lib.py b/utils/reggen/topgen/lib.py new file mode 100644 index 0000000..a1354fd --- /dev/null +++ b/utils/reggen/topgen/lib.py @@ -0,0 +1,497 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 + +import logging as log +import re +import sys +from collections import OrderedDict +from copy import deepcopy +from pathlib import Path +from typing import Dict, List, Optional, Tuple + +import hjson + +from reggen.ip_block import IpBlock + +# Ignore flake8 warning as the function is used in the template +# disable isort formating, as conflicting with flake8 +from .intermodule import find_otherside_modules # noqa : F401 # isort:skip +from .intermodule import im_portname, im_defname, im_netname # noqa : F401 # isort:skip +from .intermodule import get_dangling_im_def # noqa : F401 # isort:skip + + +class Name: + """ + We often need to format names in specific ways; this class does so. + + To simplify parsing and reassembling of name strings, this class + stores the name parts as a canonical list of strings internally + (in self.parts). + + The "from_*" functions parse and split a name string into the canonical + list, whereas the "as_*" functions reassemble the canonical list in the + format specified. + + For example, ex = Name.from_snake_case("example_name") gets split into + ["example", "name"] internally, and ex.as_camel_case() reassembles this + internal representation into "ExampleName". + """ + def __add__(self, other): + return Name(self.parts + other.parts) + + @staticmethod + def from_snake_case(input: str) -> 'Name': + return Name(input.split("_")) + + def __init__(self, parts: List[str]): + self.parts = parts + for p in parts: + assert len(p) > 0, "cannot add zero-length name piece" + + def as_snake_case(self) -> str: + return "_".join([p.lower() for p in self.parts]) + + def as_camel_case(self) -> str: + out = "" + for p in self.parts: + # If we're about to join two parts which would introduce adjacent + # numbers, put an underscore between them. 
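+            # e.g. Name.from_snake_case("foo_2_3").as_camel_case() yields
+            # "Foo2_3" rather than the ambiguous "Foo23".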
+ if out[-1:].isnumeric() and p[:1].isnumeric(): + out += "_" + p + else: + out += p.capitalize() + return out + + def as_c_define(self) -> str: + return "_".join([p.upper() for p in self.parts]) + + def as_c_enum(self) -> str: + return "k" + self.as_camel_case() + + def as_c_type(self) -> str: + return self.as_snake_case() + "_t" + + def remove_part(self, part_to_remove: str) -> "Name": + return Name([p for p in self.parts if p != part_to_remove]) + + +def is_ipcfg(ip: Path) -> bool: # return bool + log.info("IP Path: %s" % repr(ip)) + ip_name = ip.parents[1].name + hjson_name = ip.name + + log.info("IP Name(%s) and HJSON name (%s)" % (ip_name, hjson_name)) + + if ip_name + ".hjson" == hjson_name or ip_name + "_reg.hjson" == hjson_name: + return True + return False + + +def search_ips(ip_path): # return list of config files + # list the every Hjson file + p = ip_path.glob('*/data/*.hjson') + + # filter only ip_name/data/ip_name{_reg|''}.hjson + ips = [x for x in p if is_ipcfg(x)] + + log.info("Filtered-in IP files: %s" % repr(ips)) + return ips + + +def is_xbarcfg(xbar_obj): + if "type" in xbar_obj and xbar_obj["type"] == "xbar": + return True + + return False + + +def get_hjsonobj_xbars(xbar_path): + """ Search crossbars Hjson files from given path. + + Search every Hjson in the directory and check Hjson type. + It could be type: "top" or type: "xbar" + returns [(name, obj), ... ] + """ + p = xbar_path.glob('*.hjson') + try: + xbar_objs = [ + hjson.load(x.open('r'), + use_decimal=True, + object_pairs_hook=OrderedDict) for x in p + ] + except ValueError: + raise SystemExit(sys.exc_info()[1]) + + xbar_objs = [x for x in xbar_objs if is_xbarcfg(x)] + + return xbar_objs + + +def get_module_by_name(top, name): + """Search in top["module"] by name + """ + module = None + for m in top["module"]: + if m["name"] == name: + module = m + break + + return module + + +def intersignal_to_signalname(top, m_name, s_name) -> str: + + # TODO: Find the signal in the `inter_module_list` and get the correct signal name + + return "{m_name}_{s_name}".format(m_name=m_name, s_name=s_name) + + +def get_package_name_by_intermodule_signal(top, struct) -> str: + """Search inter-module signal package with the struct name + + For instance, if `flash_ctrl` has inter-module signal package, + this function returns the package name + """ + instances = top["module"] + top["memory"] + + intermodule_instances = [ + x["inter_signal_list"] for x in instances if "inter_signal_list" in x + ] + + for m in intermodule_instances: + if m["name"] == struct and "package" in m: + return m["package"] + return "" + + +def get_signal_by_name(module, name): + """Return the signal struct with the type input/output/inout + """ + result = None + for s in module["available_input_list"] + module[ + "available_output_list"] + module["available_inout_list"]: + if s["name"] == name: + result = s + break + + return result + + +def add_module_prefix_to_signal(signal, module): + """Add module prefix to module signal format { name: "sig_name", width: NN } + """ + result = deepcopy(signal) + + if "name" not in signal: + raise SystemExit("signal {} doesn't have name field".format(signal)) + + result["name"] = module + "_" + signal["name"] + result["module_name"] = module + + return result + + +def get_ms_name(name): + """Split module_name.signal_name to module_name , signal_name + """ + + tokens = name.split('.') + + if len(tokens) == 0: + raise SystemExit("This to be catched in validate.py") + + module = tokens[0] + signal = None + if len(tokens) == 
2: + signal = tokens[1] + + return module, signal + + +def parse_pad_field(padstr): + """Parse PadName[NN...NN] or PadName[NN] or just PadName + """ + match = re.match(r'^([A-Za-z0-9_]+)(\[([0-9]+)(\.\.([0-9]+))?\]|)', padstr) + return match.group(1), match.group(3), match.group(5) + + +def get_pad_list(padstr): + pads = [] + + pad, first, last = parse_pad_field(padstr) + if first is None: + first = 0 + last = 0 + elif last is None: + last = first + first = int(first, 0) + last = int(last, 0) + # width = first - last + 1 + + for p in range(first, last + 1): + pads.append(OrderedDict([("name", pad), ("index", p)])) + + return pads + + +# Template functions +def ljust(x, width): + return "{:<{width}}".format(x, width=width) + + +def bitarray(d, width): + """Print Systemverilog bit array + + @param d the bit width of the signal + @param width max character width of the signal group + + For instance, if width is 4, the max d value in the signal group could be + 9999. If d is 2, then this function pads 3 spaces at the end of the bit + slice. + + "[1:0] " <- d:=2, width=4 + "[9999:0]" <- max d-1 value + + If d is 1, it means array slice isn't necessary. So it returns empty spaces + """ + + if d <= 0: + log.error("lib.bitarray: Given value {} is smaller than 1".format(d)) + raise ValueError + if d == 1: + return " " * (width + 4) # [x:0] needs 4 more space than char_width + + out = "[{}:0]".format(d - 1) + return out + (" " * (width - len(str(d)))) + + +def parameterize(text): + """Return the value wrapping with quote if not integer nor bits + """ + if re.match(r'(\d+\'[hdb]\s*[0-9a-f_A-F]+|[0-9]+)', text) is None: + return "\"{}\"".format(text) + + return text + + +def index(i: int) -> str: + """Return index if it is not -1 + """ + return "[{}]".format(i) if i != -1 else "" + + +def get_clk_name(clk): + """Return the appropriate clk name + """ + if clk == 'main': + return 'clk_i' + else: + return "clk_{}_i".format(clk) + + +def get_reset_path(reset, domain, reset_cfg): + """Return the appropriate reset path given name + """ + # find matching node for reset + node_match = [node for node in reset_cfg['nodes'] if node['name'] == reset] + assert len(node_match) == 1 + reset_type = node_match[0]['type'] + + # find matching path + hier_path = "" + if reset_type == "int": + log.debug("{} used as internal reset".format(reset["name"])) + else: + hier_path = reset_cfg['hier_paths'][reset_type] + + # find domain selection + domain_sel = '' + if reset_type not in ["ext", "int"]: + domain_sel = "[rstmgr_pkg::Domain{}Sel]".format(domain) + + reset_path = "" + if reset_type == "ext": + reset_path = reset + else: + reset_path = "{}rst_{}_n{}".format(hier_path, reset, domain_sel) + + return reset_path + + +def get_unused_resets(top): + """Return dict of unused resets and associated domain + """ + unused_resets = OrderedDict() + unused_resets = { + reset['name']: domain + for reset in top['resets']['nodes'] + for domain in top['power']['domains'] + if reset['type'] == 'top' and domain not in reset['domains'] + } + + log.debug("Unused resets are {}".format(unused_resets)) + return unused_resets + + +def is_templated(module): + """Returns an indication where a particular module is templated + """ + if "attr" not in module: + return False + elif module["attr"] in ["templated"]: + return True + else: + return False + + +def is_top_reggen(module): + """Returns an indication where a particular module is NOT templated + and requires top level specific reggen + """ + if "attr" not in module: + return False + elif 
module["attr"] in ["reggen_top", "reggen_only"]: + return True + else: + return False + + +def is_inst(module): + """Returns an indication where a particular module should be instantiated + in the top level + """ + top_level_module = False + top_level_mem = False + + if "attr" not in module: + top_level_module = True + elif module["attr"] in ["normal", "templated", "reggen_top"]: + top_level_module = True + elif module["attr"] in ["reggen_only"]: + top_level_module = False + else: + raise ValueError('Attribute {} in {} is not valid' + .format(module['attr'], module['name'])) + + if module['type'] in ['rom', 'ram_1p_scr', 'eflash']: + top_level_mem = True + + return top_level_mem or top_level_module + + +def get_base_and_size(name_to_block: Dict[str, IpBlock], + inst: Dict[str, object], + ifname: Optional[str]) -> Tuple[int, int]: + min_device_spacing = 0x1000 + + block = name_to_block.get(inst['type']) + if block is None: + # If inst isn't the instantiation of a block, it came from some memory. + # Memories have their sizes defined, so we can just look it up there. + bytes_used = int(inst['size'], 0) + + # Memories don't have multiple or named interfaces, so this will only + # work if ifname is None. + assert ifname is None + base_addr = inst['base_addr'] + + else: + # If inst is the instantiation of some block, find the register block + # that corresponds to ifname + rb = block.reg_blocks.get(ifname) + if rb is None: + log.error('Cannot connect to non-existent {} device interface ' + 'on {!r} (an instance of the {!r} block)' + .format('default' if ifname is None else repr(ifname), + inst['name'], block.name)) + bytes_used = 0 + else: + bytes_used = 1 << rb.get_addr_width() + + base_addr = inst['base_addrs'][ifname] + + # Round up to min_device_spacing if necessary + size_byte = max(bytes_used, min_device_spacing) + + if isinstance(base_addr, str): + base_addr = int(base_addr, 0) + else: + assert isinstance(base_addr, int) + + return (base_addr, size_byte) + + +def get_io_enum_literal(sig: Dict, prefix: str) -> str: + """Returns the DIO pin enum literal with value assignment""" + name = Name.from_snake_case(prefix) + Name.from_snake_case(sig["name"]) + # In this case, the signal is a multibit signal, and hence + # we have to make the signal index part of the parameter + # name to uniquify it. + if sig['width'] > 1: + name += Name([str(sig['idx'])]) + return name.as_camel_case() + + +def make_bit_concatenation(sig_name: str, + indices: List[int], + end_indent: int) -> str: + '''Return SV code for concatenating certain indices from a signal + + sig_name is the name of the signal and indices is a non-empty list of the + indices to use, MSB first. So + + make_bit_concatenation("foo", [0, 100, 20]) + + should give + + {foo[0], foo[100], foo[20]} + + Adjacent bits turn into a range select. For example: + + make_bit_concatenation("foo", [0, 1, 2]) + + should give + + foo[0:2] + + If there are multiple ranges, they are printed one to a line. end_indent + gives the indentation of the closing brace and the range selects in between + get indented to end_indent + 2. 
+ + ''' + assert 0 <= end_indent + + ranges = [] + cur_range_start = indices[0] + cur_range_end = indices[0] + for idx in indices[1:]: + if idx == cur_range_end + 1 and cur_range_start <= cur_range_end: + cur_range_end += 1 + continue + if idx == cur_range_end - 1 and cur_range_start >= cur_range_end: + cur_range_end -= 1 + continue + ranges.append((cur_range_start, cur_range_end)) + cur_range_start = idx + cur_range_end = idx + ranges.append((cur_range_start, cur_range_end)) + + items = [] + for range_start, range_end in ranges: + if range_start == range_end: + select = str(range_start) + else: + select = '{}:{}'.format(range_start, range_end) + items.append('{}[{}]'.format(sig_name, select)) + + if len(items) == 1: + return items[0] + + item_indent = '\n' + (' ' * (end_indent + 2)) + + acc = ['{', item_indent, items[0]] + for item in items[1:]: + acc += [',', item_indent, item] + acc += ['\n', ' ' * end_indent, '}'] + return ''.join(acc) diff --git a/utils/reggen/topgen/merge.py b/utils/reggen/topgen/merge.py new file mode 100644 index 0000000..6c0a9cd --- /dev/null +++ b/utils/reggen/topgen/merge.py @@ -0,0 +1,1081 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 + +import logging as log +import random +from collections import OrderedDict +from copy import deepcopy +from math import ceil, log2 +from typing import Dict, List + +from topgen import c, lib +from reggen.ip_block import IpBlock +from reggen.params import LocalParam, Parameter, RandParameter + + +def _get_random_data_hex_literal(width): + """ Fetch 'width' random bits and return them as hex literal""" + width = int(width) + literal_str = hex(random.getrandbits(width)) + return literal_str + + +def _get_random_perm_hex_literal(numel): + """ Compute a random permutation of 'numel' elements and + return as packed hex literal""" + num_elements = int(numel) + width = int(ceil(log2(num_elements))) + idx = [x for x in range(num_elements)] + random.shuffle(idx) + literal_str = "" + for k in idx: + literal_str += format(k, '0' + str(width) + 'b') + # convert to hex for space efficiency + literal_str = hex(int(literal_str, 2)) + return literal_str + + +def elaborate_instances(top, name_to_block: Dict[str, IpBlock]): + '''Add additional fields to the elements of top['module'] + + These elements represent instantiations of IP blocks. This function adds + extra fields to them to carry across information from the IpBlock objects + that represent the blocks being instantiated. See elaborate_instance for + more details of what gets added. + + ''' + # Initialize RNG for compile-time netlist constants. + random.seed(int(top['rnd_cnst_seed'])) + + for instance in top['module']: + block = name_to_block[instance['type']] + elaborate_instance(instance, block) + + +def elaborate_instance(instance, block: IpBlock): + """Add additional fields to a single instance of a module. + + instance is the instance to be filled in. block is the block that it's + instantiating. + + Altered fields: + - param_list (list of parameters for the instance) + - inter_signal_list (list of inter-module signals) + - base_addrs (a map from interface name to its base address) + + Removed fields: + - base_addr (this is reflected in base_addrs) + + """ + mod_name = instance["name"] + + # param_list + new_params = [] + for param in block.params.by_name.values(): + if isinstance(param, LocalParam): + # Remove local parameters. 
+ continue + + new_param = param.as_dict() + + param_expose = param.expose if isinstance(param, Parameter) else False + + # Check for security-relevant parameters that are not exposed, + # adding a top-level name. + if param.name.lower().startswith("sec") and not param_expose: + log.warning("{} has security-critical parameter {} " + "not exposed to top".format( + mod_name, param.name)) + + # Move special prefixes to the beginnining of the parameter name. + param_prefixes = ["Sec", "RndCnst"] + cc_mod_name = c.Name.from_snake_case(mod_name).as_camel_case() + name_top = cc_mod_name + param.name + for prefix in param_prefixes: + if param.name.lower().startswith(prefix.lower()): + name_top = (prefix + cc_mod_name + + param.name[len(prefix):]) + break + + new_param['name_top'] = name_top + + # Generate random bits or permutation, if needed + if isinstance(param, RandParameter): + if param.randtype == 'data': + new_default = _get_random_data_hex_literal(param.randcount) + # Effective width of the random vector + randwidth = param.randcount + else: + assert param.randtype == 'perm' + new_default = _get_random_perm_hex_literal(param.randcount) + # Effective width of the random vector + randwidth = param.randcount * ceil(log2(param.randcount)) + + new_param['default'] = new_default + new_param['randwidth'] = randwidth + + new_params.append(new_param) + + instance["param_list"] = new_params + + # These objects get added-to in place by code in intermodule.py, so we have + # to convert and copy them here. + instance["inter_signal_list"] = [s.as_dict() for s in block.inter_signals] + + # An instance must either have a 'base_addr' address or a 'base_addrs' + # address, but can't have both. + base_addrs = instance.get('base_addrs') + if base_addrs is None: + if 'base_addr' not in instance: + log.error('Instance {!r} has neither a base_addr ' + 'nor a base_addrs field.' + .format(instance['name'])) + else: + # If the instance has a base_addr field, make sure that the block + # has just one device interface. + if len(block.reg_blocks) != 1: + log.error('Instance {!r} has a base_addr field but it ' + 'instantiates the block {!r}, which has {} ' + 'device interfaces.' + .format(instance['name'], + block.name, len(block.reg_blocks))) + else: + if_name = next(iter(block.reg_blocks)) + base_addrs = {if_name: instance['base_addr']} + + # Fill in a bogus base address (we don't have proper error handling, so + # have to do *something*) + if base_addrs is None: + base_addrs = {None: 0} + + instance['base_addrs'] = base_addrs + else: + if 'base_addr' in instance: + log.error('Instance {!r} has both a base_addr ' + 'and a base_addrs field.' + .format(instance['name'])) + + # Since the instance already has a base_addrs field, make sure that + # it's got the same set of keys as the name of the interfaces in the + # block. + inst_if_names = set(base_addrs.keys()) + block_if_names = set(block.reg_blocks.keys()) + if block_if_names != inst_if_names: + log.error('Instance {!r} has a base_addrs field with keys {} ' + 'but the block it instantiates ({!r}) has device ' + 'interfaces {}.' 
+ .format(instance['name'], inst_if_names, + block.name, block_if_names)) + + if 'base_addr' in instance: + del instance['base_addr'] + + +# TODO: Replace this part to be configurable from Hjson or template +predefined_modules = { + "corei": "rv_core_ibex", + "cored": "rv_core_ibex", + "dm_sba": "rv_dm", + "debug_mem": "rv_dm" +} + + +def is_xbar(top, name): + """Check if the given name is crossbar + """ + xbars = list(filter(lambda node: node["name"] == name, top["xbar"])) + if len(xbars) == 0: + return False, None + + if len(xbars) > 1: + log.error("Matching crossbar {} is more than one.".format(name)) + raise SystemExit() + + return True, xbars[0] + + +def xbar_addhost(top, xbar, host): + """Add host nodes information + + - xbar: bool, true if the host port is from another Xbar + """ + # Check and fetch host if exists in nodes + obj = list(filter(lambda node: node["name"] == host, xbar["nodes"])) + if len(obj) == 0: + log.warning( + "host %s doesn't exist in the node list. Using default values" % + host) + obj = OrderedDict([ + ("name", host), + ("clock", xbar['clock']), + ("reset", xbar['reset']), + ("type", "host"), + ("inst_type", ""), + ("stub", False), + # The default matches RTL default + # pipeline_byp is don't care if pipeline is false + ("pipeline", "true"), + ("pipeline_byp", "true") + ]) + xbar["nodes"].append(obj) + return + + xbar_bool, xbar_h = is_xbar(top, host) + if xbar_bool: + log.info("host {} is a crossbar. Nothing to deal with.".format(host)) + + obj[0]["xbar"] = xbar_bool + + if 'clock' not in obj[0]: + obj[0]["clock"] = xbar['clock'] + + if 'reset' not in obj[0]: + obj[0]["reset"] = xbar["reset"] + + obj[0]["stub"] = False + obj[0]["inst_type"] = predefined_modules[ + host] if host in predefined_modules else "" + obj[0]["pipeline"] = obj[0]["pipeline"] if "pipeline" in obj[0] else "true" + obj[0]["pipeline_byp"] = obj[0]["pipeline_byp"] if obj[0][ + "pipeline"] == "true" and "pipeline_byp" in obj[0] else "true" + + +def process_pipeline_var(node): + """Add device nodes pipeline / pipeline_byp information + + - Supply a default of true / true if not defined by xbar + """ + node["pipeline"] = node["pipeline"] if "pipeline" in node else "true" + node["pipeline_byp"] = node[ + "pipeline_byp"] if "pipeline_byp" in node else "true" + + +def xbar_adddevice(top: Dict[str, object], + name_to_block: Dict[str, IpBlock], + xbar: Dict[str, object], + other_xbars: List[str], + device: str) -> None: + """Add or amend an entry in xbar['nodes'] to represent the device interface + + - clock: comes from module if exist, use xbar default otherwise + - reset: comes from module if exist, use xbar default otherwise + - inst_type: comes from module or memory if exist. + - base_addr: comes from module or memory, or assume rv_plic? + - size_byte: comes from module or memory + - xbar: bool, true if the device port is another xbar + - stub: There is no backing module / memory, instead a tlul port + is created and forwarded above the current hierarchy + """ + device_parts = device.split('.', 1) + device_base = device_parts[0] + device_ifname = device_parts[1] if len(device_parts) > 1 else None + + # Try to find a block or memory instance with name device_base. Object + # names should be unique, so there should never be more than one hit. + instances = [ + node for node in top["module"] + top["memory"] + if node['name'] == device_base + ] + assert len(instances) <= 1 + inst = instances[0] if instances else None + + # Try to find a node in the crossbar called device. 
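# Editor's note: a minimal standalone sketch (not topgen code) of the
# "<instance>" vs "<instance>.<interface>" device-naming convention that the
# split above handles; the instance/interface names used here are hypothetical.
def split_device_name(device):
    """'pinmux_aon.tl_sleep' -> ('pinmux_aon', 'tl_sleep'); 'uart0' -> ('uart0', None)."""
    parts = device.split('.', 1)
    return parts[0], (parts[1] if len(parts) > 1 else None)

assert split_device_name("uart0") == ("uart0", None)
assert split_device_name("pinmux_aon.tl_sleep") == ("pinmux_aon", "tl_sleep")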
Node names should be + # unique, so there should never be more than one hit. + nodes = [ + node for node in xbar['nodes'] + if node['name'] == device + ] + assert len(nodes) <= 1 + node = nodes[0] if nodes else None + + log.info("Handling xbar device {} (matches instance? {}; matches node? {})" + .format(device, inst is not None, node is not None)) + + # case 1: another xbar --> check in xbar list + if node is None and device in other_xbars: + log.error( + "Another crossbar %s needs to be specified in the 'nodes' list" % + device) + return + + # If there is no module or memory with the right name, this might still be + # ok: we might be connecting to another crossbar or to a predefined module. + if inst is None: + # case 1: Crossbar handling + if device in other_xbars: + log.info( + "device {} in Xbar {} is connected to another Xbar".format( + device, xbar["name"])) + assert node is not None + node["xbar"] = True + node["stub"] = False + process_pipeline_var(node) + return + + # case 2: predefined_modules (debug_mem, rv_plic) + # TODO: Find configurable solution not from predefined but from object? + if device in predefined_modules: + if device == "debug_mem": + if node is None: + # Add new debug_mem + xbar["nodes"].append({ + "name": "debug_mem", + "type": "device", + "clock": xbar['clock'], + "reset": xbar['reset'], + "inst_type": predefined_modules["debug_mem"], + "addr_range": [OrderedDict([ + ("base_addr", top["debug_mem_base_addr"]), + ("size_byte", "0x1000"), + ])], + "xbar": False, + "stub": False, + "pipeline": "true", + "pipeline_byp": "true" + }) # yapf: disable + else: + # Update if exists + node["inst_type"] = predefined_modules["debug_mem"] + node["addr_range"] = [ + OrderedDict([("base_addr", top["debug_mem_base_addr"]), + ("size_byte", "0x1000")]) + ] + node["xbar"] = False + node["stub"] = False + process_pipeline_var(node) + else: + log.error("device %s shouldn't be host type" % device) + + return + + # case 3: not defined + # Crossbar check + log.error("Device %s doesn't exist in 'module', 'memory', predefined, " + "or as a node object" % device) + return + + # If we get here, inst points an instance of some block or memory. It + # shouldn't point at a crossbar (because that would imply a naming clash) + assert device_base not in other_xbars + base_addr, size_byte = lib.get_base_and_size(name_to_block, + inst, device_ifname) + addr_range = {"base_addr": hex(base_addr), "size_byte": hex(size_byte)} + + stub = not lib.is_inst(inst) + + if node is None: + log.error('Cannot connect to {!r} because ' + 'the crossbar defines no node for {!r}.' + .format(device, device_base)) + return + + node["inst_type"] = inst["type"] + node["addr_range"] = [addr_range] + node["xbar"] = False + node["stub"] = stub + process_pipeline_var(node) + + +def amend_xbar(top: Dict[str, object], + name_to_block: Dict[str, IpBlock], + xbar: Dict[str, object]): + """Amend crossbar informations to the top list + + Amended fields + - clock: Adopt from module clock if exists + - inst_type: Module instance some module will be hard-coded + the tool searches module list and memory list then put here + - base_addr: from top["module"] + - size: from top["module"] + """ + xbar_list = [x["name"] for x in top["xbar"]] + if not xbar["name"] in xbar_list: + log.info( + "Xbar %s doesn't belong to the top %s. 
Check if the xbar doesn't need" + % (xbar["name"], top["name"])) + return + + topxbar = list( + filter(lambda node: node["name"] == xbar["name"], top["xbar"]))[0] + + topxbar["connections"] = deepcopy(xbar["connections"]) + if "nodes" in xbar: + topxbar["nodes"] = deepcopy(xbar["nodes"]) + else: + topxbar["nodes"] = [] + + # xbar primary clock and reset + topxbar["clock"] = xbar["clock_primary"] + topxbar["reset"] = xbar["reset_primary"] + + # Build nodes from 'connections' + device_nodes = set() + for host, devices in xbar["connections"].items(): + # add host first + xbar_addhost(top, topxbar, host) + + # add device if doesn't exist + device_nodes.update(devices) + + other_xbars = [x["name"] + for x in top["xbar"] + if x["name"] != xbar["name"]] + + log.info(device_nodes) + for device in device_nodes: + xbar_adddevice(top, name_to_block, topxbar, other_xbars, device) + + +def xbar_cross(xbar, xbars): + """Check if cyclic dependency among xbars + + And gather the address range for device port (to another Xbar) + + @param node_name if not "", the function only search downstream + devices starting from the node_name + @param visited The nodes it visited to reach this port. If any + downstream port from node_name in visited, it means + circular path exists. It should be fatal error. + """ + # Step 1: Visit devices (gather the address range) + log.info("Processing circular path check for {}".format(xbar["name"])) + addr = [] + for node in [ + x for x in xbar["nodes"] + if x["type"] == "device" and "xbar" in x and x["xbar"] is False + ]: + addr.extend(node["addr_range"]) + + # Step 2: visit xbar device ports + xbar_nodes = [ + x for x in xbar["nodes"] + if x["type"] == "device" and "xbar" in x and x["xbar"] is True + ] + + # Now call function to get the device range + # the node["name"] is used to find the host_xbar and its connection. The + # assumption here is that there's only one connection from crossbar A to + # crossbar B. + # + # device_xbar is the crossbar has a device port with name as node["name"]. + # host_xbar is the crossbar has a host port with name as node["name"]. + for node in xbar_nodes: + xbar_addr = xbar_cross_node(node["name"], xbar, xbars, visited=[]) + node["addr_range"] = xbar_addr + + +def xbar_cross_node(node_name, device_xbar, xbars, visited=[]): + # 1. Get the connected xbar + host_xbars = [x for x in xbars if x["name"] == node_name] + assert len(host_xbars) == 1 + host_xbar = host_xbars[0] + + log.info("Processing node {} in Xbar {}.".format(node_name, + device_xbar["name"])) + result = [] # [(base_addr, size), .. ] + # Sweep the devices using connections and gather the address. 
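# Editor's note: a self-contained sketch (not topgen code) of the recursive
# address gathering performed below; the crossbar/device names and address
# ranges are illustrative, and the explicit cycle check is added here only to
# mirror what the 'visited' list is meant to guard against.
def gather_addr_ranges(xbar_name, connections, leaf_ranges, visited=None):
    """Collect (base, size) ranges reachable from xbar_name.

    connections: dict mapping a crossbar name to its downstream node names
    leaf_ranges: dict mapping a device name to its list of (base, size) tuples
    """
    visited = list(visited or [])
    if xbar_name in visited:
        raise ValueError("circular crossbar path: {}".format(visited + [xbar_name]))
    visited.append(xbar_name)
    result = []
    for node in connections.get(xbar_name, []):
        if node in connections:   # the downstream node is another crossbar
            result.extend(gather_addr_ranges(node, connections, leaf_ranges, visited))
        else:                     # the downstream node is a device
            result.extend(leaf_ranges.get(node, []))
    return result

conns = {"xbar_main": ["xbar_peri"], "xbar_peri": ["uart0"]}
leaves = {"uart0": [(0x40000000, 0x1000)]}
assert gather_addr_ranges("xbar_main", conns, leaves) == [(0x40000000, 0x1000)]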
+ # If the device is another xbar, call recursive + visited.append(host_xbar["name"]) + devices = host_xbar["connections"][device_xbar["name"]] + + for node in host_xbar["nodes"]: + if not node["name"] in devices: + continue + if "xbar" in node and node["xbar"] is True: + if "addr_range" not in node: + # Deeper dive into another crossbar + xbar_addr = xbar_cross_node(node["name"], host_xbar, xbars, + visited) + node["addr_range"] = xbar_addr + + result.extend(deepcopy(node["addr_range"])) + + visited.pop() + + return result + + +# find the first instance name of a given type +def _find_module_name(modules, module_type): + for m in modules: + if m['type'] == module_type: + return m['name'] + + return None + + +def amend_clocks(top: OrderedDict): + """Add a list of clocks to each clock group + Amend the clock connections of each entry to reflect the actual gated clock + """ + clks_attr = top['clocks'] + clk_paths = clks_attr['hier_paths'] + clkmgr_name = _find_module_name(top['module'], 'clkmgr') + groups_in_top = [x["name"].lower() for x in clks_attr['groups']] + exported_clks = OrderedDict() + trans_eps = [] + + # Assign default parameters to source clocks + for src in clks_attr['srcs']: + if 'derived' not in src: + src['derived'] = "no" + src['params'] = OrderedDict() + + # Default assignments + for group in clks_attr['groups']: + + # if unique not defined, it defaults to 'no' + if 'unique' not in group: + group['unique'] = "no" + + # if no hardwired clocks, define an empty set + group['clocks'] = OrderedDict( + ) if 'clocks' not in group else group['clocks'] + + for ep in top['module'] + top['memory'] + top['xbar']: + + clock_connections = OrderedDict() + + # Ensure each module has a default case + export_if = ep.get('clock_reset_export', []) + + # if no clock group assigned, default is unique + ep['clock_group'] = 'secure' if 'clock_group' not in ep else ep[ + 'clock_group'] + ep_grp = ep['clock_group'] + + # if ep is in the transactional group, collect into list below + if ep['clock_group'] == 'trans': + trans_eps.append(ep['name']) + + # end point names and clocks + ep_name = ep['name'] + ep_clks = [] + + # clock group index + cg_idx = groups_in_top.index(ep_grp) + + # unique property of each group + unique = clks_attr['groups'][cg_idx]['unique'] + + # src property of each group + src = clks_attr['groups'][cg_idx]['src'] + + for port, clk in ep['clock_srcs'].items(): + ep_clks.append(clk) + + name = '' + hier_name = clk_paths[src] + + if src == 'ext': + # clock comes from top ports + if clk == 'main': + name = "i" + else: + name = "{}_i".format(clk) + + elif unique == "yes": + # new unqiue clock name + name = "{}_{}".format(clk, ep_name) + + else: + # new group clock name + name = "{}_{}".format(clk, ep_grp) + + clk_name = "clk_" + name + + # add clock to a particular group + clks_attr['groups'][cg_idx]['clocks'][clk_name] = clk + + # add clock connections + clock_connections[port] = hier_name + clk_name + + # clocks for this module are exported + for intf in export_if: + log.info("{} export clock name is {}".format(ep_name, name)) + + # create dict entry if it does not exit + if intf not in exported_clks: + exported_clks[intf] = OrderedDict() + + # if first time encounter end point, declare + if ep_name not in exported_clks[intf]: + exported_clks[intf][ep_name] = [] + + # append clocks + exported_clks[intf][ep_name].append(name) + + # Add to endpoint structure + ep['clock_connections'] = clock_connections + + # add entry to top level json + top['exported_clks'] = exported_clks + + # 
add entry to inter_module automatically + for intf in top['exported_clks']: + top['inter_module']['external']['{}.clocks_{}'.format( + clkmgr_name, intf)] = "clks_{}".format(intf) + + # add to intermodule connections + for ep in trans_eps: + entry = ep + ".idle" + top['inter_module']['connect']['{}.idle'.format(clkmgr_name)].append(entry) + + +def amend_resets(top): + """Generate exported reset structure and automatically connect to + intermodule. + """ + + rstmgr_name = _find_module_name(top['module'], 'rstmgr') + + # Generate exported reset list + exported_rsts = OrderedDict() + for module in top["module"]: + + # This code is here to ensure if amend_clocks/resets switched order + # everything would still work + export_if = module.get('clock_reset_export', []) + + # There may be multiple export interfaces + for intf in export_if: + # create dict entry if it does not exit + if intf not in exported_rsts: + exported_rsts[intf] = OrderedDict() + + # grab directly from reset_connections definition + rsts = [rst for rst in module['reset_connections'].values()] + exported_rsts[intf][module['name']] = rsts + + # add entry to top level json + top['exported_rsts'] = exported_rsts + + # add entry to inter_module automatically + for intf in top['exported_rsts']: + top['inter_module']['external']['{}.resets_{}'.format( + rstmgr_name, intf)] = "rsts_{}".format(intf) + """Discover the full path and selection to each reset connection. + This is done by modifying the reset connection of each end point. + """ + for end_point in top['module'] + top['memory'] + top['xbar']: + for port, net in end_point['reset_connections'].items(): + reset_path = lib.get_reset_path(net, end_point['domain'], + top['resets']) + end_point['reset_connections'][port] = reset_path + + # reset paths are still needed temporarily until host only modules are properly automated + reset_paths = OrderedDict() + reset_hiers = top["resets"]['hier_paths'] + + for reset in top["resets"]["nodes"]: + if "type" not in reset: + log.error("{} missing type field".format(reset["name"])) + return + + if reset["type"] == "top": + reset_paths[reset["name"]] = "{}rst_{}_n".format( + reset_hiers["top"], reset["name"]) + + elif reset["type"] == "ext": + reset_paths[reset["name"]] = reset_hiers["ext"] + reset['name'] + elif reset["type"] == "int": + log.info("{} used as internal reset".format(reset["name"])) + else: + log.error("{} type is invalid".format(reset["type"])) + + top["reset_paths"] = reset_paths + + return + + +def ensure_interrupt_modules(top: OrderedDict, name_to_block: Dict[str, IpBlock]): + '''Populate top['interrupt_module'] if necessary + + Do this by adding each module in top['modules'] that defines at least one + interrupt. 
+ + ''' + if 'interrupt_module' in top: + return + + modules = [] + for module in top['module']: + block = name_to_block[module['type']] + if block.interrupts: + modules.append(module['name']) + + top['interrupt_module'] = modules + + +def amend_interrupt(top: OrderedDict, name_to_block: Dict[str, IpBlock]): + """Check interrupt_module if exists, or just use all modules + """ + ensure_interrupt_modules(top, name_to_block) + + if "interrupt" not in top or top["interrupt"] == "": + top["interrupt"] = [] + + for m in top["interrupt_module"]: + ips = list(filter(lambda module: module["name"] == m, top["module"])) + if len(ips) == 0: + log.warning( + "Cannot find IP %s which is used in the interrupt_module" % m) + continue + + ip = ips[0] + block = name_to_block[ip['type']] + + log.info("Adding interrupts from module %s" % ip["name"]) + for signal in block.interrupts: + sig_dict = signal.as_nwt_dict('interrupt') + qual = lib.add_module_prefix_to_signal(sig_dict, + module=m.lower()) + top["interrupt"].append(qual) + + +def ensure_alert_modules(top: OrderedDict, name_to_block: Dict[str, IpBlock]): + '''Populate top['alert_module'] if necessary + + Do this by adding each module in top['modules'] that defines at least one + alert. + + ''' + if 'alert_module' in top: + return + + modules = [] + for module in top['module']: + block = name_to_block[module['type']] + if block.alerts: + modules.append(module['name']) + + top['alert_module'] = modules + + +def amend_alert(top: OrderedDict, name_to_block: Dict[str, IpBlock]): + """Check interrupt_module if exists, or just use all modules + """ + ensure_alert_modules(top, name_to_block) + + if "alert" not in top or top["alert"] == "": + top["alert"] = [] + + # Find the alert handler and extract the name of its clock + alert_clock = None + for instance in top['module']: + if instance['type'].lower() == 'alert_handler': + alert_clock = instance['clock_srcs']['clk_i'] + break + assert alert_clock is not None + + for m in top["alert_module"]: + ips = list(filter(lambda module: module["name"] == m, top["module"])) + if len(ips) == 0: + log.warning("Cannot find IP %s which is used in the alert_module" % + m) + continue + + ip = ips[0] + block = name_to_block[ip['type']] + + log.info("Adding alert from module %s" % ip["name"]) + has_async_alerts = ip['clock_srcs']['clk_i'] != alert_clock + for alert in block.alerts: + alert_dict = alert.as_nwt_dict('alert') + alert_dict['async'] = '1' if has_async_alerts else '0' + qual_sig = lib.add_module_prefix_to_signal(alert_dict, + module=m.lower()) + top["alert"].append(qual_sig) + + +def amend_wkup(topcfg: OrderedDict, name_to_block: Dict[str, IpBlock]): + + pwrmgr_name = _find_module_name(topcfg['module'], 'pwrmgr') + + if "wakeups" not in topcfg or topcfg["wakeups"] == "": + topcfg["wakeups"] = [] + + # create list of wakeup signals + for m in topcfg["module"]: + log.info("Adding wakeup from module %s" % m["name"]) + block = name_to_block[m['type']] + for signal in block.wakeups: + log.info("Adding signal %s" % signal.name) + topcfg["wakeups"].append({ + 'name': signal.name, + 'width': str(signal.bits.width()), + 'module': m["name"] + }) + + # add wakeup signals to pwrmgr connections + signal_names = [ + "{}.{}".format(s["module"].lower(), s["name"].lower()) + for s in topcfg["wakeups"] + ] + + topcfg["inter_module"]["connect"]["{}.wakeups".format(pwrmgr_name)] = signal_names + log.info("Intermodule signals: {}".format( + topcfg["inter_module"]["connect"])) + + +# Handle reset requests from modules +def 
amend_reset_request(topcfg: OrderedDict, + name_to_block: Dict[str, IpBlock]): + + pwrmgr_name = _find_module_name(topcfg['module'], 'pwrmgr') + + if "reset_requests" not in topcfg or topcfg["reset_requests"] == "": + topcfg["reset_requests"] = [] + + # create list of reset signals + for m in topcfg["module"]: + log.info("Adding reset requests from module %s" % m["name"]) + block = name_to_block[m['type']] + for signal in block.reset_requests: + log.info("Adding signal %s" % signal.name) + topcfg["reset_requests"].append({ + 'name': signal.name, + 'width': str(signal.bits.width()), + 'module': m["name"] + }) + + # add reset requests to pwrmgr connections + signal_names = [ + "{}.{}".format(s["module"].lower(), s["name"].lower()) + for s in topcfg["reset_requests"] + ] + + topcfg["inter_module"]["connect"]["{}.rstreqs".format(pwrmgr_name)] = signal_names + log.info("Intermodule signals: {}".format( + topcfg["inter_module"]["connect"])) + + +def append_io_signal(temp: Dict, sig_inst: Dict) -> None: + '''Appends the signal to the correct list''' + if sig_inst['type'] == 'inout': + temp['inouts'].append(sig_inst) + if sig_inst['type'] == 'input': + temp['inputs'].append(sig_inst) + if sig_inst['type'] == 'output': + temp['outputs'].append(sig_inst) + + +def get_index_and_incr(ctrs: Dict, connection: str, io_dir: str) -> Dict: + '''Get correct index counter and increment''' + + if connection != 'muxed': + connection = 'dedicated' + + if io_dir in 'inout': + result = ctrs[connection]['inouts'] + ctrs[connection]['inouts'] += 1 + elif connection == 'muxed': + # For MIOs, the input/output arrays differ in RTL + # I.e., the input array contains {inputs, inouts}, whereas + # the output array contains {outputs, inouts}. + if io_dir == 'input': + result = ctrs[connection]['inputs'] + ctrs[connection]['inouts'] + ctrs[connection]['inputs'] += 1 + elif io_dir == 'output': + result = ctrs[connection]['outputs'] + ctrs[connection]['inouts'] + ctrs[connection]['outputs'] += 1 + else: + assert(0) # should not happen + else: + # For DIOs, the input/output arrays are identical in terms of index layout. + # Unused inputs are left unconnected and unused outputs are tied off. + if io_dir == 'input': + result = ctrs[connection]['inputs'] + ctrs[connection]['inouts'] + ctrs[connection]['inputs'] += 1 + elif io_dir == 'output': + result = (ctrs[connection]['outputs'] + + ctrs[connection]['inouts'] + + ctrs[connection]['inputs']) + ctrs[connection]['outputs'] += 1 + else: + assert(0) # should not happen + + return result + + +def amend_pinmux_io(top: Dict, name_to_block: Dict[str, IpBlock]): + """ Process pinmux/pinout configuration and assign available IOs + """ + pinmux = top['pinmux'] + pinout = top['pinout'] + targets = top['targets'] + + temp = {} + temp['inouts'] = [] + temp['inputs'] = [] + temp['outputs'] = [] + + for sig in pinmux['signals']: + # Get the signal information from the IP block type of this instance/ + mod_name = sig['instance'] + m = lib.get_module_by_name(top, mod_name) + + if m is None: + raise SystemExit("Module {} is not searchable.".format(mod_name)) + + block = name_to_block[m['type']] + + # If the signal is explicitly named. + if sig['port'] != '': + + # If this is a bus signal with explicit indexes. 
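# Editor's note: a small standalone illustration (not topgen code) of the two
# port spellings handled by the branch below; the port names are hypothetical.
def split_port(port):
    """'gpio[3]' -> ('gpio', 3); 'spi_host_sck' -> ('spi_host_sck', -1)."""
    if '[' in port:
        name, idx = port.split('[')
        return name, int(idx[:-1])   # drop the trailing ']'
    return port, -1                  # -1 means "no explicit index"

assert split_port("gpio[3]") == ("gpio", 3)
assert split_port("spi_host_sck") == ("spi_host_sck", -1)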
+ if '[' in sig['port']: + name_split = sig['port'].split('[') + sig_name = name_split[0] + idx = int(name_split[1][:-1]) + else: + idx = -1 + sig_name = sig['port'] + + sig_inst = deepcopy(block.get_signal_by_name_as_dict(sig_name)) + + if idx >= 0 and idx >= sig_inst['width']: + raise SystemExit("Index {} is out of bounds for signal {}" + " with width {}.".format(idx, sig_name, sig_inst['width'])) + if idx == -1 and sig_inst['width'] != 1: + raise SystemExit("Bus signal {} requires an index.".format(sig_name)) + + # If we got this far we know that the signal is valid and exists. + # Augment this signal instance with additional information. + sig_inst.update({'idx': idx, + 'pad': sig['pad'], + 'attr': sig['attr'], + 'connection': sig['connection']}) + sig_inst['name'] = mod_name + '_' + sig_inst['name'] + append_io_signal(temp, sig_inst) + + # Otherwise the name is a wildcard for selecting all available IO signals + # of this block and we need to extract them here one by one signals here. + else: + sig_list = deepcopy(block.get_signals_as_list_of_dicts()) + + for sig_inst in sig_list: + # If this is a multibit signal, unroll the bus and + # generate a single bit IO signal entry for each one. + if sig_inst['width'] > 1: + for idx in range(sig_inst['width']): + sig_inst_copy = deepcopy(sig_inst) + sig_inst_copy.update({'idx': idx, + 'pad': sig['pad'], + 'attr': sig['attr'], + 'connection': sig['connection']}) + sig_inst_copy['name'] = sig['instance'] + '_' + sig_inst_copy['name'] + append_io_signal(temp, sig_inst_copy) + else: + sig_inst.update({'idx': -1, + 'pad': sig['pad'], + 'attr': sig['attr'], + 'connection': sig['connection']}) + sig_inst['name'] = sig['instance'] + '_' + sig_inst['name'] + append_io_signal(temp, sig_inst) + + # Now that we've collected all input and output signals, + # we can go through once again and stack them into one unified + # list, and calculate MIO/DIO global indices. + pinmux['ios'] = (temp['inouts'] + + temp['inputs'] + + temp['outputs']) + + # Remember these counts to facilitate the RTL generation + pinmux['io_counts'] = {'dedicated': {'inouts': 0, 'inputs': 0, 'outputs': 0, 'pads': 0}, + 'muxed': {'inouts': 0, 'inputs': 0, 'outputs': 0, 'pads': 0}} + + for sig in pinmux['ios']: + glob_idx = get_index_and_incr(pinmux['io_counts'], sig['connection'], sig['type']) + sig['glob_idx'] = glob_idx + + # Calculate global indices for pads. + j = k = 0 + for pad in pinout['pads']: + if pad['connection'] == 'muxed': + pad['idx'] = j + j += 1 + else: + pad['idx'] = k + k += 1 + pinmux['io_counts']['muxed']['pads'] = j + pinmux['io_counts']['dedicated']['pads'] = k + + # For each target configuration, calculate the special signal indices. + known_muxed_pads = {} + for pad in pinout['pads']: + if pad['connection'] == 'muxed': + known_muxed_pads[pad['name']] = pad + + known_mapped_dio_pads = {} + for sig in pinmux['ios']: + if sig['connection'] in ['muxed', 'manual']: + continue + if sig['pad'] in known_mapped_dio_pads: + raise SystemExit('Cannot have multiple IOs mapped to the same DIO pad {}' + .format(sig['pad'])) + known_mapped_dio_pads[sig['pad']] = sig + + for target in targets: + for entry in target['pinmux']['special_signals']: + # If this is a muxed pad, the resolution is + # straightforward. I.e., we just assign the MIO index. + if entry['pad'] in known_muxed_pads: + entry['idx'] = known_muxed_pads[entry['pad']]['idx'] + # Otherwise we need to find out which DIO this pad is mapped to. 
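# Editor's note (illustrative): at the chip level the special-signal index
# space is the stacked {dio, mio} array, so a muxed special signal reuses its
# MIO pad index directly, while a dedicated one resolves to
#     idx = <global DIO index> + <number of muxed pads>
# e.g. with 47 muxed pads (a hypothetical count), a DIO at global index 12
# resolves to special-signal index 59.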
+ # Note that we can't have special_signals that are manual, since + # there needs to exist a DIO connection. + elif entry['pad'] in known_mapped_dio_pads: + # This index refers to the stacked {dio, mio} array + # on the chip-level, hence we have to add the amount of MIO pads. + idx = (known_mapped_dio_pads[entry['pad']]['glob_idx'] + + pinmux['io_counts']['muxed']['pads']) + entry['idx'] = idx + else: + assert(0) # Entry should be guaranteed to exist at this point + + +def merge_top(topcfg: OrderedDict, + name_to_block: Dict[str, IpBlock], + xbarobjs: OrderedDict) -> OrderedDict: + + # Combine ip cfg into topcfg + elaborate_instances(topcfg, name_to_block) + + # Create clock connections for each block + # Assign clocks into appropriate groups + # Note, elaborate_instances references clock information to establish async handling + # as part of alerts. + # amend_clocks(topcfg) + + # Combine the wakeups + amend_wkup(topcfg, name_to_block) + amend_reset_request(topcfg, name_to_block) + + # Combine the interrupt (should be processed prior to xbar) + amend_interrupt(topcfg, name_to_block) + + # Combine the alert (should be processed prior to xbar) + amend_alert(topcfg, name_to_block) + + # Creates input/output list in the pinmux + log.info("Processing PINMUX") + amend_pinmux_io(topcfg, name_to_block) + + # Combine xbar into topcfg + for xbar in xbarobjs: + amend_xbar(topcfg, name_to_block, xbar) + + # 2nd phase of xbar (gathering the devices address range) + for xbar in topcfg["xbar"]: + xbar_cross(xbar, topcfg["xbar"]) + + # Add path names to declared resets. + # Declare structure for exported resets. + amend_resets(topcfg) + + # remove unwanted fields 'debug_mem_base_addr' + topcfg.pop('debug_mem_base_addr', None) + + return topcfg diff --git a/utils/reggen/topgen/templates/README.md b/utils/reggen/topgen/templates/README.md new file mode 100644 index 0000000..afd488a --- /dev/null +++ b/utils/reggen/topgen/templates/README.md @@ -0,0 +1,4 @@ +# OpenTitan topgen templates + +This directory contains templates used by topgen to assembly a chip toplevel. + diff --git a/utils/reggen/topgen/templates/chip_env_pkg__params.sv.tpl b/utils/reggen/topgen/templates/chip_env_pkg__params.sv.tpl new file mode 100644 index 0000000..3407900 --- /dev/null +++ b/utils/reggen/topgen/templates/chip_env_pkg__params.sv.tpl @@ -0,0 +1,17 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +// Generated by topgen.py + +parameter string LIST_OF_ALERTS[] = { +% for alert in top["alert"]: + % if loop.last: + "${alert["name"]}" + % else: + "${alert["name"]}", + % endif +% endfor +}; + +parameter uint NUM_ALERTS = ${len(top["alert"])}; diff --git a/utils/reggen/topgen/templates/chiplevel.sv.tpl b/utils/reggen/topgen/templates/chiplevel.sv.tpl new file mode 100644 index 0000000..aafec5b --- /dev/null +++ b/utils/reggen/topgen/templates/chiplevel.sv.tpl @@ -0,0 +1,1218 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 +${gencmd} +<% +import re +import topgen.lib as lib +from copy import deepcopy + +# Provide shortcuts for some commonly used variables +pinmux = top['pinmux'] +pinout = top['pinout'] + +num_mio_inputs = pinmux['io_counts']['muxed']['inouts'] + \ + pinmux['io_counts']['muxed']['inputs'] +num_mio_outputs = pinmux['io_counts']['muxed']['inouts'] + \ + pinmux['io_counts']['muxed']['outputs'] +num_mio_pads = pinmux['io_counts']['muxed']['pads'] + +num_dio_inputs = pinmux['io_counts']['dedicated']['inouts'] + \ + pinmux['io_counts']['dedicated']['inputs'] +num_dio_outputs = pinmux['io_counts']['dedicated']['inouts'] + \ + pinmux['io_counts']['dedicated']['outputs'] +num_dio_total = pinmux['io_counts']['dedicated']['inouts'] + \ + pinmux['io_counts']['dedicated']['inputs'] + \ + pinmux['io_counts']['dedicated']['outputs'] + +def get_dio_sig(pinmux: {}, pad: {}): + '''Get DIO signal associated with this pad or return None''' + for sig in pinmux["ios"]: + if sig["connection"] == "direct" and pad["name"] == sig["pad"]: + return sig + else: + return None + +# Modify the pad lists on the fly, based on target config +maxwidth = 0 +muxed_pads = [] +dedicated_pads = [] +k = 0 +for pad in pinout["pads"]: + if pad["connection"] == "muxed": + if pad["name"] not in target["pinout"]["remove_pads"]: + maxwidth = max(maxwidth, len(pad["name"])) + muxed_pads.append(pad) + else: + k = pad["idx"] + if pad["name"] not in target["pinout"]["remove_pads"]: + maxwidth = max(maxwidth, len(pad["name"])) + dedicated_pads.append(pad) + +for pad in target["pinout"]["add_pads"]: + # Since these additional pads have not been elaborated in the merge phase, + # we need to add their global index here. + amended_pad = deepcopy(pad) + amended_pad.update({"idx" : k}) + dedicated_pads.append(pad) + k += 1 + +num_im = sum([x["width"] if "width" in x else 1 for x in top["inter_signal"]["external"]]) + +max_sigwidth = max([x["width"] if "width" in x else 1 for x in top["pinmux"]["ios"]]) +max_sigwidth = len("{}".format(max_sigwidth)) + +clks_attr = top['clocks'] +cpu_clk = top['clocks']['hier_paths']['top'] + "clk_proc_main" +cpu_rst = top["reset_paths"]["sys"] +dm_rst = top["reset_paths"]["lc"] +esc_clk = top['clocks']['hier_paths']['top'] + "clk_io_div4_timers" +esc_rst = top["reset_paths"]["sys_io_div4"] + +unused_resets = lib.get_unused_resets(top) +unused_im_defs, undriven_im_defs = lib.get_dangling_im_def(top["inter_signal"]["definitions"]) + +%>\ +% if target["name"] != "asic": +module chip_${top["name"]}_${target["name"]} #( + // Path to a VMEM file containing the contents of the boot ROM, which will be + // baked into the FPGA bitstream. + parameter BootRomInitFile = "boot_rom_fpga_${target["name"]}.32.vmem", + // Path to a VMEM file containing the contents of the emulated OTP, which will be + // baked into the FPGA bitstream. 
+ parameter OtpCtrlMemInitFile = "otp_img_fpga_${target["name"]}.vmem", + // TODO: Remove this 0 once infra is ready + parameter bit RomCtrlSkipCheck = 1 +) ( +% else: +module chip_${top["name"]}_${target["name"]} #( + // TODO: Remove this 0 once infra is ready + parameter bit RomCtrlSkipCheck = 1 +) ( +% endif +<% + +%>\ + // Dedicated Pads +% for pad in dedicated_pads: +<% + sig = get_dio_sig(pinmux, pad) + if sig is not None: + comment = "// Dedicated Pad for {}".format(sig["name"]) + else: + comment = "// Manual Pad" +%>\ + inout ${pad["name"]}, ${comment} +% endfor + + // Muxed Pads +% for pad in muxed_pads: + inout ${pad["name"]}${" " if loop.last else ","} // MIO Pad ${pad["idx"]} +% endfor +); + + import top_${top["name"]}_pkg::*; + import prim_pad_wrapper_pkg::*; + +% if target["pinmux"]["special_signals"]: + //////////////////////////// + // Special Signal Indices // + //////////////////////////// + + % for entry in target["pinmux"]["special_signals"]: +<% param_name = (lib.Name.from_snake_case(entry["name"]) + + lib.Name(["pad", "idx"])).as_camel_case() +%>\ + parameter int ${param_name} = ${entry["idx"]}; + % endfor +% endif + + // DFT and Debug signal positions in the pinout. + localparam pinmux_pkg::target_cfg_t PinmuxTargetCfg = '{ + tck_idx: TckPadIdx, + tms_idx: TmsPadIdx, + trst_idx: TrstNPadIdx, + tdi_idx: TdiPadIdx, + tdo_idx: TdoPadIdx, + tap_strap0_idx: Tap0PadIdx, + tap_strap1_idx: Tap1PadIdx, + dft_strap0_idx: Dft0PadIdx, + dft_strap1_idx: Dft1PadIdx, + // TODO: check whether there is a better way to pass these USB-specific params + usb_dp_idx: DioUsbdevDp, + usb_dn_idx: DioUsbdevDn, + usb_dp_pullup_idx: DioUsbdevDpPullup, + usb_dn_pullup_idx: DioUsbdevDnPullup, + // Pad types for attribute WARL behavior + dio_pad_type: { +<% + pad_attr = [] + for sig in list(reversed(top["pinmux"]["ios"])): + if sig["connection"] != "muxed": + pad_attr.append((sig['name'], sig["attr"])) +%>\ +% for name, attr in pad_attr: + ${attr}${" " if loop.last else ","} // DIO ${name} +% endfor + }, + mio_pad_type: { +<% + pad_attr = [] + for pad in list(reversed(pinout["pads"])): + if pad["connection"] == "muxed": + pad_attr.append(pad["type"]) +%>\ +% for attr in pad_attr: + ${attr}${" " if loop.last else ","} // MIO Pad ${len(pad_attr) - loop.index - 1} +% endfor + } + }; + + //////////////////////// + // Signal definitions // + //////////////////////// + + pad_attr_t [pinmux_reg_pkg::NMioPads-1:0] mio_attr; + pad_attr_t [pinmux_reg_pkg::NDioPads-1:0] dio_attr; + logic [pinmux_reg_pkg::NMioPads-1:0] mio_out; + logic [pinmux_reg_pkg::NMioPads-1:0] mio_oe; + logic [pinmux_reg_pkg::NMioPads-1:0] mio_in; + logic [pinmux_reg_pkg::NMioPads-1:0] mio_in_raw; + logic [pinmux_reg_pkg::NDioPads-1:0] dio_out; + logic [pinmux_reg_pkg::NDioPads-1:0] dio_oe; + logic [pinmux_reg_pkg::NDioPads-1:0] dio_in; + + logic unused_mio_in_raw; + assign unused_mio_in_raw = ^mio_in_raw; + + // Manual pads +% for pad in dedicated_pads: +<% + pad_prefix = pad["name"].lower() +%>\ +% if not get_dio_sig(pinmux, pad): + logic manual_in_${pad_prefix}, manual_out_${pad_prefix}, manual_oe_${pad_prefix}; +% endif +% endfor + +% for pad in dedicated_pads: +<% + pad_prefix = pad["name"].lower() +%>\ +% if not get_dio_sig(pinmux, pad): + pad_attr_t manual_attr_${pad_prefix}; +% endif +% endfor + +% if target["pinout"]["remove_pads"]: + ///////////////////////// + // Stubbed pad tie-off // + ///////////////////////// + + // Only signals going to non-custom pads need to be tied off. 
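  // Editor's note (illustrative summary of the generated tie-offs below): for
  // every removed pad the core-facing input is tied to 1'b0 and the now
  // unloaded output / output-enable nets are folded into unused_sig, e.g. for
  // a removed MIO pad at a hypothetical index 5:
  //   assign mio_in[5]     = 1'b0;
  //   assign unused_sig[n] = mio_out[5] ^ mio_oe[5];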
+ logic [${len(pinout["pads"])-1}:0] unused_sig; +% for pad in pinout["pads"]: + % if pad["connection"] == 'muxed': + % if pad["name"] in target["pinout"]["remove_pads"]: + assign mio_in[${pad["idx"]}] = 1'b0; + assign unused_sig[${loop.index}] = mio_out[${pad["idx"]}] ^ mio_oe[${pad["idx"]}]; + % endif + % else: + % if pad["name"] in target["pinout"]["remove_pads"]: +<% + ## Only need to tie off if this is not a custom pad. + sig = get_dio_sig(pinmux, pad) + if sig is not None: + sig_index = lib.get_io_enum_literal(sig, 'dio') +%>\ + % if sig is not None: + assign dio_in[${lib.get_io_enum_literal(sig, 'dio')}] = 1'b0; + assign unused_sig[${loop.index}] = dio_out[${sig_index}] ^ dio_oe[${sig_index}]; + % endif + % endif + % endif +% endfor +%endif + + ////////////////////// + // Padring Instance // + ////////////////////// + +% if target["name"] == "asic": + // AST signals needed in padring + ast_pkg::ast_clks_t ast_base_clks; + logic scan_rst_n; + lc_ctrl_pkg::lc_tx_t scanmode; +% endif + + padring #( + // Padring specific counts may differ from pinmux config due + // to custom, stubbed or added pads. + .NDioPads(${len(dedicated_pads)}), + .NMioPads(${len(muxed_pads)}), +% if target["name"] == "asic": + .PhysicalPads(1), + .NIoBanks(int'(IoBankCount)), + .DioScanRole ({ +% for pad in list(reversed(dedicated_pads)): + scan_role_pkg::${lib.Name.from_snake_case('dio_pad_' + pad["name"] + '_scan_role').as_camel_case()}${"" if loop.last else ","} +% endfor + }), + .MioScanRole ({ +% for pad in list(reversed(muxed_pads)): + scan_role_pkg::${lib.Name.from_snake_case('mio_pad_' + pad["name"] + '_scan_role').as_camel_case()}${"" if loop.last else ","} +% endfor + }), + .DioPadBank ({ +% for pad in list(reversed(dedicated_pads)): + ${lib.Name.from_snake_case('io_bank_' + pad["bank"]).as_camel_case()}${" " if loop.last else ","} // ${pad['name']} +% endfor + }), + .MioPadBank ({ +% for pad in list(reversed(muxed_pads)): + ${lib.Name.from_snake_case('io_bank_' + pad["bank"]).as_camel_case()}${" " if loop.last else ","} // ${pad['name']} +% endfor + }), +% endif +\ +\ + .DioPadType ({ +% for pad in list(reversed(dedicated_pads)): + ${pad["type"]}${" " if loop.last else ","} // ${pad['name']} +% endfor + }), + .MioPadType ({ +% for pad in list(reversed(muxed_pads)): + ${pad["type"]}${" " if loop.last else ","} // ${pad['name']} +% endfor + }) + ) u_padring ( + // This is only used for scan and DFT purposes +% if target["name"] == "asic": + .clk_scan_i ( ast_base_clks.clk_sys ), + .scanmode_i ( scanmode ), +% else: + .clk_scan_i ( 1'b0 ), + .scanmode_i ( lc_ctrl_pkg::Off ), + % endif + .dio_in_raw_o ( ), + .mio_in_raw_o ( mio_in_raw ), + // Chip IOs + .dio_pad_io ({ +% for pad in list(reversed(dedicated_pads)): + ${pad["name"]}${"" if loop.last else ","} +% endfor + }), + + .mio_pad_io ({ +% for pad in list(reversed(muxed_pads)): + ${pad["name"]}${"" if loop.last else ","} +% endfor + }), + + // Core-facing +% for port in ["in_o", "out_i", "oe_i", "attr_i"]: + .dio_${port} ({ + % for pad in list(reversed(dedicated_pads)): + <% + sig = get_dio_sig(pinmux, pad) + %>\ + % if sig is None: + manual_${port[:-2]}_${pad["name"].lower()}${"" if loop.last else ","} + % else: + dio_${port[:-2]}[${lib.get_io_enum_literal(sig, 'dio')}]${"" if loop.last else ","} + % endif + % endfor + }), +% endfor + +% for port in ["in_o", "out_i", "oe_i", "attr_i"]: +<% + sig_name = 'mio_' + port[:-2] + indices = list(reversed(list(pad['idx'] for pad in muxed_pads))) +%>\ + .mio_${port} (${lib.make_bit_concatenation(sig_name, 
indices, 6)})${"" if loop.last else ","} +% endfor + ); + + +################################################################### +## USB for CW305 ## +################################################################### +% if target["name"] == "cw305": + // Connect the DP pad + assign dio_in[DioUsbdevDp] = manual_in_usb_p; + assign manual_out_usb_p = dio_out[DioUsbdevDp]; + assign manual_oe_usb_p = dio_oe[DioUsbdevDp]; + assign manual_attr_usb_p = dio_attr[DioUsbdevDp]; + + // Connect the DN pad + assign dio_in[DioUsbdevDn] = manual_in_usb_n; + assign manual_out_usb_n = dio_out[DioUsbdevDn]; + assign manual_oe_usb_n = dio_oe[DioUsbdevDn]; + assign manual_attr_usb_n = dio_attr[DioUsbdevDn]; + + // Connect sense pad + assign dio_in[DioUsbdevSense] = manual_in_io_usb_sense0; + assign manual_out_io_usb_sense0 = dio_out[DioUsbdevSense]; + assign manual_oe_io_usb_sense0 = dio_oe[DioUsbdevSense]; + assign manual_attr_io_sense0 = dio_attr[DioUsbdevSense]; + + // Connect DN pullup + assign dio_in[DioUsbdevDnPullup] = manual_in_io_usb_dnpullup0; + assign manual_out_io_usb_dnpullup0 = dio_out[DioUsbdevDnPullup]; + assign manual_oe_io_usb_dnpullup0 = dio_oe[DioUsbdevDnPullup]; + assign manual_attr_io_dnpullup0 = dio_attr[DioUsbdevDnPullup]; + + // Connect DP pullup + assign dio_in[DioUsbdevDpPullup] = manual_in_io_usb_dppullup0; + assign manual_out_io_usb_dppullup0 = dio_out[DioUsbdevDpPullup]; + assign manual_oe_io_usb_dppullup0 = dio_oe[DioUsbdevDpPullup]; + assign manual_attr_io_dppullup0 = dio_attr[DioUsbdevDpPullup]; + + // Tie-off unused signals + assign dio_in[DioUsbdevSe0] = 1'b0; + assign dio_in[DioUsbdevTxModeSe] = 1'b0; + assign dio_in[DioUsbdevSuspend] = 1'b0; + + logic unused_usb_sigs; + assign unused_usb_sigs = ^{ + // SE0 + dio_out[DioUsbdevSe0], + dio_oe[DioUsbdevSe0], + dio_attr[DioUsbdevSe0], + // TX Mode + dio_out[DioUsbdevTxModeSe], + dio_oe[DioUsbdevTxModeSe], + dio_attr[DioUsbdevTxModeSe], + // Suspend + dio_out[DioUsbdevSuspend], + dio_oe[DioUsbdevSuspend], + dio_attr[DioUsbdevSuspend], + // D is used as an input only + dio_out[DioUsbdevD], + dio_oe[DioUsbdevD], + dio_attr[DioUsbdevD] + }; + +% endif + +################################################################### +## USB for Nexysvideo ## +################################################################### +% if target["name"] == "nexysvideo": + + ///////////////////// + // USB Overlay Mux // + ///////////////////// + + // TODO: generalize this USB mux code and align with other tops. + + // Software can enable the pinflip feature inside usbdev. + // The example hello_usbdev does this based on GPIO0 (a switch on the board) + // + // Here, we use the state of the DN pullup to effectively undo the + // swapping such that the PCB always sees the unflipped D+/D-. We + // could do the same inside the .xdc file but then two FPGA + // bitstreams would be needed for testing. + // + // dio_in/out/oe map is: PADS <- _padring <- JTAG mux -> _umux -> USB mux -> _core + + // Split out for differential PHY testing + + // Outputs always drive and just copy the value + // Let them go to the normal place too because it won't do any harm + // and it simplifies the changes needed + + // The output enable for IO_USB_DNPULLUP0 is used to decide whether we need to undo the swapping. 
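  // Editor's note (derived from the assigns below, for illustration): with
  // undo_swap = 1 the pad-facing pair is crossed back so the PCB always sees
  // the unflipped D+/D-:
  //   undo_swap | manual_out_usb_p     | manual_out_usb_n
  //   ----------+----------------------+---------------------
  //       0     | dio_out[DioUsbdevDp] | dio_out[DioUsbdevDn]
  //       1     | dio_out[DioUsbdevDn] | dio_out[DioUsbdevDp]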
+ logic undo_swap; + assign undo_swap = dio_oe[DioUsbdevDnPullup]; + + // GPIO[2] = Switch 2 on board is used to select using the UPHY + // Keep GPIO[1] for selecting differential in sw + logic use_uphy; + assign use_uphy = mio_in[MioPadIoa2]; + + // DioUsbdevDn + assign manual_attr_usb_n = '0; + assign manual_attr_io_uphy_dn_tx = '0; + + assign manual_out_io_uphy_dn_tx = manual_out_usb_n; + assign manual_out_usb_n = undo_swap ? dio_out[DioUsbdevDp] : + dio_out[DioUsbdevDn]; + + assign manual_oe_io_uphy_dn_tx = manual_oe_usb_n; + assign manual_oe_usb_n = undo_swap ? dio_oe[DioUsbdevDp] : + dio_oe[DioUsbdevDn]; + + assign dio_in[DioUsbdevDn] = use_uphy ? + (undo_swap ? manual_in_io_uphy_dp_rx : + manual_in_io_uphy_dn_rx) : + (undo_swap ? manual_in_usb_p : + manual_in_usb_n); + // DioUsbdevDp + assign manual_attr_usb_p = '0; + assign manual_attr_io_uphy_dp_tx = '0; + + assign manual_out_io_uphy_dp_tx = manual_out_usb_p; + assign manual_out_usb_p = undo_swap ? dio_out[DioUsbdevDn] : + dio_out[DioUsbdevDp]; + + assign manual_oe_io_uphy_dp_tx = manual_oe_usb_p; + assign manual_oe_usb_p = undo_swap ? dio_oe[DioUsbdevDn] : + dio_oe[DioUsbdevDp]; + assign dio_in[DioUsbdevDp] = use_uphy ? + (undo_swap ? manual_in_io_uphy_dn_rx : + manual_in_io_uphy_dp_rx) : + (undo_swap ? manual_in_usb_n : + manual_in_usb_p); + // DioUsbdevD + // This is not connected at the moment + logic unused_out_usb_d; + assign unused_out_usb_d = dio_out[DioUsbdevD] ^ + dio_oe[DioUsbdevD]; + assign dio_in[DioUsbdevD] = use_uphy ? + (undo_swap ? ~manual_in_io_uphy_d_rx : + manual_in_io_uphy_d_rx) : + // This is not connected at the moment + (undo_swap ? 1'b1 : 1'b0); + assign manual_out_io_uphy_d_rx = 1'b0; + assign manual_oe_io_uphy_d_rx = 1'b0; + + // DioUsbdevDnPullup + assign manual_attr_io_usb_dnpullup0 = '0; + assign manual_out_io_usb_dnpullup0 = undo_swap ? dio_out[DioUsbdevDpPullup] : + dio_out[DioUsbdevDnPullup]; + assign manual_oe_io_usb_dnpullup0 = undo_swap ? dio_oe[DioUsbdevDpPullup] : + dio_oe[DioUsbdevDnPullup]; + assign dio_in[DioUsbdevDnPullup] = manual_in_io_usb_dnpullup0; + + // DioUsbdevDpPullup + assign manual_attr_io_usb_dppullup0 = '0; + assign manual_out_io_usb_dppullup0 = undo_swap ? dio_out[DioUsbdevDnPullup] : + dio_out[DioUsbdevDpPullup]; + assign manual_oe_io_usb_dppullup0 = undo_swap ? dio_oe[DioUsbdevDnPullup] : + dio_oe[DioUsbdevDpPullup]; + assign dio_in[DioUsbdevDpPullup] = manual_in_io_usb_dppullup0; + + // DioUsbdevSense + assign manual_out_io_usb_sense0 = dio_out[DioUsbdevSense]; + assign manual_oe_io_usb_sense0 = dio_oe[DioUsbdevSense]; + assign dio_in[DioUsbdevSense] = use_uphy ? 
manual_in_io_uphy_sense : + manual_in_io_usb_sense0; + assign manual_out_io_uphy_sense = 1'b0; + assign manual_oe_io_uphy_sense = 1'b0; + + // DioUsbdevRxEnable + assign dio_in[DioUsbdevRxEnable] = 1'b0; + + // Additional outputs for uphy + assign manual_oe_io_uphy_dppullup = 1'b1; + assign manual_out_io_uphy_dppullup = manual_out_io_usb_dppullup0 & + manual_oe_io_usb_dppullup0; + + logic unused_in_io_uphy_dppullup; + assign unused_in_io_uphy_dppullup = manual_in_io_uphy_dppullup; + + assign manual_oe_io_uphy_oe_n = 1'b1; + assign manual_out_io_uphy_oe_n = ~manual_oe_usb_p; + + logic unused_in_io_uphy_oe_n; + assign unused_in_io_uphy_oe_n = manual_in_io_uphy_oe_n; + +% endif + +################################################################### +## ASIC ## +################################################################### +% if target["name"] == "asic": + + ////////////////////////////////// + // Manual Pad / Signal Tie-offs // + ////////////////////////////////// + + assign manual_out_por_n = 1'b0; + assign manual_oe_por_n = 1'b0; + + assign manual_out_cc1 = 1'b0; + assign manual_oe_cc1 = 1'b0; + assign manual_out_cc2 = 1'b0; + assign manual_oe_cc2 = 1'b0; + + assign manual_out_flash_test_mode0 = 1'b0; + assign manual_oe_flash_test_mode0 = 1'b0; + assign manual_out_flash_test_mode1 = 1'b0; + assign manual_oe_flash_test_mode1 = 1'b0; + assign manual_out_flash_test_volt = 1'b0; + assign manual_oe_flash_test_volt = 1'b0; + assign manual_out_otp_ext_volt = 1'b0; + assign manual_oe_otp_ext_volt = 1'b0; + + // These pad attributes currently tied off permanently (these are all input-only pads). + assign manual_attr_por_n = '0; + assign manual_attr_cc1 = '0; + assign manual_attr_cc2 = '0; + assign manual_attr_flash_test_mode0 = '0; + assign manual_attr_flash_test_mode1 = '0; + assign manual_attr_flash_test_volt = '0; + assign manual_attr_otp_ext_volt = '0; + + logic unused_manual_sigs; + assign unused_manual_sigs = ^{ + manual_in_cc2, + manual_in_cc1, + manual_in_flash_test_volt, + manual_in_flash_test_mode0, + manual_in_flash_test_mode1, + manual_in_otp_ext_volt + }; + + /////////////////////////////// + // Differential USB Receiver // + /////////////////////////////// + + // TODO: generalize this USB mux code and align with other tops. 
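  // Editor's note (summary of the connections that follow, for orientation):
  // D+/D- route straight through the usb_p / usb_n manual pads, each pull-up
  // enable is formed as dio_out & dio_oe of its pull-up DIO, the differential
  // receiver u_prim_usb_diff_rx drives dio_in[DioUsbdevD], and the remaining
  // single-ended usbdev inputs are tied to 1'b0.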
+ + // Connect the DP pad + assign dio_in[DioUsbdevDp] = manual_in_usb_p; + assign manual_out_usb_p = dio_out[DioUsbdevDp]; + assign manual_oe_usb_p = dio_oe[DioUsbdevDp]; + assign manual_attr_usb_p = dio_attr[DioUsbdevDp]; + + // Connect the DN pad + assign dio_in[DioUsbdevDn] = manual_in_usb_n; + assign manual_out_usb_n = dio_out[DioUsbdevDn]; + assign manual_oe_usb_n = dio_oe[DioUsbdevDn]; + assign manual_attr_usb_n = dio_attr[DioUsbdevDn]; + + // Pullups + logic usb_pullup_p_en, usb_pullup_n_en; + assign usb_pullup_p_en = dio_out[DioUsbdevDpPullup] & dio_oe[DioUsbdevDpPullup]; + assign usb_pullup_n_en = dio_out[DioUsbdevDnPullup] & dio_oe[DioUsbdevDnPullup]; + + logic usb_rx_enable; + assign usb_rx_enable = dio_out[DioUsbdevRxEnable] & dio_oe[DioUsbdevRxEnable]; + + logic [ast_pkg::UsbCalibWidth-1:0] usb_io_pu_cal; + + // pwrmgr interface + pwrmgr_pkg::pwr_ast_req_t base_ast_pwr; + pwrmgr_pkg::pwr_ast_rsp_t ast_base_pwr; + + prim_usb_diff_rx #( + .CalibW(ast_pkg::UsbCalibWidth) + ) u_prim_usb_diff_rx ( + .input_pi ( USB_P ), + .input_ni ( USB_N ), + .input_en_i ( usb_rx_enable ), + .core_pok_i ( ast_base_pwr.main_pok ), + .pullup_p_en_i ( usb_pullup_p_en ), + .pullup_n_en_i ( usb_pullup_n_en ), + .calibration_i ( usb_io_pu_cal ), + .input_o ( dio_in[DioUsbdevD] ) + ); + + // Tie-off unused signals + assign dio_in[DioUsbdevSense] = 1'b0; + assign dio_in[DioUsbdevSe0] = 1'b0; + assign dio_in[DioUsbdevDpPullup] = 1'b0; + assign dio_in[DioUsbdevDnPullup] = 1'b0; + assign dio_in[DioUsbdevTxModeSe] = 1'b0; + assign dio_in[DioUsbdevSuspend] = 1'b0; + assign dio_in[DioUsbdevRxEnable] = 1'b0; + + logic unused_usb_sigs; + assign unused_usb_sigs = ^{ + // Sense + dio_out[DioUsbdevSense], + dio_oe[DioUsbdevSense], + dio_attr[DioUsbdevSense], + // SE0 + dio_out[DioUsbdevSe0], + dio_oe[DioUsbdevSe0], + dio_attr[DioUsbdevSe0], + // TX Mode + dio_out[DioUsbdevTxModeSe], + dio_oe[DioUsbdevTxModeSe], + dio_attr[DioUsbdevTxModeSe], + // Suspend + dio_out[DioUsbdevSuspend], + dio_oe[DioUsbdevSuspend], + dio_attr[DioUsbdevSuspend], + // Rx enable + dio_attr[DioUsbdevRxEnable], + // D is used as an input only + dio_out[DioUsbdevD], + dio_oe[DioUsbdevD], + dio_attr[DioUsbdevD], + // Pullup/down + dio_attr[DioUsbdevDpPullup], + dio_attr[DioUsbdevDnPullup] + }; + + ////////////////////// + // AST // + ////////////////////// + // TLUL interface + tlul_pkg::tl_h2d_t base_ast_bus; + tlul_pkg::tl_d2h_t ast_base_bus; + + // assorted ast status + ast_pkg::ast_status_t ast_status; + + // ast clocks and resets + logic aon_pok; + + // synchronization clocks / rests + clkmgr_pkg::clkmgr_ast_out_t clks_ast; + rstmgr_pkg::rstmgr_ast_out_t rsts_ast; + + // otp power sequence + otp_ctrl_pkg::otp_ast_req_t otp_ctrl_otp_ast_pwr_seq; + otp_ctrl_pkg::otp_ast_rsp_t otp_ctrl_otp_ast_pwr_seq_h; + + logic usb_ref_pulse; + logic usb_ref_val; + + // adc + ast_pkg::adc_ast_req_t adc_req; + ast_pkg::adc_ast_rsp_t adc_rsp; + + // entropy source interface + // The entropy source pacakge definition should eventually be moved to es + entropy_src_pkg::entropy_src_rng_req_t es_rng_req; + entropy_src_pkg::entropy_src_rng_rsp_t es_rng_rsp; + logic es_rng_fips; + + // entropy distribution network + edn_pkg::edn_req_t ast_edn_edn_req; + edn_pkg::edn_rsp_t ast_edn_edn_rsp; + + // alerts interface + ast_pkg::ast_alert_rsp_t ast_alert_rsp; + ast_pkg::ast_alert_req_t ast_alert_req; + + // Flash connections + lc_ctrl_pkg::lc_tx_t flash_bist_enable; + logic flash_power_down_h; + logic flash_power_ready_h; + + // Life cycle clock bypass req/ack + 
lc_ctrl_pkg::lc_tx_t ast_clk_byp_req; + lc_ctrl_pkg::lc_tx_t ast_clk_byp_ack; + + // DFT connections + logic scan_en; + lc_ctrl_pkg::lc_tx_t dft_en; + pinmux_pkg::dft_strap_test_req_t dft_strap_test; + + // Debug connections + logic [ast_pkg::Ast2PadOutWidth-1:0] ast2pinmux; + logic [ast_pkg::Pad2AstInWidth-1:0] pad2ast; + + assign pad2ast = { + mio_in_raw[MioPadIoc3], + mio_in_raw[MioPadIob8], + mio_in_raw[MioPadIob7], + mio_in_raw[MioPadIob2], + mio_in_raw[MioPadIob1], + mio_in_raw[MioPadIob0] + }; + + + // Jitter enable + logic jen; + + // reset domain connections + import rstmgr_pkg::PowerDomains; + import rstmgr_pkg::DomainAonSel; + import rstmgr_pkg::Domain0Sel; + + // external clock comes in at a fixed position + logic ext_clk; + assign ext_clk = mio_in_raw[MioPadIoc6]; + + // Memory configuration connections + ast_pkg::spm_rm_t ast_ram_1p_cfg; + ast_pkg::spm_rm_t ast_rf_cfg; + ast_pkg::spm_rm_t ast_rom_cfg; + ast_pkg::dpm_rm_t ast_ram_2p_fcfg; + ast_pkg::dpm_rm_t ast_ram_2p_lcfg; + + prim_ram_1p_pkg::ram_1p_cfg_t ram_1p_cfg; + prim_ram_2p_pkg::ram_2p_cfg_t ram_2p_cfg; + prim_rom_pkg::rom_cfg_t rom_cfg; + + // conversion from ast structure to memory centric structures + assign ram_1p_cfg = '{ + ram_cfg: '{ + cfg_en: ast_ram_1p_cfg.marg_en, + cfg: ast_ram_1p_cfg.marg + }, + rf_cfg: '{ + cfg_en: ast_rf_cfg.marg_en, + cfg: ast_rf_cfg.marg + } + }; + + assign ram_2p_cfg = '{ + a_ram_fcfg: '{ + cfg_en: ast_ram_2p_fcfg.marg_en_a, + cfg: ast_ram_2p_fcfg.marg_a + }, + a_ram_lcfg: '{ + cfg_en: ast_ram_2p_lcfg.marg_en_a, + cfg: ast_ram_2p_lcfg.marg_a + }, + b_ram_fcfg: '{ + cfg_en: ast_ram_2p_fcfg.marg_en_b, + cfg: ast_ram_2p_fcfg.marg_b + }, + b_ram_lcfg: '{ + cfg_en: ast_ram_2p_lcfg.marg_en_b, + cfg: ast_ram_2p_lcfg.marg_b + } + }; + + assign rom_cfg = '{ + cfg_en: ast_rom_cfg.marg_en, + cfg: ast_rom_cfg.marg + }; + + + // AST does not use all clocks / resets forwarded to it + logic unused_slow_clk_en; + logic unused_usb_clk_aon; + logic unused_usb_clk_io_div4; + assign unused_slow_clk_en = base_ast_pwr.slow_clk_en; + assign unused_usb_clk_aon = clks_ast.clk_ast_usbdev_aon_peri; + assign unused_usb_clk_io_div4 = clks_ast.clk_ast_usbdev_io_div4_peri; + + logic unused_usb_usb_rst; + logic [PowerDomains-1:0] unused_usb_sys_io_div4_rst; + logic [PowerDomains-1:0] unused_usb_sys_aon_rst; + logic unused_ast_sys_io_div4_rst; + logic unused_sensor_ctrl_sys_io_div4_rst; + logic unused_adc_ctrl_sys_io_div4_rst; + logic unused_entropy_sys_rst; + logic unused_edn_sys_rst; + assign unused_usb_usb_rst = rsts_ast.rst_ast_usbdev_usb_n[DomainAonSel]; + assign unused_usb_sys_io_div4_rst = rsts_ast.rst_ast_usbdev_sys_io_div4_n; + assign unused_usb_sys_aon_rst = rsts_ast.rst_ast_usbdev_sys_aon_n; + assign unused_ast_sys_io_div4_rst = + rsts_ast.rst_ast_ast_sys_io_div4_n[Domain0Sel]; + assign unused_sensor_ctrl_sys_io_div4_rst = + rsts_ast.rst_ast_sensor_ctrl_aon_sys_io_div4_n[Domain0Sel]; + assign unused_adc_ctrl_sys_io_div4_rst = + rsts_ast.rst_ast_adc_ctrl_aon_sys_io_div4_n[Domain0Sel]; + assign unused_entropy_sys_rst = rsts_ast.rst_ast_entropy_src_sys_n[DomainAonSel]; + assign unused_edn_sys_rst = rsts_ast.rst_ast_edn0_sys_n[DomainAonSel]; + + ast_pkg::ast_dif_t flash_alert; + ast_pkg::ast_dif_t otp_alert; + logic ast_init_done; + + ast #( + .EntropyStreams(ast_pkg::EntropyStreams), + .AdcChannels(ast_pkg::AdcChannels), + .AdcDataWidth(ast_pkg::AdcDataWidth), + .UsbCalibWidth(ast_pkg::UsbCalibWidth), + .Ast2PadOutWidth(ast_pkg::Ast2PadOutWidth), + .Pad2AstInWidth(ast_pkg::Pad2AstInWidth) + ) u_ast ( + // 
tlul + .tl_i ( base_ast_bus ), + .tl_o ( ast_base_bus ), + // init done indication + .ast_init_done_o ( ast_init_done ), + // buffered clocks & resets + // Reset domain connection is manual at the moment + .clk_ast_adc_i ( clks_ast.clk_ast_adc_ctrl_aon_io_div4_peri ), + .rst_ast_adc_ni ( rsts_ast.rst_ast_adc_ctrl_aon_sys_io_div4_n[DomainAonSel] ), + .clk_ast_alert_i ( clks_ast.clk_ast_sensor_ctrl_aon_io_div4_secure ), + .rst_ast_alert_ni ( rsts_ast.rst_ast_sensor_ctrl_aon_sys_io_div4_n[DomainAonSel] ), + .clk_ast_es_i ( clks_ast.clk_ast_edn0_main_secure ), + .rst_ast_es_ni ( rsts_ast.rst_ast_edn0_sys_n[Domain0Sel] ), + .clk_ast_rng_i ( clks_ast.clk_ast_entropy_src_main_secure ), + .rst_ast_rng_ni ( rsts_ast.rst_ast_entropy_src_sys_n[Domain0Sel] ), + .clk_ast_tlul_i ( clks_ast.clk_ast_ast_io_div4_secure ), + .rst_ast_tlul_ni ( rsts_ast.rst_ast_ast_sys_io_div4_n[DomainAonSel] ), + .clk_ast_usb_i ( clks_ast.clk_ast_usbdev_usb_peri ), + .rst_ast_usb_ni ( rsts_ast.rst_ast_usbdev_usb_n[Domain0Sel] ), + .clk_ast_ext_i ( ext_clk ), + .por_ni ( manual_in_por_n ), + // pok test for FPGA + .vcc_supp_i ( 1'b1 ), + .vcaon_supp_i ( 1'b1 ), + .vcmain_supp_i ( 1'b1 ), + .vioa_supp_i ( 1'b1 ), + .viob_supp_i ( 1'b1 ), + // pok + .vcaon_pok_o ( aon_pok ), + .vcmain_pok_o ( ast_base_pwr.main_pok ), + .vioa_pok_o ( ast_status.io_pok[0] ), + .viob_pok_o ( ast_status.io_pok[1] ), + // main regulator + .main_env_iso_en_i ( base_ast_pwr.pwr_clamp_env ), + .main_pd_ni ( base_ast_pwr.main_pd_n ), + // pdm control (flash)/otp + .flash_power_down_h_o ( flash_power_down_h ), + .flash_power_ready_h_o ( flash_power_ready_h ), + .otp_power_seq_i ( otp_ctrl_otp_ast_pwr_seq ), + .otp_power_seq_h_o ( otp_ctrl_otp_ast_pwr_seq_h ), + // system source clock + .clk_src_sys_en_i ( base_ast_pwr.core_clk_en ), + // need to add function in clkmgr + .clk_src_sys_jen_i ( jen ), + .clk_src_sys_o ( ast_base_clks.clk_sys ), + .clk_src_sys_val_o ( ast_base_pwr.core_clk_val ), + // aon source clock + .clk_src_aon_o ( ast_base_clks.clk_aon ), + .clk_src_aon_val_o ( ast_base_pwr.slow_clk_val ), + // io source clock + .clk_src_io_en_i ( base_ast_pwr.io_clk_en ), + .clk_src_io_o ( ast_base_clks.clk_io ), + .clk_src_io_val_o ( ast_base_pwr.io_clk_val ), + // usb source clock + .usb_ref_pulse_i ( usb_ref_pulse ), + .usb_ref_val_i ( usb_ref_val ), + .clk_src_usb_en_i ( base_ast_pwr.usb_clk_en ), + .clk_src_usb_o ( ast_base_clks.clk_usb ), + .clk_src_usb_val_o ( ast_base_pwr.usb_clk_val ), + // USB IO Pull-up Calibration Setting + .usb_io_pu_cal_o ( usb_io_pu_cal ), + // adc + .adc_a0_ai ( CC1 ), + .adc_a1_ai ( CC2 ), + .adc_pd_i ( adc_req.pd ), + .adc_chnsel_i ( adc_req.channel_sel ), + .adc_d_o ( adc_rsp.data ), + .adc_d_val_o ( adc_rsp.data_valid ), + // rng + .rng_en_i ( es_rng_req.rng_enable ), + .rng_fips_i ( es_rng_fips ), + .rng_val_o ( es_rng_rsp.rng_valid ), + .rng_b_o ( es_rng_rsp.rng_b ), + // entropy + .entropy_rsp_i ( ast_edn_edn_rsp ), + .entropy_req_o ( ast_edn_edn_req ), + // alerts + .fla_alert_in_i ( flash_alert ), + .otp_alert_in_i ( otp_alert ), + .alert_rsp_i ( ast_alert_rsp ), + .alert_req_o ( ast_alert_req ), + // dft + .dft_strap_test_i ( dft_strap_test ), + .lc_dft_en_i ( dft_en ), + // pinmux related + .padmux2ast_i ( pad2ast ), + .ast2padmux_o ( ast2pinmux ), + // Direct short to PAD + .pad2ast_t0_ai ( IOA4 ), + .pad2ast_t1_ai ( IOA5 ), + .ast2pad_t0_ao ( IOA2 ), + .ast2pad_t1_ao ( IOA3 ), + .lc_clk_byp_req_i ( ast_clk_byp_req ), + .lc_clk_byp_ack_o ( ast_clk_byp_ack ), + .flash_bist_en_o ( flash_bist_enable ), + // 
Memory configuration connections + .dpram_rmf_o ( ast_ram_2p_fcfg ), + .dpram_rml_o ( ast_ram_2p_lcfg ), + .spram_rm_o ( ast_ram_1p_cfg ), + .sprgf_rm_o ( ast_rf_cfg ), + .sprom_rm_o ( ast_rom_cfg ), + // scan + .dft_scan_md_o ( scanmode ), + .scan_shift_en_o ( scan_en ), + .scan_reset_no ( scan_rst_n ) + ); + + ////////////////////// + // Top-level design // + ////////////////////// + + top_${top["name"]} #( + .AesMasking(1'b1), + .AesSBoxImpl(aes_pkg::SBoxImplDom), + .SecAesStartTriggerDelay(0), + .SecAesAllowForcingMasks(1'b0), + .KmacEnMasking(1), // DOM AND + Masking scheme + .KmacReuseShare(0), + .SramCtrlRetAonInstrExec(0), + .SramCtrlMainInstrExec(1), + .PinmuxAonTargetCfg(PinmuxTargetCfg), + .RomCtrlSkipCheck(RomCtrlSkipCheck) + ) top_${top["name"]} ( + .rst_ni ( aon_pok ), + // ast connections + .clk_main_i ( ast_base_clks.clk_sys ), + .clk_io_i ( ast_base_clks.clk_io ), + .clk_usb_i ( ast_base_clks.clk_usb ), + .clk_aon_i ( ast_base_clks.clk_aon ), + .clks_ast_o ( clks_ast ), + .clk_main_jitter_en_o ( jen ), + .rsts_ast_o ( rsts_ast ), + .pwrmgr_ast_req_o ( base_ast_pwr ), + .pwrmgr_ast_rsp_i ( ast_base_pwr ), + .sensor_ctrl_ast_alert_req_i ( ast_alert_req ), + .sensor_ctrl_ast_alert_rsp_o ( ast_alert_rsp ), + .sensor_ctrl_ast_status_i ( ast_status ), + .usbdev_usb_ref_val_o ( usb_ref_pulse ), + .usbdev_usb_ref_pulse_o ( usb_ref_val ), + .ast_tl_req_o ( base_ast_bus ), + .ast_tl_rsp_i ( ast_base_bus ), + .adc_req_o ( adc_req ), + .adc_rsp_i ( adc_rsp ), + .ast_edn_req_i ( ast_edn_edn_req ), + .ast_edn_rsp_o ( ast_edn_edn_rsp ), + .otp_ctrl_otp_ast_pwr_seq_o ( otp_ctrl_otp_ast_pwr_seq ), + .otp_ctrl_otp_ast_pwr_seq_h_i ( otp_ctrl_otp_ast_pwr_seq_h ), + .otp_alert_o ( otp_alert ), + .flash_bist_enable_i ( flash_bist_enable ), + .flash_power_down_h_i ( flash_power_down_h ), + .flash_power_ready_h_i ( flash_power_ready_h ), + .flash_alert_o ( flash_alert ), + .es_rng_req_o ( es_rng_req ), + .es_rng_rsp_i ( es_rng_rsp ), + .es_rng_fips_o ( es_rng_fips ), + .ast_clk_byp_req_o ( ast_clk_byp_req ), + .ast_clk_byp_ack_i ( ast_clk_byp_ack ), + .ast2pinmux_i ( ast2pinmux ), + .ast_init_done_i ( ast_init_done ), + + // Flash test mode voltages + .flash_test_mode_a_io ( {FLASH_TEST_MODE1, + FLASH_TEST_MODE0} ), + .flash_test_voltage_h_io ( FLASH_TEST_VOLT ), + + // OTP external voltage + .otp_ext_voltage_h_io ( OTP_EXT_VOLT ), + + // Multiplexed I/O + .mio_in_i ( mio_in ), + .mio_out_o ( mio_out ), + .mio_oe_o ( mio_oe ), + + // Dedicated I/O + .dio_in_i ( dio_in ), + .dio_out_o ( dio_out ), + .dio_oe_o ( dio_oe ), + + // Pad attributes + .mio_attr_o ( mio_attr ), + .dio_attr_o ( dio_attr ), + + // Memory attributes + .ram_1p_cfg_i ( ram_1p_cfg ), + .ram_2p_cfg_i ( ram_2p_cfg ), + .rom_cfg_i ( rom_cfg ), + + // DFT signals + .ast_lc_dft_en_o ( dft_en ), + .dft_strap_test_o ( dft_strap_test ), + .dft_hold_tap_sel_i ( '0 ), + .scan_rst_ni ( scan_rst_n ), + .scan_en_i ( scan_en ), + .scanmode_i ( scanmode ) + ); +% endif + +################################################################### +## FPGA shared ## +################################################################### +% if target["name"] in ["cw305", "nexysvideo"]: + ////////////////// + // PLL for FPGA // + ////////////////// + + assign manual_out_io_clk = 1'b0; + assign manual_oe_io_clk = 1'b0; + assign manual_out_por_n = 1'b0; + assign manual_oe_por_n = 1'b0; + assign manual_out_io_jsrst_n = 1'b0; + assign manual_oe_io_jsrst_n = 1'b0; + + logic clk_main, clk_usb_48mhz, clk_aon, rst_n; + clkgen_xil7series # ( + .AddClkBuf(0) + ) 
clkgen ( + .clk_i(manual_in_io_clk), + .rst_ni(manual_in_por_n), + .jtag_srst_ni(manual_in_io_jsrst_n), + .clk_main_o(clk_main), + .clk_48MHz_o(clk_usb_48mhz), + .clk_aon_o(clk_aon), + .rst_no(rst_n) + ); + + ////////////////////// + // Top-level design // + ////////////////////// + pwrmgr_pkg::pwr_ast_rsp_t ast_base_pwr; + ast_pkg::ast_alert_req_t ast_base_alerts; + ast_pkg::ast_status_t ast_base_status; + + assign ast_base_pwr.slow_clk_val = 1'b1; + assign ast_base_pwr.core_clk_val = 1'b1; + assign ast_base_pwr.io_clk_val = 1'b1; + assign ast_base_pwr.usb_clk_val = 1'b1; + assign ast_base_pwr.main_pok = 1'b1; + + ast_pkg::ast_dif_t silent_alert = '{ + p: 1'b0, + n: 1'b1 + }; + + assign ast_base_alerts.alerts = {ast_pkg::NumAlerts{silent_alert}}; + assign ast_base_status.io_pok = {ast_pkg::NumIoRails{1'b1}}; + + // the rst_ni pin only goes to AST + // the rest of the logic generates reset based on the 'pok' signal. + // for verilator purposes, make these two the same. + lc_ctrl_pkg::lc_tx_t lc_clk_bypass; + +% if target["name"] == "cw305": + // This is used for outputting the capture trigger + logic [pinmux_reg_pkg::NMioPads-1:0] mio_out_pre; +% endif + +// TODO: align this with ASIC version to minimize the duplication. +// Also need to add AST simulation and FPGA emulation models for things like entropy source - +// otherwise Verilator / FPGA will hang. + top_${top["name"]} #( +% if target["name"] == "cw305": + .AesMasking(1'b1), + .AesSBoxImpl(aes_pkg::SBoxImplDom), + .SecAesStartTriggerDelay(40), + .SecAesAllowForcingMasks(1'b1), + .SecAesSkipPRNGReseeding(1'b1), + .IbexICache(0), + .BootRomInitFile(BootRomInitFile), +% else: + .AesMasking(1'b0), + .AesSBoxImpl(aes_pkg::SBoxImplLut), + .SecAesStartTriggerDelay(0), + .SecAesAllowForcingMasks(1'b0), + .SecAesSkipPRNGReseeding(1'b0), + .EntropySrcStub(1'b1), + .CsrngSBoxImpl(aes_pkg::SBoxImplLut), + .OtbnRegFile(otbn_pkg::RegFileFPGA), + .OtbnStub(1'b1), + .OtpCtrlMemInitFile(OtpCtrlMemInitFile), + .RomCtrlBootRomInitFile(BootRomInitFile), +% endif + .IbexRegFile(ibex_pkg::RegFileFPGA), + .IbexPipeLine(1), + .SecureIbex(0), + .SramCtrlRetAonInstrExec(0), + .SramCtrlMainInstrExec(1), + .PinmuxAonTargetCfg(PinmuxTargetCfg) + ) top_${top["name"]} ( + .rst_ni ( rst_n ), + .clk_main_i ( clk_main ), + .clk_io_i ( clk_main ), + .clk_usb_i ( clk_usb_48mhz ), + .clk_aon_i ( clk_aon ), + .clks_ast_o ( ), + .clk_main_jitter_en_o ( ), + .rsts_ast_o ( ), + .pwrmgr_ast_req_o ( ), + .pwrmgr_ast_rsp_i ( ast_base_pwr ), + .sensor_ctrl_ast_alert_req_i ( ast_base_alerts ), + .sensor_ctrl_ast_alert_rsp_o ( ), + .sensor_ctrl_ast_status_i ( ast_base_status ), + .usbdev_usb_ref_val_o ( ), + .usbdev_usb_ref_pulse_o ( ), + .ast_edn_req_i ( '0 ), + .ast_edn_rsp_o ( ), + .flash_bist_enable_i ( lc_ctrl_pkg::Off ), + .flash_power_down_h_i ( 1'b0 ), + .flash_power_ready_h_i ( 1'b1 ), + .ast_clk_byp_req_o ( lc_clk_bypass ), + .ast_clk_byp_ack_i ( lc_clk_bypass ), + +% if target["name"] != "cw305": + .ast_tl_req_o ( ), + .ast_tl_rsp_i ( '0 ), + .otp_ctrl_otp_ast_pwr_seq_o ( ), + .otp_ctrl_otp_ast_pwr_seq_h_i ( '0 ), + .otp_alert_o ( ), + .es_rng_req_o ( ), + .es_rng_rsp_i ( '0 ), + .es_rng_fips_o ( ), + .ast2pinmux_i ( '0 ), +% endif + + // Multiplexed I/O + .mio_in_i ( mio_in ), +% if target["name"] == "cw305": + .mio_out_o ( mio_out_pre ), +% else: + .mio_out_o ( mio_out ), +% endif + .mio_oe_o ( mio_oe ), + + // Dedicated I/O + .dio_in_i ( dio_in ), + .dio_out_o ( dio_out ), + .dio_oe_o ( dio_oe ), + + // Pad attributes + .mio_attr_o ( mio_attr ), + .dio_attr_o ( 
dio_attr ), + + // Memory attributes + .ram_1p_cfg_i ( '0 ), + .ram_2p_cfg_i ( '0 ), + .rom_cfg_i ( '0 ), + + // DFT signals + .dft_hold_tap_sel_i ( '0 ), + .scan_rst_ni ( 1'b1 ), + .scan_en_i ( 1'b0 ), + .scanmode_i ( lc_ctrl_pkg::Off ) + ); +% endif + + +################################################################### +## CW305 capture trigger ## +################################################################### +% if target["name"] == "cw305": + + ////////////////////////////////////// + // Generate precise capture trigger // + ////////////////////////////////////// + + // TODO: make this a "manual" IO specific to the CW305 target + // such that we can decouple this from the MIO signals. + localparam int MioIdxTrigger = 15; + + // To obtain a more precise capture trigger for side-channel analysis, we only forward the + // software-controlled capture trigger when the AES module is actually busy (performing + // either encryption/decryption or clearing internal registers). + // GPIO15 is used as capture trigger (mapped to IOB9 at the moment in pinmux.c). + always_comb begin : p_trigger + mio_out = mio_out_pre; + mio_out[MioIdxTrigger] = mio_out_pre[MioIdxTrigger] & + ~top_englishbreakfast.clkmgr_aon_idle[clkmgr_pkg::Aes]; + end + + ////////////////////// + // ChipWhisperer IO // + ////////////////////// + + logic unused_inputs; + assign unused_inputs = manual_in_tio_clkout ^ manual_in_io_utx_debug; + + // Clock ouput to capture board. + assign manual_out_tio_clkout = manual_in_io_clk; + assign manual_oe_tio_clkout = 1'b1; + + // UART Tx for debugging. The UART itself is connected to the capture board. + assign manual_out_io_utx_debug = top_${top["name"]}.cio_uart0_tx_d2p; + assign manual_oe_io_utx_debug = 1'b1; + +% endif + +endmodule : chip_${top["name"]}_${target["name"]} diff --git a/utils/reggen/topgen/templates/clang-format b/utils/reggen/topgen/templates/clang-format new file mode 100644 index 0000000..7cb47a7 --- /dev/null +++ b/utils/reggen/topgen/templates/clang-format @@ -0,0 +1,4 @@ +# This disables clang-format on all files in the sw/autogen directory. +# This is needed so that git-clang-format and similar scripts work. +DisableFormat: true +SortIncludes: false diff --git a/utils/reggen/topgen/templates/tb__alert_handler_connect.sv.tpl b/utils/reggen/topgen/templates/tb__alert_handler_connect.sv.tpl new file mode 100644 index 0000000..559926a --- /dev/null +++ b/utils/reggen/topgen/templates/tb__alert_handler_connect.sv.tpl @@ -0,0 +1,21 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 +// +// tb__alert_handler_connect.sv is auto-generated by `topgen.py` tool + +<% + index = 0 + module_name = "" +%>\ +% for alert in top["alert"]: + % if alert["module_name"] == module_name: +<% index = index + 1 %>\ + % else: +<% + module_name = alert["module_name"] + index = 0 +%>\ + % endif +assign alert_if[${loop.index}].alert_tx = `CHIP_HIER.u_${module_name}.alert_tx_o[${index}]; +% endfor diff --git a/utils/reggen/topgen/templates/tb__xbar_connect.sv.tpl b/utils/reggen/topgen/templates/tb__xbar_connect.sv.tpl new file mode 100644 index 0000000..d491095 --- /dev/null +++ b/utils/reggen/topgen/templates/tb__xbar_connect.sv.tpl @@ -0,0 +1,124 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 +// +// tb__xbar_connect generated by `topgen.py` tool +<% +from collections import OrderedDict +import topgen.lib as lib + +top_hier = 'tb.dut.top_' + top["name"] + '.' +clk_hier = top_hier + top["clocks"]["hier_paths"]["top"] + +clk_src = OrderedDict() +for xbar in top["xbar"]: + for clk, src in xbar["clock_srcs"].items(): + clk_src[clk] = src + +clk_freq = OrderedDict() +for clock in top["clocks"]["srcs"] + top["clocks"]["derived_srcs"]: + if clock["name"] in clk_src.values(): + clk_freq[clock["name"]] = clock["freq"] + +hosts = OrderedDict() +devices = OrderedDict() +for xbar in top["xbar"]: + for node in xbar["nodes"]: + if node["type"] == "host" and not node["xbar"]: + hosts[node["name"]] = "clk_" + clk_src[node["clock"]] + elif node["type"] == "device" and not node["xbar"]: + devices[node["name"]] = "clk_" + clk_src[node["clock"]] + +def escape_if_name(qual_if_name): + return qual_if_name.replace('.', '__') + +%>\ +<%text> +`define DRIVE_CHIP_TL_HOST_IF(tl_name, inst_name, sig_name) \ + force ``tl_name``_tl_if.d2h = dut.top_earlgrey.u_``inst_name``.``sig_name``_i; \ + force dut.top_earlgrey.u_``inst_name``.``sig_name``_o = ``tl_name``_tl_if.h2d; \ + force dut.top_earlgrey.u_``inst_name``.clk_i = 0; \ + uvm_config_db#(virtual tl_if)::set(null, $sformatf("*%0s*", `"tl_name`"), "vif", \ + ``tl_name``_tl_if); + +`define DRIVE_CHIP_TL_DEVICE_IF(tl_name, inst_name, sig_name) \ + force ``tl_name``_tl_if.h2d = dut.top_earlgrey.u_``inst_name``.``sig_name``_i; \ + force dut.top_earlgrey.u_``inst_name``.``sig_name``_o = ``tl_name``_tl_if.d2h; \ + force dut.top_earlgrey.u_``inst_name``.clk_i = 0; \ + uvm_config_db#(virtual tl_if)::set(null, $sformatf("*%0s*", `"tl_name`"), "vif", \ + ``tl_name``_tl_if); + +`define DRIVE_CHIP_TL_EXT_DEVICE_IF(tl_name, port_name) \ + force ``tl_name``_tl_if.h2d = dut.top_earlgrey.``port_name``_req_o; \ + force dut.top_earlgrey.``port_name``_rsp_i = ``tl_name``_tl_if.d2h; \ + uvm_config_db#(virtual tl_if)::set(null, $sformatf("*%0s*", `"tl_name`"), "vif", \ + ``tl_name``_tl_if); +\ + +% for c in clk_freq.keys(): +wire clk_${c}; +clk_rst_if clk_rst_if_${c}(.clk(clk_${c}), .rst_n(rst_n)); +% endfor + +% for i, clk in hosts.items(): +tl_if ${escape_if_name(i)}_tl_if(${clk}, rst_n); +% endfor + +% for i, clk in devices.items(): +tl_if ${escape_if_name(i)}_tl_if(${clk}, rst_n); +% endfor + +initial begin + bit xbar_mode; + void'($value$plusargs("xbar_mode=%0b", xbar_mode)); + if (xbar_mode) begin + // only enable assertions in xbar as many pins are unconnected + $assertoff(0, tb); +% for xbar in top["xbar"]: + $asserton(0, tb.dut.top_${top["name"]}.u_xbar_${xbar["name"]}); +% endfor + +% for c in clk_freq.keys(): + clk_rst_if_${c}.set_active(.drive_rst_n_val(0)); + clk_rst_if_${c}.set_freq_khz(${clk_freq[c]} / 1000); +% endfor + + // bypass clkmgr, force clocks directly +% for xbar in top["xbar"]: + % for clk, src in xbar["clock_srcs"].items(): + force ${top_hier}u_xbar_${xbar["name"]}.${clk} = clk_${src}; + % endfor +% endfor + + // bypass rstmgr, force resets directly +% for xbar in top["xbar"]: + % for rst in xbar["reset_connections"]: + force ${top_hier}u_xbar_${xbar["name"]}.${rst} = rst_n; + % endfor +% endfor + +% for xbar in top["xbar"]: + % for node in xbar["nodes"]: +<% +clk = 'clk_' + clk_src[node["clock"]] +esc_name = node['name'].replace('.', '__') +inst_sig_list = lib.find_otherside_modules(top, xbar["name"], 'tl_' + esc_name) +inst_name = inst_sig_list[0][1] +sig_name = inst_sig_list[0][2] + +%>\ + % if 
node["type"] == "host" and not node["xbar"]: + `DRIVE_CHIP_TL_HOST_IF(${esc_name}, ${inst_name}, ${sig_name}) + % elif node["type"] == "device" and not node["xbar"] and node["stub"]: + `DRIVE_CHIP_TL_EXT_DEVICE_IF(${esc_name}, ${inst_name}_${sig_name}) + % elif node["type"] == "device" and not node["xbar"]: + `DRIVE_CHIP_TL_DEVICE_IF(${esc_name}, ${inst_name}, ${sig_name}) + % endif + % endfor +% endfor + end +end + +`undef DRIVE_CHIP_TL_HOST_IF +`undef DRIVE_CHIP_TL_DEVICE_IF +`undef DRIVE_CHIP_TL_EXT_DEVICE_IF diff --git a/utils/reggen/topgen/templates/toplevel.c.tpl b/utils/reggen/topgen/templates/toplevel.c.tpl new file mode 100644 index 0000000..4cfbabe --- /dev/null +++ b/utils/reggen/topgen/templates/toplevel.c.tpl @@ -0,0 +1,21 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +#include "${helper.header_path}" + +/** + * PLIC Interrupt Source to Peripheral Map + * + * This array is a mapping from `${helper.plic_interrupts.name.as_c_type()}` to + * `${helper.plic_sources.name.as_c_type()}`. + */ +${helper.plic_mapping.render_definition()} + +/** + * Alert Handler Alert Source to Peripheral Map + * + * This array is a mapping from `${helper.alert_alerts.name.as_c_type()}` to + * `${helper.alert_sources.name.as_c_type()}`. + */ +${helper.alert_mapping.render_definition()} diff --git a/utils/reggen/topgen/templates/toplevel.h.tpl b/utils/reggen/topgen/templates/toplevel.h.tpl new file mode 100644 index 0000000..24ba410 --- /dev/null +++ b/utils/reggen/topgen/templates/toplevel.h.tpl @@ -0,0 +1,201 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +#ifndef _TOP_${top["name"].upper()}_H_ +#define _TOP_${top["name"].upper()}_H_ + +/** + * @file + * @brief Top-specific Definitions + * + * This file contains preprocessor and type definitions for use within the + * device C/C++ codebase. + * + * These definitions are for information that depends on the top-specific chip + * configuration, which includes: + * - Device Memory Information (for Peripherals and Memory) + * - PLIC Interrupt ID Names and Source Mappings + * - Alert ID Names and Source Mappings + * - Pinmux Pin/Select Names + * - Power Manager Wakeups + */ + +#ifdef __cplusplus +extern "C" { +#endif + +% for (inst_name, if_name), region in helper.devices(): +<% + if_desc = inst_name if if_name is None else '{} device on {}'.format(if_name, inst_name) + hex_base_addr = "0x{:X}u".format(region.base_addr) + hex_size_bytes = "0x{:X}u".format(region.size_bytes) + + base_addr_name = region.base_addr_name().as_c_define() + size_bytes_name = region.size_bytes_name().as_c_define() + +%>\ +/** + * Peripheral base address for ${if_desc} in top ${top["name"]}. + * + * This should be used with #mmio_region_from_addr to access the memory-mapped + * registers associated with the peripheral (usually via a DIF). + */ +#define ${base_addr_name} ${hex_base_addr} + +/** + * Peripheral size for ${if_desc} in top ${top["name"]}. + * + * This is the size (in bytes) of the peripheral's reserved memory area. All + * memory-mapped registers associated with this peripheral should have an + * address between #${base_addr_name} and + * `${base_addr_name} + ${size_bytes_name}`. 
+ */ +#define ${size_bytes_name} ${hex_size_bytes} + +% endfor + +% for name, region in helper.memories(): +<% + hex_base_addr = "0x{:X}u".format(region.base_addr) + hex_size_bytes = "0x{:X}u".format(region.size_bytes) + + base_addr_name = region.base_addr_name().as_c_define() + size_bytes_name = region.size_bytes_name().as_c_define() + +%>\ +/** + * Memory base address for ${name} in top ${top["name"]}. + */ +#define ${base_addr_name} ${hex_base_addr} + +/** + * Memory size for ${name} in top ${top["name"]}. + */ +#define ${size_bytes_name} ${hex_size_bytes} + +% endfor + +/** + * PLIC Interrupt Source Peripheral. + * + * Enumeration used to determine which peripheral asserted the corresponding + * interrupt. + */ +${helper.plic_sources.render()} + +/** + * PLIC Interrupt Source. + * + * Enumeration of all PLIC interrupt sources. The interrupt sources belonging to + * the same peripheral are guaranteed to be consecutive. + */ +${helper.plic_interrupts.render()} + +/** + * PLIC Interrupt Source to Peripheral Map + * + * This array is a mapping from `${helper.plic_interrupts.name.as_c_type()}` to + * `${helper.plic_sources.name.as_c_type()}`. + */ +${helper.plic_mapping.render_declaration()} + +/** + * PLIC Interrupt Target. + * + * Enumeration used to determine which set of IE, CC, threshold registers to + * access for a given interrupt target. + */ +${helper.plic_targets.render()} + +/** + * Alert Handler Source Peripheral. + * + * Enumeration used to determine which peripheral asserted the corresponding + * alert. + */ +${helper.alert_sources.render()} + +/** + * Alert Handler Alert Source. + * + * Enumeration of all Alert Handler Alert Sources. The alert sources belonging to + * the same peripheral are guaranteed to be consecutive. + */ +${helper.alert_alerts.render()} + +/** + * Alert Handler Alert Source to Peripheral Map + * + * This array is a mapping from `${helper.alert_alerts.name.as_c_type()}` to + * `${helper.alert_sources.name.as_c_type()}`. + */ +${helper.alert_mapping.render_declaration()} + +#define PINMUX_MIO_PERIPH_INSEL_IDX_OFFSET 2 + +// PERIPH_INSEL ranges from 0 to NUM_MIO_PADS + 2 -1} +// 0 and 1 are tied to value 0 and 1 +#define NUM_MIO_PADS ${top["pinmux"]["io_counts"]["muxed"]["pads"]} +#define NUM_DIO_PADS ${top["pinmux"]["io_counts"]["dedicated"]["inouts"] + \ + top["pinmux"]["io_counts"]["dedicated"]["inputs"] + \ + top["pinmux"]["io_counts"]["dedicated"]["outputs"] } + +#define PINMUX_PERIPH_OUTSEL_IDX_OFFSET 3 + +/** + * Pinmux Peripheral Input. + */ +${helper.pinmux_peripheral_in.render()} + +/** + * Pinmux MIO Input Selector. + */ +${helper.pinmux_insel.render()} + +/** + * Pinmux MIO Output. + */ +${helper.pinmux_mio_out.render()} + +/** + * Pinmux Peripheral Output Selector. + */ +${helper.pinmux_outsel.render()} + +/** + * Power Manager Wakeup Signals + */ +${helper.pwrmgr_wakeups.render()} + +/** + * Reset Manager Software Controlled Resets + */ +${helper.rstmgr_sw_rsts.render()} + +/** + * Power Manager Reset Request Signals + */ +${helper.pwrmgr_reset_requests.render()} + +/** + * Clock Manager Software-Controlled ("Gated") Clocks. + * + * The Software has full control over these clocks. + */ +${helper.clkmgr_gateable_clocks.render()} + +/** + * Clock Manager Software-Hinted Clocks. + * + * The Software has partial control over these clocks. It can ask them to stop, + * but the clock manager is in control of whether the clock actually is stopped. 
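+ * (In practice the clock manager only stops a hinted clock once the
+ * corresponding block reports it is idle.)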
+ */ +${helper.clkmgr_hintable_clocks.render()} + +// Header Extern Guard +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _TOP_${top["name"].upper()}_H_ diff --git a/utils/reggen/topgen/templates/toplevel.sv.tpl b/utils/reggen/topgen/templates/toplevel.sv.tpl new file mode 100644 index 0000000..f0248b3 --- /dev/null +++ b/utils/reggen/topgen/templates/toplevel.sv.tpl @@ -0,0 +1,832 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 +${gencmd} +<% +import re +import topgen.lib as lib + +num_mio_inputs = top['pinmux']['io_counts']['muxed']['inouts'] + \ + top['pinmux']['io_counts']['muxed']['inputs'] +num_mio_outputs = top['pinmux']['io_counts']['muxed']['inouts'] + \ + top['pinmux']['io_counts']['muxed']['outputs'] +num_mio_pads = top['pinmux']['io_counts']['muxed']['pads'] + +num_dio_inputs = top['pinmux']['io_counts']['dedicated']['inouts'] + \ + top['pinmux']['io_counts']['dedicated']['inputs'] +num_dio_outputs = top['pinmux']['io_counts']['dedicated']['inouts'] + \ + top['pinmux']['io_counts']['dedicated']['outputs'] +num_dio_total = top['pinmux']['io_counts']['dedicated']['inouts'] + \ + top['pinmux']['io_counts']['dedicated']['inputs'] + \ + top['pinmux']['io_counts']['dedicated']['outputs'] + +num_im = sum([x["width"] if "width" in x else 1 for x in top["inter_signal"]["external"]]) + +max_sigwidth = max([x["width"] if "width" in x else 1 for x in top["pinmux"]["ios"]]) +max_sigwidth = len("{}".format(max_sigwidth)) + +clks_attr = top['clocks'] +cpu_clk = top['clocks']['hier_paths']['top'] + "clk_proc_main" +cpu_rst = top["reset_paths"]["sys"] +dm_rst = top["reset_paths"]["lc"] +esc_clk = top['clocks']['hier_paths']['top'] + "clk_io_div4_timers" +esc_rst = top["reset_paths"]["sys_io_div4"] + +unused_resets = lib.get_unused_resets(top) +unused_im_defs, undriven_im_defs = lib.get_dangling_im_def(top["inter_signal"]["definitions"]) + +has_toplevel_rom = False +for m in top['memory']: + if m['type'] == 'rom': + has_toplevel_rom = True + +%>\ +module top_${top["name"]} #( + // Auto-inferred parameters +% for m in top["module"]: + % if not lib.is_inst(m): +<% continue %> + % endif + % for p_exp in filter(lambda p: p.get("expose") == "true", m["param_list"]): + parameter ${p_exp["type"]} ${p_exp["name_top"]} = ${p_exp["default"]}, + % endfor +% endfor + + // Manually defined parameters +% if has_toplevel_rom: + parameter BootRomInitFile = "", +% endif + parameter ibex_pkg::regfile_e IbexRegFile = ibex_pkg::RegFileFF, + parameter bit IbexICache = 1, + parameter bit IbexPipeLine = 0, + parameter bit SecureIbex = 1 +) ( + // Reset, clocks defined as part of intermodule + input rst_ni, + +% if num_mio_pads != 0: + // Multiplexed I/O + input ${lib.bitarray(num_mio_pads, max_sigwidth)} mio_in_i, + output logic ${lib.bitarray(num_mio_pads, max_sigwidth)} mio_out_o, + output logic ${lib.bitarray(num_mio_pads, max_sigwidth)} mio_oe_o, +% endif +% if num_dio_total != 0: + // Dedicated I/O + input ${lib.bitarray(num_dio_total, max_sigwidth)} dio_in_i, + output logic ${lib.bitarray(num_dio_total, max_sigwidth)} dio_out_o, + output logic ${lib.bitarray(num_dio_total, max_sigwidth)} dio_oe_o, +% endif + +% if "pinmux" in top: + // pad attributes to padring + output prim_pad_wrapper_pkg::pad_attr_t [pinmux_reg_pkg::NMioPads-1:0] mio_attr_o, + output prim_pad_wrapper_pkg::pad_attr_t [pinmux_reg_pkg::NDioPads-1:0] dio_attr_o, +% endif + +% if num_im != 0: + + // Inter-module Signal External 
type + % for sig in top["inter_signal"]["external"]: + ${"input " if sig["direction"] == "in" else "output"} ${lib.im_defname(sig)} ${lib.bitarray(sig["width"],1)} ${sig["signame"]}, + % endfor + + // Flash specific voltages + inout [1:0] flash_test_mode_a_io, + inout flash_test_voltage_h_io, + + // OTP specific voltages + inout otp_ext_voltage_h_io, + +% endif + input scan_rst_ni, // reset used for test mode + input scan_en_i, + input lc_ctrl_pkg::lc_tx_t scanmode_i // lc_ctrl_pkg::On for Scan +); + + // JTAG IDCODE for development versions of this code. + // Manufacturers of OpenTitan chips must replace this code with one of their + // own IDs. + // Field structure as defined in the IEEE 1149.1 (JTAG) specification, + // section 12.1.1. + localparam logic [31:0] JTAG_IDCODE = { + 4'h0, // Version + 16'h4F54, // Part Number: "OT" + 11'h426, // Manufacturer Identity: Google + 1'b1 // (fixed) + }; + + import tlul_pkg::*; + import top_pkg::*; + import tl_main_pkg::*; + import top_${top["name"]}_pkg::*; + // Compile-time random constants + import top_${top["name"]}_rnd_cnst_pkg::*; + + // Signals + logic [${num_mio_inputs - 1}:0] mio_p2d; + logic [${num_mio_outputs - 1}:0] mio_d2p; + logic [${num_mio_outputs - 1}:0] mio_en_d2p; + logic [${num_dio_total - 1}:0] dio_p2d; + logic [${num_dio_total - 1}:0] dio_d2p; + logic [${num_dio_total - 1}:0] dio_en_d2p; +% for m in top["module"]: + % if not lib.is_inst(m): +<% continue %> + % endif +<% + block = name_to_block[m['type']] + inouts, inputs, outputs = block.xputs +%>\ + // ${m["name"]} + % for p_in in inputs + inouts: + logic ${lib.bitarray(p_in.bits.width(), max_sigwidth)} cio_${m["name"]}_${p_in.name}_p2d; + % endfor + % for p_out in outputs + inouts: + logic ${lib.bitarray(p_out.bits.width(), max_sigwidth)} cio_${m["name"]}_${p_out.name}_d2p; + logic ${lib.bitarray(p_out.bits.width(), max_sigwidth)} cio_${m["name"]}_${p_out.name}_en_d2p; + % endfor +% endfor + + +<% + # Interrupt source 0 is tied to 0 to conform RISC-V PLIC spec. 
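+  # (The RISC-V PLIC reserves interrupt ID 0 to mean "no interrupt pending",
+  #  which is why source 0 is hardwired to zero here.)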
+ # So, total number of interrupts are the number of entries in the list + 1 + interrupt_num = sum([x["width"] if "width" in x else 1 for x in top["interrupt"]]) + 1 +%>\ + logic [${interrupt_num-1}:0] intr_vector; + // Interrupt source list +% for m in top["module"]: +<% + block = name_to_block[m['type']] +%>\ + % if not lib.is_inst(m): +<% continue %> + % endif + % for intr in block.interrupts: + % if intr.bits.width() != 1: + logic [${intr.bits.width()-1}:0] intr_${m["name"]}_${intr.name}; + % else: + logic intr_${m["name"]}_${intr.name}; + % endif + % endfor +% endfor + + +<% add_spaces = " " * len(str((interrupt_num-1).bit_length()-1)) %> + logic [0:0]${add_spaces}irq_plic; + logic [0:0]${add_spaces}msip; + logic [${(interrupt_num-1).bit_length()-1}:0] irq_id[1]; + logic [${(interrupt_num-1).bit_length()-1}:0] unused_irq_id[1]; + + // this avoids lint errors + assign unused_irq_id = irq_id; + + // Alert list + prim_alert_pkg::alert_tx_t [alert_pkg::NAlerts-1:0] alert_tx; + prim_alert_pkg::alert_rx_t [alert_pkg::NAlerts-1:0] alert_rx; + +% if not top["alert"]: + for (genvar k = 0; k < alert_pkg::NAlerts; k++) begin : gen_alert_tie_off + // tie off if no alerts present in the system + assign alert_tx[k].alert_p = 1'b0; + assign alert_tx[k].alert_n = 1'b1; + end +% endif + +## Inter-module Definitions +% if len(top["inter_signal"]["definitions"]) >= 1: + // define inter-module signals +% endif +% for sig in top["inter_signal"]["definitions"]: + ${lib.im_defname(sig)} ${lib.bitarray(sig["width"],1)} ${sig["signame"]}; +% endfor + +## Mixed connection to port +## Index greater than 0 means a port is assigned to an inter-module array +## whereas an index of 0 means a port is directly driven by a module + // define mixed connection to port +% for port in top['inter_signal']['external']: + % if port['conn_type'] and port['index'] > 0: + % if port['direction'] == 'in': + assign ${port['netname']}[${port['index']}] = ${port['signame']}; + % else: + assign ${port['signame']} = ${port['netname']}[${port['index']}]; + % endif + % elif port['conn_type']: + % if port['direction'] == 'in': + assign ${port['netname']} = ${port['signame']}; + % else: + assign ${port['signame']} = ${port['netname']}; + % endif + % endif +% endfor + +## Partial inter-module definition tie-off + // define partial inter-module tie-off +% for sig in unused_im_defs: + % for idx in range(sig['end_idx'], sig['width']): + ${lib.im_defname(sig)} unused_${sig["signame"]}${idx}; + % endfor +% endfor + + // assign partial inter-module tie-off +% for sig in unused_im_defs: + % for idx in range(sig['end_idx'], sig['width']): + assign unused_${sig["signame"]}${idx} = ${sig["signame"]}[${idx}]; + % endfor +% endfor +% for sig in undriven_im_defs: + % for idx in range(sig['end_idx'], sig['width']): + assign ${sig["signame"]}[${idx}] = ${sig["default"]}; + % endfor +% endfor + +## Inter-module signal collection + + // Unused reset signals +% for k, v in unused_resets.items(): + logic unused_d${v.lower()}_rst_${k}; +% endfor +% for k, v in unused_resets.items(): + assign unused_d${v.lower()}_rst_${k} = ${lib.get_reset_path(k, v, top['resets'])}; +% endfor + + // Non-debug module reset == reset for everything except for the debug module + logic ndmreset_req; + + // debug request from rv_dm to core + logic debug_req; + + // processor core + rv_core_ibex #( + .PMPEnable (1), + .PMPGranularity (0), // 2^(PMPGranularity+2) == 4 byte granularity + .PMPNumRegions (16), + .MHPMCounterNum (10), + .MHPMCounterWidth (32), + .RV32E (0), + .RV32M 
(ibex_pkg::RV32MSingleCycle), + .RV32B (ibex_pkg::RV32BNone), + .RegFile (IbexRegFile), + .BranchTargetALU (1), + .WritebackStage (1), + .ICache (IbexICache), + .ICacheECC (1), + .BranchPredictor (0), + .DbgTriggerEn (1), + .SecureIbex (SecureIbex), + .DmHaltAddr (ADDR_SPACE_DEBUG_MEM + dm::HaltAddress[31:0]), + .DmExceptionAddr (ADDR_SPACE_DEBUG_MEM + dm::ExceptionAddress[31:0]), + .PipeLine (IbexPipeLine) + ) u_rv_core_ibex ( + // clock and reset + .clk_i (${cpu_clk}), + .rst_ni (${cpu_rst}[rstmgr_pkg::Domain0Sel]), + .clk_esc_i (${esc_clk}), + .rst_esc_ni (${esc_rst}[rstmgr_pkg::Domain0Sel]), + .ram_cfg_i (ast_ram_1p_cfg), + // static pinning + .hart_id_i (32'b0), + .boot_addr_i (ADDR_SPACE_ROM_CTRL__ROM), + // TL-UL buses + .tl_i_o (main_tl_corei_req), + .tl_i_i (main_tl_corei_rsp), + .tl_d_o (main_tl_cored_req), + .tl_d_i (main_tl_cored_rsp), + // interrupts + .irq_software_i (msip), + .irq_timer_i (intr_rv_timer_timer_expired_0_0), + .irq_external_i (irq_plic), + // escalation input from alert handler (NMI) + .esc_tx_i (alert_handler_esc_tx[0]), + .esc_rx_o (alert_handler_esc_rx[0]), + // debug interface + .debug_req_i (debug_req), + // crash dump interface + .crash_dump_o (rv_core_ibex_crash_dump), + // CPU control signals + .lc_cpu_en_i (lc_ctrl_lc_cpu_en), + .pwrmgr_cpu_en_i (pwrmgr_aon_fetch_en), + .core_sleep_o (pwrmgr_aon_pwr_cpu.core_sleeping), + + // dft bypass + .scan_rst_ni, + .scanmode_i + ); + + // Debug Module (RISC-V Debug Spec 0.13) + // + + rv_dm #( + .NrHarts (1), + .IdcodeValue (JTAG_IDCODE) + ) u_dm_top ( + .clk_i (${cpu_clk}), + .rst_ni (${dm_rst}[rstmgr_pkg::Domain0Sel]), + .hw_debug_en_i (lc_ctrl_lc_hw_debug_en), + .scanmode_i, + .scan_rst_ni, + .ndmreset_o (ndmreset_req), + .dmactive_o (), + .debug_req_o (debug_req), + .unavailable_i (1'b0), + + // bus device with debug memory (for execution-based debug) + .tl_d_i (main_tl_debug_mem_req), + .tl_d_o (main_tl_debug_mem_rsp), + + // bus host (for system bus accesses, SBA) + .tl_h_o (main_tl_dm_sba_req), + .tl_h_i (main_tl_dm_sba_rsp), + + //JTAG + .jtag_req_i (pinmux_aon_rv_jtag_req), + .jtag_rsp_o (pinmux_aon_rv_jtag_rsp) + ); + + assign rstmgr_aon_cpu.ndmreset_req = ndmreset_req; + assign rstmgr_aon_cpu.rst_cpu_n = ${top["reset_paths"]["sys"]}[rstmgr_pkg::Domain0Sel]; + +## Memory Instantiation +% for m in top["memory"]: +<% + resets = m['reset_connections'] + clocks = m['clock_connections'] +%>\ + % if m["type"] == "ram_1p_scr": +<% + data_width = int(top["datawidth"]) + full_data_width = data_width + int(m["integ_width"]) + dw_byte = data_width // 8 + addr_width = ((int(m["size"], 0) // dw_byte) -1).bit_length() + sram_depth = (int(m["size"], 0) // dw_byte) + max_char = len(str(max(data_width, addr_width))) +%>\ + // sram device + logic ${lib.bitarray(1, max_char)} ${m["name"]}_req; + logic ${lib.bitarray(1, max_char)} ${m["name"]}_gnt; + logic ${lib.bitarray(1, max_char)} ${m["name"]}_we; + logic ${lib.bitarray(1, max_char)} ${m["name"]}_intg_err; + logic ${lib.bitarray(addr_width, max_char)} ${m["name"]}_addr; + logic ${lib.bitarray(full_data_width, max_char)} ${m["name"]}_wdata; + logic ${lib.bitarray(full_data_width, max_char)} ${m["name"]}_wmask; + logic ${lib.bitarray(full_data_width, max_char)} ${m["name"]}_rdata; + logic ${lib.bitarray(1, max_char)} ${m["name"]}_rvalid; + logic ${lib.bitarray(2, max_char)} ${m["name"]}_rerror; + + tlul_adapter_sram #( + .SramAw(${addr_width}), + .SramDw(${data_width}), + .Outstanding(2), + .CmdIntgCheck(1), + .EnableRspIntgGen(1), + .EnableDataIntgGen(0), + 
.EnableDataIntgPt(1) + ) u_tl_adapter_${m["name"]} ( + % for key in clocks: + .${key} (${clocks[key]}), + % endfor + % for key, value in resets.items(): + .${key} (${value}), + % endfor + .tl_i (${m["name"]}_tl_req), + .tl_o (${m["name"]}_tl_rsp), + .en_ifetch_i (${m["inter_signal_list"][3]["top_signame"]}), + .req_o (${m["name"]}_req), + .req_type_o (), + .gnt_i (${m["name"]}_gnt), + .we_o (${m["name"]}_we), + .addr_o (${m["name"]}_addr), + .wdata_o (${m["name"]}_wdata), + .wmask_o (${m["name"]}_wmask), + .intg_error_o(${m["name"]}_intg_err), + .rdata_i (${m["name"]}_rdata), + .rvalid_i (${m["name"]}_rvalid), + .rerror_i (${m["name"]}_rerror) + ); + +<% +mem_name = m["name"].split("_") +mem_name = lib.Name(mem_name[1:]) +%>\ + prim_ram_1p_scr #( + .Width(${full_data_width}), + .Depth(${sram_depth}), + .EnableParity(0), + .LfsrWidth(${data_width}), + .StatePerm(RndCnstSramCtrl${mem_name.as_camel_case()}SramLfsrPerm), + .DataBitsPerMask(1), // TODO: Temporary change to ensure byte updates can still be done + .DiffWidth(8) + ) u_ram1p_${m["name"]} ( + % for key in clocks: + .${key} (${clocks[key]}), + % endfor + % for key, value in resets.items(): + .${key} (${value}), + % endfor + + .key_valid_i (${m["inter_signal_list"][1]["top_signame"]}_req.valid), + .key_i (${m["inter_signal_list"][1]["top_signame"]}_req.key), + .nonce_i (${m["inter_signal_list"][1]["top_signame"]}_req.nonce), + .init_req_i (${m["inter_signal_list"][2]["top_signame"]}_req.req), + .init_seed_i (${m["inter_signal_list"][2]["top_signame"]}_req.seed), + .init_ack_o (${m["inter_signal_list"][2]["top_signame"]}_rsp.ack), + + .req_i (${m["name"]}_req), + .intg_error_i(${m["name"]}_intg_err), + .gnt_o (${m["name"]}_gnt), + .write_i (${m["name"]}_we), + .addr_i (${m["name"]}_addr), + .wdata_i (${m["name"]}_wdata), + .wmask_i (${m["name"]}_wmask), + .rdata_o (${m["name"]}_rdata), + .rvalid_o (${m["name"]}_rvalid), + .rerror_o (${m["name"]}_rerror), + .raddr_o (${m["inter_signal_list"][1]["top_signame"]}_rsp.raddr), + .intg_error_o(${m["inter_signal_list"][4]["top_signame"]}), + .cfg_i (ram_1p_cfg_i) + ); + + assign ${m["inter_signal_list"][1]["top_signame"]}_rsp.rerror = ${m["name"]}_rerror; + + % elif m["type"] == "rom": +<% + data_width = int(top["datawidth"]) + full_data_width = data_width + int(m['integ_width']) + dw_byte = data_width // 8 + addr_width = ((int(m["size"], 0) // dw_byte) -1).bit_length() + rom_depth = (int(m["size"], 0) // dw_byte) + max_char = len(str(max(data_width, addr_width))) +%>\ + // ROM device + logic ${lib.bitarray(1, max_char)} ${m["name"]}_req; + logic ${lib.bitarray(addr_width, max_char)} ${m["name"]}_addr; + logic ${lib.bitarray(full_data_width, max_char)} ${m["name"]}_rdata; + logic ${lib.bitarray(1, max_char)} ${m["name"]}_rvalid; + + tlul_adapter_sram #( + .SramAw(${addr_width}), + .SramDw(${data_width}), + .Outstanding(2), + .ErrOnWrite(1), + .CmdIntgCheck(1), + .EnableRspIntgGen(1), + .EnableDataIntgGen(1) // TODO: Needs to be updated for intgerity passthrough + ) u_tl_adapter_${m["name"]} ( + % for key in clocks: + .${key} (${clocks[key]}), + % endfor + % for key, value in resets.items(): + .${key} (${value}), + % endfor + + .tl_i (${m["name"]}_tl_req), + .tl_o (${m["name"]}_tl_rsp), + .en_ifetch_i (tlul_pkg::InstrEn), + .req_o (${m["name"]}_req), + .req_type_o (), + .gnt_i (1'b1), // Always grant as only one requester exists + .we_o (), + .addr_o (${m["name"]}_addr), + .wdata_o (), + .wmask_o (), + .intg_error_o(), // Connect to ROM checker and ROM scramble later + .rdata_i 
(${m["name"]}_rdata[${data_width-1}:0]), + .rvalid_i (${m["name"]}_rvalid), + .rerror_i (2'b00) + ); + + prim_rom_adv #( + .Width(${full_data_width}), + .Depth(${rom_depth}), + .MemInitFile(BootRomInitFile) + ) u_rom_${m["name"]} ( + % for key in clocks: + .${key} (${clocks[key]}), + % endfor + % for key, value in resets.items(): + .${key} (${value}), + % endfor + .req_i (${m["name"]}_req), + .addr_i (${m["name"]}_addr), + .rdata_o (${m["name"]}_rdata), + .rvalid_o (${m["name"]}_rvalid), + .cfg_i (rom_cfg_i) + ); + + % elif m["type"] == "eflash": + + // host to flash communication + logic flash_host_req; + tlul_pkg::tl_type_e flash_host_req_type; + logic flash_host_req_rdy; + logic flash_host_req_done; + logic flash_host_rderr; + logic [flash_ctrl_pkg::BusWidth-1:0] flash_host_rdata; + logic [flash_ctrl_pkg::BusAddrW-1:0] flash_host_addr; + logic flash_host_intg_err; + + tlul_adapter_sram #( + .SramAw(flash_ctrl_pkg::BusAddrW), + .SramDw(flash_ctrl_pkg::BusWidth), + .Outstanding(2), + .ByteAccess(0), + .ErrOnWrite(1), + .CmdIntgCheck(1), + .EnableRspIntgGen(1), + .EnableDataIntgGen(1) + ) u_tl_adapter_${m["name"]} ( + % for key in clocks: + .${key} (${clocks[key]}), + % endfor + % for key, value in resets.items(): + .${key} (${value}), + % endfor + + .tl_i (${m["name"]}_tl_req), + .tl_o (${m["name"]}_tl_rsp), + .en_ifetch_i (tlul_pkg::InstrEn), // tie this to secure boot somehow + .req_o (flash_host_req), + .req_type_o (flash_host_req_type), + .gnt_i (flash_host_req_rdy), + .we_o (), + .addr_o (flash_host_addr), + .wdata_o (), + .wmask_o (), + .intg_error_o(flash_host_intg_err), + .rdata_i (flash_host_rdata), + .rvalid_i (flash_host_req_done), + .rerror_i ({flash_host_rderr,1'b0}) + ); + + flash_phy u_flash_${m["name"]} ( + % for key in clocks: + .${key} (${clocks[key]}), + % endfor + % for key, value in resets.items(): + .${key} (${value}), + % endfor + .host_req_i (flash_host_req), + .host_intg_err_i (flash_host_intg_err), + .host_req_type_i (flash_host_req_type), + .host_addr_i (flash_host_addr), + .host_req_rdy_o (flash_host_req_rdy), + .host_req_done_o (flash_host_req_done), + .host_rderr_o (flash_host_rderr), + .host_rdata_o (flash_host_rdata), + .flash_ctrl_i (${m["inter_signal_list"][0]["top_signame"]}_req), + .flash_ctrl_o (${m["inter_signal_list"][0]["top_signame"]}_rsp), + .lc_nvm_debug_en_i (${m["inter_signal_list"][2]["top_signame"]}), + .flash_bist_enable_i, + .flash_power_down_h_i, + .flash_power_ready_h_i, + .flash_test_mode_a_io, + .flash_test_voltage_h_io, + .flash_alert_o, + .scanmode_i, + .scan_en_i, + .scan_rst_ni + ); + + % else: + // flash memory is embedded within controller + % endif +% endfor +## Peripheral Instantiation + +<% alert_idx = 0 %> +% for m in top["module"]: +<% +if not lib.is_inst(m): + continue + +block = name_to_block[m['type']] +inouts, inputs, outputs = block.xputs + +port_list = inputs + outputs + inouts +max_sigwidth = max(len(x.name) for x in port_list) if port_list else 0 +max_intrwidth = (max(len(x.name) for x in block.interrupts) + if block.interrupts else 0) +%>\ + % if m["param_list"] or block.alerts: + ${m["type"]} #( + % if block.alerts: +<% +w = len(block.alerts) +slice = str(alert_idx+w-1) + ":" + str(alert_idx) +%>\ + .AlertAsyncOn(alert_handler_reg_pkg::AsyncOn[${slice}])${"," if m["param_list"] else ""} + % endif + % for i in m["param_list"]: + .${i["name"]}(${i["name_top" if i.get("expose") == "true" or i.get("randtype", "none") != "none" else "default"]})${"," if not loop.last else ""} + % endfor + ) u_${m["name"]} ( + % else: + 
${m["type"]} u_${m["name"]} ( + % endif + % for p_in in inputs + inouts: + % if loop.first: + + // Input + % endif + .${lib.ljust("cio_"+p_in.name+"_i",max_sigwidth+9)} (cio_${m["name"]}_${p_in.name}_p2d), + % endfor + % for p_out in outputs + inouts: + % if loop.first: + + // Output + % endif + .${lib.ljust("cio_"+p_out.name+"_o", max_sigwidth+9)} (cio_${m["name"]}_${p_out.name}_d2p), + .${lib.ljust("cio_"+p_out.name+"_en_o",max_sigwidth+9)} (cio_${m["name"]}_${p_out.name}_en_d2p), + % endfor + % for intr in block.interrupts: + % if loop.first: + + // Interrupt + % endif + .${lib.ljust("intr_"+intr.name+"_o",max_intrwidth+7)} (intr_${m["name"]}_${intr.name}), + % endfor + % if block.alerts: + % for alert in block.alerts: + // [${alert_idx}]: ${alert.name}<% alert_idx += 1 %> + % endfor + .alert_tx_o ( alert_tx[${slice}] ), + .alert_rx_i ( alert_rx[${slice}] ), + % endif + ## TODO: Inter-module Connection + % if m.get('inter_signal_list'): + + // Inter-module signals + % for sig in m['inter_signal_list']: + ## TODO: handle below condition in lib.py + % if sig['type'] == "req_rsp": + .${lib.im_portname(sig,"req")}(${lib.im_netname(sig, "req")}), + .${lib.im_portname(sig,"rsp")}(${lib.im_netname(sig, "rsp")}), + % elif sig['type'] == "uni": + ## TODO: Broadcast type + ## TODO: default for logic type + .${lib.im_portname(sig)}(${lib.im_netname(sig)}), + % endif + % endfor + % endif + % if m["type"] == "rv_plic": + + .intr_src_i (intr_vector), + .irq_o (irq_plic), + .irq_id_o (irq_id), + .msip_o (msip), + % endif + % if m["type"] == "pinmux": + + .periph_to_mio_i (mio_d2p ), + .periph_to_mio_oe_i (mio_en_d2p ), + .mio_to_periph_o (mio_p2d ), + + .mio_attr_o, + .mio_out_o, + .mio_oe_o, + .mio_in_i, + + .periph_to_dio_i (dio_d2p ), + .periph_to_dio_oe_i (dio_en_d2p ), + .dio_to_periph_o (dio_p2d ), + + .dio_attr_o, + .dio_out_o, + .dio_oe_o, + .dio_in_i, + + % endif + % if m["type"] == "alert_handler": + // alert signals + .alert_rx_o ( alert_rx ), + .alert_tx_i ( alert_tx ), + % endif + % if m["type"] == "otp_ctrl": + .otp_ext_voltage_h_io, + % endif + % if block.scan: + .scanmode_i, + % endif + % if block.scan_reset: + .scan_rst_ni, + % endif + % if block.scan_en: + .scan_en_i, + % endif + + // Clock and reset connections + % for k, v in m["clock_connections"].items(): + .${k} (${v}), + % endfor + % for k, v in m["reset_connections"].items(): + .${k} (${v})${"," if not loop.last else ""} + % endfor + ); + +% endfor + // interrupt assignments +<% base = interrupt_num %>\ + assign intr_vector = { + % for intr in top["interrupt"][::-1]: +<% base -= intr["width"] %>\ + intr_${intr["name"]}, // IDs [${base} +: ${intr['width']}] + % endfor + 1'b 0 // ID [0 +: 1] is a special case and tied to zero. 
+ }; + + // TL-UL Crossbar +% for xbar in top["xbar"]: +<% + name_len = max([len(x["name"]) for x in xbar["nodes"]]); +%>\ + xbar_${xbar["name"]} u_xbar_${xbar["name"]} ( + % for k, v in xbar["clock_connections"].items(): + .${k} (${v}), + % endfor + % for k, v in xbar["reset_connections"].items(): + .${k} (${v}), + % endfor + + ## Inter-module signal + % for sig in xbar["inter_signal_list"]: +<% assert sig['type'] == "req_rsp" %>\ + // port: ${sig['name']} + .${lib.im_portname(sig,"req")}(${lib.im_netname(sig, "req")}), + .${lib.im_portname(sig,"rsp")}(${lib.im_netname(sig, "rsp")}), + + % endfor + + .scanmode_i + ); +% endfor + +% if "pinmux" in top: + // Pinmux connections + // All muxed inputs + % for sig in top["pinmux"]["ios"]: + % if sig["connection"] == "muxed" and sig["type"] in ["inout", "input"]: +<% literal = lib.get_io_enum_literal(sig, 'mio_in') %>\ + assign cio_${sig["name"]}_p2d${"[" + str(sig["idx"]) +"]" if sig["idx"] !=-1 else ""} = mio_p2d[${literal}]; + % endif + % endfor + + // All muxed outputs + % for sig in top["pinmux"]["ios"]: + % if sig["connection"] == "muxed" and sig["type"] in ["inout", "output"]: +<% literal = lib.get_io_enum_literal(sig, 'mio_out') %>\ + assign mio_d2p[${literal}] = cio_${sig["name"]}_d2p${"[" + str(sig["idx"]) +"]" if sig["idx"] !=-1 else ""}; + % endif + % endfor + + // All muxed output enables + % for sig in top["pinmux"]["ios"]: + % if sig["connection"] == "muxed" and sig["type"] in ["inout", "output"]: +<% literal = lib.get_io_enum_literal(sig, 'mio_out') %>\ + assign mio_en_d2p[${literal}] = cio_${sig["name"]}_en_d2p${"[" + str(sig["idx"]) +"]" if sig["idx"] !=-1 else ""}; + % endif + % endfor + + // All dedicated inputs +<% idx = 0 %>\ + logic [${num_dio_total-1}:0] unused_dio_p2d; + assign unused_dio_p2d = dio_p2d; + % for sig in top["pinmux"]["ios"]: +<% literal = lib.get_io_enum_literal(sig, 'dio') %>\ + % if sig["connection"] != "muxed" and sig["type"] in ["inout"]: + assign cio_${sig["name"]}_p2d${"[" + str(sig["idx"]) +"]" if sig["idx"] !=-1 else ""} = dio_p2d[${literal}]; + % elif sig["connection"] != "muxed" and sig["type"] in ["input"]: + assign cio_${sig["name"]}_p2d${"[" + str(sig["idx"]) +"]" if sig["idx"] !=-1 else ""} = dio_p2d[${literal}]; + % endif + % endfor + + // All dedicated outputs + % for sig in top["pinmux"]["ios"]: +<% literal = lib.get_io_enum_literal(sig, 'dio') %>\ + % if sig["connection"] != "muxed" and sig["type"] in ["inout"]: + assign dio_d2p[${literal}] = cio_${sig["name"]}_d2p${"[" + str(sig["idx"]) +"]" if sig["idx"] !=-1 else ""}; + % elif sig["connection"] != "muxed" and sig["type"] in ["input"]: + assign dio_d2p[${literal}] = 1'b0; + % elif sig["connection"] != "muxed" and sig["type"] in ["output"]: + assign dio_d2p[${literal}] = cio_${sig["name"]}_d2p${"[" + str(sig["idx"]) +"]" if sig["idx"] !=-1 else ""}; + % endif + % endfor + + // All dedicated output enables + % for sig in top["pinmux"]["ios"]: +<% literal = lib.get_io_enum_literal(sig, 'dio') %>\ + % if sig["connection"] != "muxed" and sig["type"] in ["inout"]: + assign dio_en_d2p[${literal}] = cio_${sig["name"]}_en_d2p${"[" + str(sig["idx"]) +"]" if sig["idx"] !=-1 else ""}; + % elif sig["connection"] != "muxed" and sig["type"] in ["input"]: + assign dio_en_d2p[${literal}] = 1'b0; + % elif sig["connection"] != "muxed" and sig["type"] in ["output"]: + assign dio_en_d2p[${literal}] = cio_${sig["name"]}_en_d2p${"[" + str(sig["idx"]) +"]" if sig["idx"] !=-1 else ""}; + % endif + % endfor + +% endif + + // make sure scanmode_i is never X 
(including during reset) + `ASSERT_KNOWN(scanmodeKnown, scanmode_i, clk_main_i, 0) + +endmodule diff --git a/utils/reggen/topgen/templates/toplevel_memory.h.tpl b/utils/reggen/topgen/templates/toplevel_memory.h.tpl new file mode 100644 index 0000000..bfb0274 --- /dev/null +++ b/utils/reggen/topgen/templates/toplevel_memory.h.tpl @@ -0,0 +1,62 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +#ifndef _TOP_${top["name"].upper()}_MEMORY_H_ +#define _TOP_${top["name"].upper()}_MEMORY_H_ + +/** + * @file + * @brief Assembler-only Top-Specific Definitions. + * + * This file contains preprocessor definitions for use within assembly code. + * + * These are not shared with C/C++ code because these are only allowed to be + * preprocessor definitions, no data or type declarations are allowed. The + * assembler is also stricter about literals (not allowing suffixes for + * signed/unsigned which are sensible to use for unsigned values in C/C++). + */ + +// Include guard for assembler +#ifdef __ASSEMBLER__ + +/** + * Memory base address for rom in top earlgrey. + */ +#define TOP_EARLGREY_ROM_BASE_ADDR 0x00008000 + +/** + * Memory size for rom in top earlgrey. + */ +#define TOP_EARLGREY_ROM_SIZE_BYTES 0x4000 + +% for m in top["memory"]: +/** + * Memory base address for ${m["name"]} in top ${top["name"]}. + */ +#define TOP_${top["name"].upper()}_${m["name"].upper()}_BASE_ADDR ${m["base_addr"]} + +/** + * Memory size for ${m["name"]} in top ${top["name"]}. + */ +#define TOP_${top["name"].upper()}_${m["name"].upper()}_SIZE_BYTES ${m["size"]} + +% endfor + +% for (inst_name, if_name), region in helper.devices(): +<% + if_desc = inst_name if if_name is None else '{} device on {}'.format(if_name, inst_name) + hex_base_addr = "0x{:X}".format(region.base_addr) + base_addr_name = region.base_addr_name().as_c_define() +%>\ +/** + * Peripheral base address for ${if_desc} in top ${top["name"]}. + * + * This should be used with #mmio_region_from_addr to access the memory-mapped + * registers associated with the peripheral (usually via a DIF). + */ +#define ${base_addr_name} ${hex_base_addr} +% endfor +#endif // __ASSEMBLER__ + +#endif // _TOP_${top["name"].upper()}_MEMORY_H_ diff --git a/utils/reggen/topgen/templates/toplevel_memory.ld.tpl b/utils/reggen/topgen/templates/toplevel_memory.ld.tpl new file mode 100644 index 0000000..42c4198 --- /dev/null +++ b/utils/reggen/topgen/templates/toplevel_memory.ld.tpl @@ -0,0 +1,30 @@ +/* Copyright lowRISC contributors. */ +/* Licensed under the Apache License, Version 2.0, see LICENSE for details. */ +/* SPDX-License-Identifier: Apache-2.0 */ +<%! +def memory_to_flags(memory): + memory_type = memory["type"] + memory_access = memory.get("swaccess", "rw") + assert memory_access in ["ro", "rw"] + + flags_str = "" + if memory_access == "ro": + flags_str += "r" + else: + flags_str += "rw" + + if memory_type in ["rom", "eflash"]: + flags_str += "x" + + return flags_str +%>\ + +/** + * Partial linker script for chip memory configuration. 
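+ * (The rom region below is currently hard-coded; the remaining regions are
+ * generated from the top-level memory list.)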
+ */ +MEMORY { + rom(rx) : ORIGIN = 0x00008000, LENGTH = 0x4000 +% for m in top["memory"]: + ${m["name"]}(${memory_to_flags(m)}) : ORIGIN = ${m["base_addr"]}, LENGTH = ${m["size"]} +% endfor +} diff --git a/utils/reggen/topgen/templates/toplevel_pkg.sv.tpl b/utils/reggen/topgen/templates/toplevel_pkg.sv.tpl new file mode 100644 index 0000000..a25d4fd --- /dev/null +++ b/utils/reggen/topgen/templates/toplevel_pkg.sv.tpl @@ -0,0 +1,112 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 +${gencmd} +<% +import topgen.lib as lib +%>\ +package top_${top["name"]}_pkg; +% for (inst_name, if_name), region in helper.devices(): +<% + if_desc = inst_name if if_name is None else '{} device on {}'.format(if_name, inst_name) + hex_base_addr = "32'h{:X}".format(region.base_addr) + hex_size_bytes = "32'h{:X}".format(region.size_bytes) +%>\ + /** + * Peripheral base address for ${if_desc} in top ${top["name"]}. + */ + parameter int unsigned ${region.base_addr_name().as_c_define()} = ${hex_base_addr}; + + /** + * Peripheral size in bytes for ${if_desc} in top ${top["name"]}. + */ + parameter int unsigned ${region.size_bytes_name().as_c_define()} = ${hex_size_bytes}; + +% endfor +% for name, region in helper.memories(): +<% + hex_base_addr = "32'h{:x}".format(region.base_addr) + hex_size_bytes = "32'h{:x}".format(region.size_bytes) +%>\ + /** + * Memory base address for ${name} in top ${top["name"]}. + */ + parameter int unsigned ${region.base_addr_name().as_c_define()} = ${hex_base_addr}; + + /** + * Memory size for ${name} in top ${top["name"]}. + */ + parameter int unsigned ${region.size_bytes_name().as_c_define()} = ${hex_size_bytes}; + +% endfor + + // Enumeration of IO power domains. + // Only used in ASIC target. + typedef enum logic [${len(top["pinout"]["banks"]).bit_length()-1}:0] { +% for bank in top["pinout"]["banks"]: + ${lib.Name(['io', 'bank', bank]).as_camel_case()} = ${loop.index}, +% endfor + IoBankCount = ${len(top["pinout"]["banks"])} + } pwr_dom_e; + + // Enumeration for MIO signals on the top-level. + typedef enum int unsigned { +% for sig in top["pinmux"]["ios"]: + % if sig['type'] in ['inout', 'input'] and sig['connection'] == 'muxed': + ${lib.get_io_enum_literal(sig, 'mio_in')} = ${sig['glob_idx']}, + % endif +% endfor +<% total = top["pinmux"]['io_counts']['muxed']['inouts'] + \ + top["pinmux"]['io_counts']['muxed']['inputs'] %>\ + ${lib.Name.from_snake_case("mio_in_count").as_camel_case()} = ${total} + } mio_in_e; + + typedef enum { +% for sig in top["pinmux"]["ios"]: + % if sig['type'] in ['inout', 'output'] and sig['connection'] == 'muxed': + ${lib.get_io_enum_literal(sig, 'mio_out')} = ${sig['glob_idx']}, + % endif +% endfor +<% total = top["pinmux"]['io_counts']['muxed']['inouts'] + \ + top["pinmux"]['io_counts']['muxed']['outputs'] %>\ + ${lib.Name.from_snake_case("mio_out_count").as_camel_case()} = ${total} + } mio_out_e; + + // Enumeration for DIO signals, used on both the top and chip-levels. 
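+  // Illustrative only: for a hypothetical top with two dedicated signals
+  // "spi_sck" and "uart_tx", the generated literals would look roughly like
+  //   DioSpiSck = 0,
+  //   DioUartTx = 1,
+  //   DioCount  = 2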
+ typedef enum int unsigned { +% for sig in top["pinmux"]["ios"]: + % if sig['connection'] != 'muxed': + ${lib.get_io_enum_literal(sig, 'dio')} = ${sig['glob_idx']}, + % endif +% endfor +<% total = top["pinmux"]['io_counts']['dedicated']['inouts'] + \ + top["pinmux"]['io_counts']['dedicated']['inputs'] + \ + top["pinmux"]['io_counts']['dedicated']['outputs'] %>\ + ${lib.Name.from_snake_case("dio_count").as_camel_case()} = ${total} + } dio_e; + + // Raw MIO/DIO input array indices on chip-level. + // TODO: Does not account for target specific stubbed/added pads. + // Need to make a target-specific package for those. + typedef enum int unsigned { +% for pad in top["pinout"]["pads"]: + % if pad["connection"] == "muxed": + ${lib.Name.from_snake_case("mio_pad_" + pad["name"]).as_camel_case()} = ${pad["idx"]}, + % endif +% endfor + ${lib.Name.from_snake_case("mio_pad_count").as_camel_case()} + } mio_pad_e; + + typedef enum int unsigned { +% for pad in top["pinout"]["pads"]: + % if pad["connection"] != "muxed": + ${lib.Name.from_snake_case("dio_pad_" + pad["name"]).as_camel_case()} = ${pad["idx"]}, + % endif +% endfor + ${lib.Name.from_snake_case("dio_pad_count").as_camel_case()} + } dio_pad_e; + + // TODO: Enumeration for PLIC Interrupt source peripheral. + // TODO: Enumeration for PLIC Interrupt Ids. + +endpackage diff --git a/utils/reggen/topgen/templates/toplevel_rnd_cnst_pkg.sv.tpl b/utils/reggen/topgen/templates/toplevel_rnd_cnst_pkg.sv.tpl new file mode 100644 index 0000000..9700c3d --- /dev/null +++ b/utils/reggen/topgen/templates/toplevel_rnd_cnst_pkg.sv.tpl @@ -0,0 +1,44 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 +${gencmd} +<% + def make_blocked_sv_literal(hexstr, randwidth): + """This chops the random hexstring into manageable blocks of 64 chars such that the + lines do not get too long. + """ + # Make all-caps and drop '0x' preamble + hexstr = str(hexstr[2:]).upper() + # Block width in hex chars + blockwidth = 64 + remainder = randwidth % (4*blockwidth) + numbits = remainder if remainder else 4*blockwidth + idx = 0 + hexblocks = [] + while randwidth > 0: + hexstr = hexstr[idx:] + randwidth -= numbits + idx = (numbits + 3) // 4 + hexblocks.append(str(numbits) + "'h" + hexstr[0:idx]) + numbits = 4*blockwidth + return hexblocks +%> +package top_${top["name"]}_rnd_cnst_pkg; + +% for m in top["module"]: + % for p in filter(lambda p: p.get("randtype") in ["data", "perm"], m["param_list"]): + % if loop.first: + //////////////////////////////////////////// + // ${m['name']} + //////////////////////////////////////////// + % endif + // ${p['desc']} + parameter ${p["type"]} ${p["name_top"]} = { + % for block in make_blocked_sv_literal(p["default"], p["randwidth"]): + ${block}${"" if loop.last else ","} + % endfor + }; + + % endfor +% endfor +endpackage : top_${top["name"]}_rnd_cnst_pkg diff --git a/utils/reggen/topgen/templates/xbar_env_pkg__params.sv.tpl b/utils/reggen/topgen/templates/xbar_env_pkg__params.sv.tpl new file mode 100644 index 0000000..63c59fe --- /dev/null +++ b/utils/reggen/topgen/templates/xbar_env_pkg__params.sv.tpl @@ -0,0 +1,88 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 +// +// xbar_env_pkg__params generated by `topgen.py` tool + +<% + from collections import OrderedDict + + def is_device_a_xbar(dev_name): + for xbar in top["xbar"]: + if xbar["name"] == dev_name: + return 1 + return 0 + + # recursively find all non-xbar devices under this xbar + def get_xbar_edge_nodes(xbar_name): + edge_devices = [] + for xbar in top["xbar"]: + if xbar["name"] == xbar_name: + for host, devices in xbar["connections"].items(): + for dev_name in devices: + if is_device_a_xbar(dev_name): + edge_devices.extend(get_xbar_edge_nodes()) + else: + edge_devices.append(dev_name) + + return edge_devices + + # find device xbar and assign all its device nodes to it: "peri" -> "uart, gpio, ..." + xbar_device_dict = OrderedDict() + + for xbar in top["xbar"]: + for n in xbar["nodes"]: + if n["type"] == "device" and n["xbar"]: + xbar_device_dict[n["name"]] = get_xbar_edge_nodes(n["name"]) + + # create the mapping: host with the corresponding devices map + host_dev_map = OrderedDict() + for host, devices in top["xbar"][0]["connections"].items(): + dev_list = [] + for dev in devices: + if dev not in xbar_device_dict.keys(): + dev_list.append(dev) + else: + dev_list.extend(xbar_device_dict[dev]) + host_dev_map[host] = dev_list + +%>\ + +// List of Xbar device memory map +tl_device_t xbar_devices[$] = '{ +% for xbar in top["xbar"]: + % for device in xbar["nodes"]: + % if device["type"] == "device" and not device["xbar"]: + '{"${device["name"].replace('.', '__')}", '{ + % for addr in device["addr_range"]: +<% + start_addr = int(addr["base_addr"], 0) + end_addr = start_addr + int(addr["size_byte"], 0) - 1 +%>\ + '{32'h${"%08x" % start_addr}, 32'h${"%08x" % end_addr}}${"," if not loop.last else ""} + % endfor + }}${"," if not loop.last or xbar != top["xbar"][-1] else "};"} + % endif + % endfor +% endfor + + // List of Xbar hosts +tl_host_t xbar_hosts[$] = '{ +% for host in host_dev_map.keys(): + '{"${host}", ${loop.index}, '{ +<% + host_devices = host_dev_map[host]; +%>\ + % for device in host_devices: + % if loop.last: + "${device}"}} + % else: + "${device}", + % endif + % endfor + % if loop.last: +}; + % else: + , + % endif +% endfor diff --git a/utils/reggen/topgen/top.py b/utils/reggen/topgen/top.py new file mode 100644 index 0000000..dcdf1ca --- /dev/null +++ b/utils/reggen/topgen/top.py @@ -0,0 +1,122 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 + +'''Code representing the entire chip for reggen''' + +from typing import Dict, List, Optional, Tuple, Union + +from reggen.ip_block import IpBlock +from reggen.params import ReggenParams +from reggen.reg_block import RegBlock +from reggen.window import Window + +_IFName = Tuple[str, Optional[str]] +_Triple = Tuple[int, str, IpBlock] + + +class Top: + '''An object representing the entire chip, as seen by reggen. + + This contains instances of some blocks (possibly multiple instances of each + block), starting at well-defined base addresses. It may also contain some + windows. These are memories that don't have their own comportable IP (so + aren't defined in a block), but still take up address space. + + ''' + + def __init__(self, + regwidth: int, + blocks: Dict[str, IpBlock], + instances: Dict[str, str], + if_addrs: Dict[Tuple[str, Optional[str]], int], + windows: List[Window], + attrs: Dict[str, str]): + '''Class initializer. 
+ + regwidth is the width of the registers (which must match for all the + blocks) in bits. + + blocks is a map from block name to IpBlock object. + + instances is a map from instance name to the name of the block it + instantiates. Every block name that appears in instances must be a key + of blocks. + + if_addrs is a dictionary that maps the name of a device interface on + some instance of some block to its base address. A key of the form (n, + i) means "the device interface called i on an instance called n". If i + is None, this is an unnamed device interface. Every instance name (n) + that appears in connections must be a key of instances. + + windows is a list of windows (these contain base addresses already). + + attrs is a map from instance name to attr field of the block + + ''' + + self.regwidth = regwidth + self.blocks = blocks + self.instances = instances + self.if_addrs = if_addrs + self.attrs = attrs + + self.window_block = RegBlock(regwidth, ReggenParams()) + + # Generate one list of base addresses and objects (with each object + # either a block name and interface name or a window). While we're at + # it, construct inst_to_block_name and if_addrs. + merged = [] # type: List[Tuple[int, Union[_IFName, Window]]] + for full_if_name, addr in if_addrs.items(): + merged.append((addr, full_if_name)) + + inst_name, if_name = full_if_name + + # The instance name must match some key in instances, whose value + # should in turn match some key in blocks. + assert inst_name in instances + block_name = instances[inst_name] + assert block_name in blocks + + # Check that if_name is indeed the name of a device interface for + # that block. + block = blocks[block_name] + assert block.bus_interfaces.has_interface(False, if_name) + + for window in sorted(windows, key=lambda w: w.offset): + merged.append((window.offset, window)) + self.window_block.add_window(window) + + # A map from block name to the list of its instances. These instances + # are listed in increasing order of the lowest base address of one of + # their interfaces. The entries are added into the dict in the same + # order, so an iteration over items() will give blocks ordered by their + # first occurrence in the address map. + self.block_instances = {} # type: Dict[str, List[str]] + + # Walk the merged list in order of increasing base address. Check for + # overlaps and construct block_instances. + offset = 0 + for base_addr, item in sorted(merged, key=lambda pr: pr[0]): + # Make sure that this item doesn't overlap with the previous one + assert offset <= base_addr, item + + if isinstance(item, Window): + addrsep = (regwidth + 7) // 8 + offset = item.next_offset(addrsep) + continue + + inst_name, if_name = item + block_name = instances[inst_name] + block = blocks[block_name] + + lst = self.block_instances.setdefault(block_name, []) + if inst_name not in lst: + lst.append(inst_name) + + # This should be guaranteed by the fact that we've already checked + # the existence of a device interface. + assert if_name in block.reg_blocks + reg_block = block.reg_blocks[if_name] + + offset = base_addr + reg_block.offset diff --git a/utils/reggen/topgen/top_uvm_reg.sv.tpl b/utils/reggen/topgen/top_uvm_reg.sv.tpl new file mode 100644 index 0000000..1486a92 --- /dev/null +++ b/utils/reggen/topgen/top_uvm_reg.sv.tpl @@ -0,0 +1,151 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 + +// UVM registers auto-generated by `reggen` containing UVM definitions for the entire top-level +<%! + from topgen.gen_dv import sv_base_addr + from reggen.gen_dv import bcname, mcname, miname +%> +## +## This template is used for chip-wide tests. It expects to be run with the +## following arguments +## +## top a Top object +## +## dv_base_prefix a string for the base register type. If it is FOO, then +## we will inherit from FOO_reg (assumed to be a subclass +## of uvm_reg). +## +## Like uvm_reg.sv.tpl, we use functions from uvm_reg_base.sv.tpl to define +## per-device-interface code. +## +<%namespace file="uvm_reg_base.sv.tpl" import="*"/>\ +## +## +## Waive the package-filename check: we're going to be defining all sorts of +## packages in a single file. + +// verilog_lint: waive-start package-filename +## +## Iterate over the device interfaces of blocks in Top, constructing a package +## for each. Sorting items like this guarantees we'll work alphabetically in +## block name. +% for block_name, block in sorted(top.blocks.items()): +% for if_name, rb in block.reg_blocks.items(): +<% + if_suffix = '' if if_name is None else '_' + if_name + esc_if_name = block_name.lower() + if_suffix + if_desc = '' if if_name is None else '; interface {}'.format(if_name) + reg_block_path = 'u_reg' + if_suffix + reg_block_path = reg_block_path if block.hier_path is None else block.hier_path + "." + reg_block_path +%>\ +// Block: ${block_name.lower()}${if_desc} +${make_ral_pkg(dv_base_prefix, top.regwidth, reg_block_path, rb, esc_if_name)} +% endfor +% endfor +## +## +## Now that we've made the block-level packages, re-instate the +## package-filename check. The only package left is chip_ral_pkg, which should +## match the generated filename. 
+ +// verilog_lint: waive-start package-filename + +// Block: chip +package chip_ral_pkg; +<% + if_packages = [] + for block_name, block in sorted(top.blocks.items()): + for if_name in block.reg_blocks: + if_suffix = '' if if_name is None else '_' + if_name + if_packages.append('{}{}_ral_pkg'.format(block_name.lower(), if_suffix)) + + windows = top.window_block.windows +%>\ +${make_ral_pkg_hdr(dv_base_prefix, if_packages)} +${make_ral_pkg_fwd_decls('chip', [], windows)} +% for window in windows: + +${make_ral_pkg_window_class(dv_base_prefix, 'chip', window)} +% endfor + + class chip_reg_block extends ${dv_base_prefix}_reg_block; + // sub blocks +% for block_name, block in sorted(top.blocks.items()): +% for inst_name in top.block_instances[block_name.lower()]: +% for if_name, rb in block.reg_blocks.items(): +<% + if_suffix = '' if if_name is None else '_' + if_name + esc_if_name = block_name.lower() + if_suffix + if_inst = inst_name + if_suffix +%>\ + rand ${bcname(esc_if_name)} ${if_inst}; +% endfor +% endfor +% endfor +% if windows: + // memories +% for window in windows: + rand ${mcname('chip', window)} ${miname(window)}; +% endfor +% endif + + `uvm_object_utils(chip_reg_block) + + function new(string name = "chip_reg_block", + int has_coverage = UVM_NO_COVERAGE); + super.new(name, has_coverage); + endfunction : new + + virtual function void build(uvm_reg_addr_t base_addr, + csr_excl_item csr_excl = null); + // create default map + this.default_map = create_map(.name("default_map"), + .base_addr(base_addr), + .n_bytes(${top.regwidth//8}), + .endian(UVM_LITTLE_ENDIAN)); + if (csr_excl == null) begin + csr_excl = csr_excl_item::type_id::create("csr_excl"); + this.csr_excl = csr_excl; + end + + // create sub blocks and add their maps +% for block_name, block in sorted(top.blocks.items()): +% for inst_name in top.block_instances[block_name.lower()]: +% for if_name, rb in block.reg_blocks.items(): +<% + if_suffix = '' if if_name is None else '_' + if_name + esc_if_name = block_name.lower() + if_suffix + if_inst = inst_name + if_suffix + + if top.attrs.get(inst_name) == 'reggen_only': + hdl_path = 'tb.dut.u_' + inst_name + else: + hdl_path = 'tb.dut.top_earlgrey.u_' + inst_name + qual_if_name = (inst_name, if_name) + base_addr = top.if_addrs[qual_if_name] + base_addr_txt = sv_base_addr(top, qual_if_name) + + hpr_indent = (len(if_inst) + len('.set_hdl_path_root(')) * ' ' +%>\ + ${if_inst} = ${bcname(esc_if_name)}::type_id::create("${if_inst}"); + ${if_inst}.configure(.parent(this)); + ${if_inst}.build(.base_addr(base_addr + ${base_addr_txt}), .csr_excl(csr_excl)); + ${if_inst}.set_hdl_path_root("${hdl_path}", + ${hpr_indent}"BkdrRegPathRtl"); + ${if_inst}.set_hdl_path_root("${hdl_path}", + ${hpr_indent}"BkdrRegPathRtlCommitted"); + ${if_inst}.set_hdl_path_root("${hdl_path}", + ${hpr_indent}"BkdrRegPathRtlShadow"); + default_map.add_submap(.child_map(${if_inst}.default_map), + .offset(base_addr + ${base_addr_txt})); +% endfor +% endfor +% endfor +${make_ral_pkg_window_instances(top.regwidth, 'chip', top.window_block)} + + endfunction : build + endclass : chip_reg_block + +endpackage diff --git a/utils/reggen/topgen/validate.py b/utils/reggen/topgen/validate.py new file mode 100644 index 0000000..bfb4298 --- /dev/null +++ b/utils/reggen/topgen/validate.py @@ -0,0 +1,878 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+# SPDX-License-Identifier: Apache-2.0 +import re +import logging as log +from collections import OrderedDict +from enum import Enum +from typing import Dict, List + +from reggen.validate import check_keys +from reggen.ip_block import IpBlock + +# For the reference +# val_types = { +# 'd': ["int", "integer (binary 0b, octal 0o, decimal, hex 0x)"], +# 'x': ["xint", "x for undefined otherwise int"], +# 'b': [ +# "bitrange", "bit number as decimal integer, \ +# or bit-range as decimal integers msb:lsb" +# ], +# 'l': ["list", "comma separated list enclosed in `[]`"], +# 'ln': ["name list", 'comma separated list enclosed in `[]` of '\ +# 'one or more groups that have just name and dscr keys.'\ +# ' e.g. `{ name: "name", desc: "description"}`'], +# 'lnw': ["name list+", 'name list that optionally contains a width'], +# 'lp': ["parameter list", 'parameter list having default value optionally'], +# 'g': ["group", "comma separated group of key:value enclosed in `{}`"], +# 's': ["string", "string, typically short"], +# 't': ["text", "string, may be multi-line enclosed in `'''` "\ +# "may use `**bold**`, `*italic*` or `!!Reg` markup"], +# 'T': ["tuple", "tuple enclosed in ()"], +# 'pi': ["python int", "Native Python type int (generated)"], +# 'pb': ["python Bool", "Native Python type Bool (generated)"], +# 'pl': ["python list", "Native Python type list (generated)"], +# 'pe': ["python enum", "Native Python type enum (generated)"] +# } + +# Required/optional field in top hjson +top_required = { + 'name': ['s', 'Top name'], + 'type': ['s', 'type of hjson. Shall be "top" always'], + 'clocks': ['g', 'group of clock properties'], + 'resets': ['l', 'list of resets'], + 'module': ['l', 'list of modules to instantiate'], + 'memory': ['l', 'list of memories. At least one memory ' + 'is needed to run the software'], + 'debug_mem_base_addr': ['d', 'Base address of RV_DM. 
' + 'Planned to move to module'], + 'xbar': ['l', 'List of the xbar used in the top'], + 'rnd_cnst_seed': ['int', "Seed for random netlist constant computation"], + 'pinout': ['g', 'Pinout configuration'], + 'targets': ['l', ' Target configurations'], + 'pinmux': ['g', 'pinmux configuration'], +} + +top_optional = { + 'alert_async': ['l', 'async alerts (generated)'], + 'alert': ['lnw', 'alerts (generated)'], + 'alert_module': [ + 'l', + 'list of the modules that connects to alert_handler' + ], + 'datawidth': ['pn', "default data width"], + 'exported_clks': ['g', 'clock signal routing rules'], + 'host': ['g', 'list of host-only components in the system'], + 'inter_module': ['g', 'define the signal connections between the modules'], + 'interrupt': ['lnw', 'interrupts (generated)'], + 'interrupt_module': ['l', 'list of the modules that connects to rv_plic'], + 'num_cores': ['pn', "number of computing units"], + 'power': ['g', 'power domains supported by the design'], + 'port': ['g', 'assign special attributes to specific ports'] +} + +top_added = {} + +pinmux_required = {} +pinmux_optional = { + 'num_wkup_detect': [ + 'd', 'Number of wakeup detectors' + ], + 'wkup_cnt_width': [ + 'd', 'Number of bits in wakeup detector counters' + ], + 'signals': ['l', 'List of Dedicated IOs.'], +} +pinmux_added = { + 'ios': ['l', 'Full list of IO'], +} + +pinmux_sig_required = { + 'instance': ['s', 'Module instance name'], + 'connection': ['s', 'Specification of connection type, ' + 'can be direct, manual or muxed'], +} +pinmux_sig_optional = { + 'port': ['s', 'Port name of module'], + 'pad': ['s', 'Pad name for direct connections'], + 'desc': ['s', 'Signal description'], + 'attr': ['s', 'Pad type for generating the correct attribute CSR'] +} +pinmux_sig_added = {} + +pinout_required = { + 'banks': ['l', 'List of IO power banks'], + 'pads': ['l', 'List of pads'] +} +pinout_optional = { +} +pinout_added = {} + +pad_required = { + 'name': ['l', 'Pad name'], + 'type': ['s', 'Pad type'], + 'bank': ['s', 'IO power bank for the pad'], + 'connection': ['s', 'Specification of connection type, ' + 'can be direct, manual or muxed'], +} +pad_optional = { + 'desc': ['s', 'Pad description'], +} +pad_added = {} + +target_required = { + 'name': ['s', 'Name of target'], + 'pinout': ['g', 'Target-specific pinout configuration'], + 'pinmux': ['g', 'Target-specific pinmux configuration'] +} +target_optional = { +} +target_added = {} + +target_pinmux_required = { + 'special_signals': ['l', 'List of special signals and the pad they are mapped to.'], +} +target_pinmux_optional = {} +target_pinmux_added = {} + +target_pinout_required = { + 'remove_pads': ['l', 'List of pad names to remove and stub out'], + 'add_pads': ['l', 'List of manual pads to add'], +} +target_pinout_optional = {} +target_pinout_added = {} + +straps_required = { + 'tap0': ['s', 'Name of tap0 pad'], + 'tap1': ['s', 'Name of tap1 pad'], + 'dft0': ['s', 'Name of dft0 pad'], + 'dft1': ['s', 'Name of dft1 pad'], +} +straps_optional = {} +straps_added = {} + +straps_required = { + 'tap0': ['s', 'Name of tap0 pad'], + 'tap1': ['s', 'Name of tap1 pad'], + 'dft0': ['s', 'Name of dft0 pad'], + 'dft1': ['s', 'Name of dft1 pad'], +} +straps_optional = {} +straps_added = {} + +special_sig_required = { + 'name': ['s', 'DIO name'], + 'pad': ['s', 'Pad name'], +} +special_sig_optional = { + 'desc': ['s', 'Description of signal connection'], +} +special_sig_added = {} + +clock_srcs_required = { + 'name': ['s', 'name of clock group'], + 'aon': ['s', 'yes, no. 
aon attribute of a clock'], + 'freq': ['s', 'frequency of clock in Hz'], +} + +clock_srcs_optional = { + 'derived': ['s', 'whether clock is derived'], + 'params': ['s', 'extra clock parameters'] +} + +derived_clock_srcs_required = { + 'name': ['s', 'name of clock group'], + 'aon': ['s', 'yes, no. aon attribute of a clock'], + 'freq': ['s', 'frequency of clock in Hz'], + 'src': ['s', 'source clock'], + 'div': ['d', 'ratio between source clock and derived clock'], +} + +clock_groups_required = { + 'name': ['s', 'name of clock group'], + 'src': ['s', 'yes, no. This clock group is directly from source'], + 'sw_cg': ['s', 'yes, no, hint. Software clock gate attributes'], +} +clock_groups_optional = { + 'unique': ['s', 'whether clocks in the group are unique'], + 'clocks': ['g', 'groups of clock name to source'], +} +clock_groups_added = {} + +eflash_required = { + 'banks': ['d', 'number of flash banks'], + 'base_addr': ['s', 'strarting hex address of memory'], + 'clock_connections': ['g', 'generated, elaborated version of clock_srcs'], + 'clock_group': ['s', 'associated clock attribute group'], + 'clock_srcs': ['g', 'clock connections'], + 'inter_signal_list': ['lg', 'intersignal list'], + 'name': ['s', 'name of flash memory'], + 'pages_per_bank': ['d', 'number of data pages per flash bank'], + 'program_resolution': ['d', 'maximum number of flash words allowed to program'], + 'reset_connections': ['g', 'reset connections'], + 'swaccess': ['s', 'software accessibility'], + 'type': ['s', 'type of memory'] +} + +eflash_optional = {} + +eflash_added = {} + + +# Supported PAD types. +# Needs to coincide with enum definition in prim_pad_wrapper_pkg.sv +class PadType(Enum): + INPUT_STD = 'InputStd' + BIDIR_STD = 'BidirStd' + BIDIR_TOL = 'BidirTol' + BIDIR_OD = 'BidirOd' + ANALOG_IN0 = 'AnalogIn0' + + +def is_valid_pad_type(obj): + try: + PadType(obj) + except ValueError: + return False + return True + + +class TargetType(Enum): + MODULE = "module" + XBAR = "xbar" + + +class Target: + """Target class informs the checkers if we are validating a module or xbar + """ + def __init__(self, target_type): + # The type of this target + self.target_type = target_type + # The key to search against + if target_type == TargetType.MODULE: + self.key = "type" + else: + self.key = "name" + + +class Flash: + """Flash class contains information regarding parameter defaults. + For now, only expose banks / pages_per_bank for user configuration. + For now, also enforce power of 2 requiremnt. 
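+
+    As a worked example (using the fixed values set in __init__ below,
+    i.e. 256 words per page and a 64-bit / 8-byte data word): banks = 2 and
+    pages_per_bank = 256 give calc_size() = 2 * 256 * 256 * 8 = 0x100000
+    bytes (1 MiB).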
+ """ + max_banks = 4 + max_pages_per_bank = 1024 + + def __init__(self, mem): + self.banks = mem['banks'] + self.pages_per_bank = mem['pages_per_bank'] + self.program_resolution = mem['program_resolution'] + self.words_per_page = 256 + self.data_width = 64 + self.metadata_width = 12 + self.info_types = 3 + self.infos_per_bank = [10, 1, 2] + + def is_pow2(self, n): + return (n != 0) and (n & (n - 1) == 0) + + def check_values(self): + pow2_check = (self.is_pow2(self.banks) and + self.is_pow2(self.pages_per_bank) and + self.is_pow2(self.program_resolution)) + limit_check = ((self.banks <= Flash.max_banks) and + (self.pages_per_bank <= Flash.max_pages_per_bank)) + + return pow2_check and limit_check + + def calc_size(self): + word_bytes = self.data_width / 8 + bytes_per_page = word_bytes * self.words_per_page + bytes_per_bank = bytes_per_page * self.pages_per_bank + return bytes_per_bank * self.banks + + def populate(self, mem): + mem['words_per_page'] = self.words_per_page + mem['data_width'] = self.data_width + mem['metadata_width'] = self.metadata_width + mem['info_types'] = self.info_types + mem['infos_per_bank'] = self.infos_per_bank + mem['size'] = hex(int(self.calc_size())) + + word_bytes = self.data_width / 8 + mem['pgm_resolution_bytes'] = int(self.program_resolution * word_bytes) + + +# Check to see if each module/xbar defined in top.hjson exists as ip/xbar.hjson +# Also check to make sure there are not multiple definitions of ip/xbar.hjson for each +# top level definition +# If it does, return a dictionary of instance names to index in ip/xbarobjs +def check_target(top, objs, tgtobj): + error = 0 + idxs = OrderedDict() + + # Collect up counts of object names. We support entries of objs that are + # either dicts (for top-levels) or IpBlock objects. 
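+    # For example (names are hypothetical): if objs holds blocks "uart",
+    # "gpio" and a second copy of "uart", name_indices becomes
+    # {"uart": [0, 2], "gpio": [1]} and the duplicate entry is reported
+    # further below.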
+ name_indices = {} + for idx, obj in enumerate(objs): + if isinstance(obj, IpBlock): + name = obj.name.lower() + else: + name = obj['name'].lower() + + log.info("%d Order is %s" % (idx, name)) + name_indices.setdefault(name, []).append(idx) + + tgt_type = tgtobj.target_type.value + inst_key = tgtobj.key + + for cfg in top[tgt_type]: + cfg_name = cfg['name'].lower() + log.info("Checking target %s %s" % (tgt_type, cfg_name)) + + indices = name_indices.get(cfg[inst_key], []) + if not indices: + log.error("Could not find %s.hjson" % cfg_name) + error += 1 + elif len(indices) > 1: + log.error("Duplicate %s.hjson" % cfg_name) + error += 1 + else: + idxs[cfg_name] = indices[0] + + log.info("Current state %s" % idxs) + return error, idxs + + +def check_pad(top: Dict, + pad: Dict, + known_pad_names: Dict, + valid_connections: List[str], + prefix: str) -> int: + error = 0 + error += check_keys(pad, pad_required, pad_optional, + pad_added, prefix) + + # check name uniqueness + if pad['name'] in known_pad_names: + log.warning('Pad name {} is not unique'.format(pad['name'])) + error += 1 + known_pad_names[pad['name']] = 1 + + if not is_valid_pad_type(pad['type']): + log.warning('Unkown pad type {}'.format(pad['type'])) + error += 1 + + if pad['bank'] not in top['pinout']['banks']: + log.warning('Unkown io power bank {}'.format(pad['bank'])) + error += 1 + + if pad['connection'] not in valid_connections: + log.warning('Connection type {} of pad {} is invalid' + .format(pad['connection'], pad['name'])) + error += 1 + + return error + + +def check_pinout(top: Dict, prefix: str) -> int: + error = check_keys(top['pinout'], pinout_required, pinout_optional, + pinout_added, prefix + ' Pinout') + + known_names = {} + for pad in top['pinout']['pads']: + error += check_keys(pad, pad_required, pad_optional, + pad_added, prefix + ' Pinout') + + error += check_pad(top, pad, known_names, + ['direct', 'manual', 'muxed'], + prefix + ' Pad') + + return error + + +def check_pinmux(top: Dict, prefix: str) -> int: + error = check_keys(top['pinmux'], pinmux_required, pinmux_optional, + pinmux_added, prefix + ' Pinmux') + + # This is used for the direct connection accounting below, + # where we tick off already connected direct pads. + known_direct_pads = {} + direct_pad_attr = {} + for pad in top['pinout']['pads']: + if pad['connection'] == 'direct': + known_direct_pads[pad['name']] = 1 + direct_pad_attr[pad['name']] = pad['type'] + + # Note: the actual signal crosscheck is deferred until the merge stage, + # since we have no idea at this point which IOs comportable IPs expose. + for sig in top['pinmux']['signals']: + error += check_keys(sig, pinmux_sig_required, pinmux_sig_optional, + pinmux_sig_added, prefix + ' Pinmux signal') + + if sig['connection'] not in ['direct', 'manual', 'muxed']: + log.warning('Invalid connection type {}'.format(sig['connection'])) + error += 1 + + # The pad needs to refer to a valid pad name in the pinout that is of + # connection type "direct". We tick off all direct pads that have been + # referenced in order to make sure there are no double connections + # and unconnected direct pads. + padname = sig.setdefault('pad', '') + if padname != '': + if padname in known_direct_pads: + if known_direct_pads[padname] == 1: + known_direct_pads[padname] = 0 + padattr = direct_pad_attr[padname] + else: + log.warning('Warning, direct pad {} is already connected' + .format(padname)) + error += 1 + else: + log.warning('Unknown direct pad {}'.format(padname)) + error += 1 + + # Check port naming scheme. 
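+        # Illustrative note: the pattern below is meant to accept a plain
+        # identifier with an optional bracketed index, e.g. hypothetical
+        # port names such as "uart_rx" or "spi_d[3]".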
+ port = sig.setdefault('port', '') + pattern = r'^[a-zA-Z0-9_]*(\[[0-9]*\]){0,1}' + matches = re.match(pattern, port) + if matches is None: + log.warning('Port name {} has wrong format' + .format(port)) + error += 1 + + # Check that only direct connections have pad keys + if sig['connection'] == 'direct': + if sig.setdefault('attr', '') != '': + log.warning('Direct connection of instance {} port {} ' + 'must not have an associated pad attribute field' + .format(sig['instance'], + sig['port'])) + error += 1 + # Since the signal is directly connected, we can automatically infer + # the pad type needed to instantiate the correct attribute CSR WARL + # module inside the pinmux. + sig['attr'] = padattr + + if padname == '': + log.warning('Instance {} port {} connection is of direct type ' + 'and therefore must have an associated pad name.' + .format(sig['instance'], + sig['port'])) + error += 1 + if port == '': + log.warning('Instance {} port {} connection is of direct type ' + 'and therefore must have an associated port name.' + .format(sig['instance'], + sig['port'])) + error += 1 + elif sig['connection'] == 'muxed': + # Muxed signals do not have a corresponding pad and attribute CSR, + # since they first go through the pinmux matrix. + if sig.setdefault('attr', '') != '': + log.warning('Muxed connection of instance {} port {} ' + 'must not have an associated pad attribute field' + .format(sig['instance'], + sig['port'])) + error += 1 + if padname != '': + log.warning('Muxed connection of instance {} port {} ' + 'must not have an associated pad' + .format(sig['instance'], + sig['port'])) + error += 1 + elif sig['connection'] == 'manual': + # This pad attr key is only allowed in the manual case, + # as there is no way to infer the pad type automatically. + sig.setdefault('attr', 'BidirStd') + if padname != '': + log.warning('Manual connection of instance {} port {} ' + 'must not have an associated pad' + .format(sig['instance'], + sig['port'])) + error += 1 + + # At this point, all direct pads should have been ticked off. + for key, val in known_direct_pads.items(): + if val == 1: + log.warning('Direct pad {} has not been connected' + .format(key)) + error += 1 + + return error + + +def check_implementation_targets(top: Dict, prefix: str) -> int: + error = 0 + known_names = {} + for target in top['targets']: + error += check_keys(target, target_required, target_optional, + target_added, prefix + ' Targets') + + # check name uniqueness + if target['name'] in known_names: + log.warning('Target name {} is not unique'.format(target['name'])) + error += 1 + known_names[target['name']] = 1 + + error += check_keys(target['pinmux'], target_pinmux_required, target_pinmux_optional, + target_pinmux_added, prefix + ' Target pinmux') + + error += check_keys(target['pinout'], target_pinout_required, target_pinout_optional, + target_pinout_added, prefix + ' Target pinout') + + # Check special pad signals + known_entry_names = {} + for entry in target['pinmux']['special_signals']: + error += check_keys(entry, special_sig_required, special_sig_optional, + special_sig_added, prefix + ' Special signal') + + # check name uniqueness + if entry['name'] in known_entry_names: + log.warning('Special pad name {} is not unique'.format(entry['name'])) + error += 1 + known_entry_names[entry['name']] = 1 + + # The pad key needs to refer to a valid pad name. 
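+            # Note on the for/else blocks below: the else branch only runs
+            # when the loop completes without `break`, i.e. when no matching
+            # pad (or signal) was found.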
+ is_muxed = False + for pad in top['pinout']['pads']: + if entry['pad'] == pad['name']: + is_muxed = pad['connection'] == 'muxed' + break + else: + log.warning('Unknown pad {}'.format(entry['pad'])) + error += 1 + + if not is_muxed: + # If this is not a muxed pad, we need to make sure this refers to + # DIO that is NOT a manual pad. + for sig in top['pinmux']['signals']: + if entry['pad'] == sig['pad']: + break + else: + log.warning('Special pad {} cannot refer to a manual pad'.format(entry['pad'])) + error += 1 + + # Check pads to remove and stub out + for entry in target['pinout']['remove_pads']: + # The pad key needs to refer to a valid pad name. + for pad in top['pinout']['pads']: + if entry == pad['name']: + break + else: + log.warning('Unknown pad {}'.format(entry)) + error += 1 + + # Check pads to add + known_pad_names = {} + for pad in top['pinout']['pads']: + known_pad_names.update({pad['name']: 1}) + + for pad in target['pinout']['add_pads']: + error += check_pad(top, pad, known_pad_names, ['manual'], + prefix + ' Additional Pad') + + return error + + +# check for inconsistent clock group definitions +def check_clock_groups(top): + + # default empty assignment + if "groups" not in top['clocks']: + top['clocks']['groups'] = [] + + error = 0 + for group in top['clocks']['groups']: + error = check_keys(group, clock_groups_required, clock_groups_optional, + clock_groups_added, "Clock Groups") + + # Check sw_cg values are valid + if group['sw_cg'] not in ['yes', 'no', 'hint']: + log.error("Incorrect attribute for sw_cg: {}".format( + group['sw_cg'])) + error += 1 + + # Check combination of src and sw are valid + if group['src'] == 'yes' and group['sw_cg'] != 'no': + log.error("Invalid combination of src and sw_cg: {} and {}".format( + group['src'], group['sw_cg'])) + error += 1 + + # Check combination of sw_cg and unique are valid + unique = group['unique'] if 'unique' in group else 'no' + if group['sw_cg'] == 'no' and unique != 'no': + log.error( + "Incorrect attribute combination. 
When sw_cg is no, unique must be no" + ) + error += 1 + + if error: + break + + return error + + +def check_clocks_resets(top, ipobjs, ip_idxs, xbarobjs, xbar_idxs): + + error = 0 + + # there should only be one each of pwrmgr/clkmgr/rstmgr + pwrmgrs = [m for m in top['module'] if m['type'] == 'pwrmgr'] + clkmgrs = [m for m in top['module'] if m['type'] == 'clkmgr'] + rstmgrs = [m for m in top['module'] if m['type'] == 'rstmgr'] + + if len(pwrmgrs) == 1 * len(clkmgrs) == 1 * len(rstmgrs) != 1: + log.error("Incorrect number of pwrmgr/clkmgr/rstmgr") + error += 1 + + # check clock fields are all there + ext_srcs = [] + for src in top['clocks']['srcs']: + check_keys(src, clock_srcs_required, clock_srcs_optional, {}, + "Clock source") + ext_srcs.append(src['name']) + + # check derived clock sources + log.info("Collected clocks are {}".format(ext_srcs)) + for src in top['clocks']['derived_srcs']: + check_keys(src, derived_clock_srcs_required, {}, {}, "Derived clocks") + try: + ext_srcs.index(src['src']) + except Exception: + error += 1 + log.error("{} is not a valid src for {}".format( + src['src'], src['name'])) + + # all defined clock/reset nets + reset_nets = [reset['name'] for reset in top['resets']['nodes']] + clock_srcs = [ + clock['name'] + for clock in top['clocks']['srcs'] + top['clocks']['derived_srcs'] + ] + + # Check clock/reset port connection for all IPs + for ipcfg in top['module']: + ipcfg_name = ipcfg['name'].lower() + log.info("Checking clock/resets for %s" % ipcfg_name) + error += validate_reset(ipcfg, ipobjs[ip_idxs[ipcfg_name]], reset_nets) + error += validate_clock(ipcfg, ipobjs[ip_idxs[ipcfg_name]], clock_srcs) + + if error: + log.error("module clock/reset checking failed") + break + + # Check clock/reset port connection for all xbars + for xbarcfg in top['xbar']: + xbarcfg_name = xbarcfg['name'].lower() + log.info("Checking clock/resets for xbar %s" % xbarcfg_name) + error += validate_reset(xbarcfg, xbarobjs[xbar_idxs[xbarcfg_name]], + reset_nets, "xbar") + error += validate_clock(xbarcfg, xbarobjs[xbar_idxs[xbarcfg_name]], + clock_srcs, "xbar") + + if error: + log.error("xbar clock/reset checking failed") + break + + return error + + +# Checks the following +# For each defined reset connection in top*.hjson, there exists a defined port at the destination +# and defined reset net +# There are the same number of defined connections as there are ports +def validate_reset(top, inst, reset_nets, prefix=""): + # Gather inst port list + error = 0 + + # Handle either an IpBlock (generated by reggen) or an OrderedDict + # (generated by topgen for a crossbar) + if isinstance(inst, IpBlock): + name = inst.name + reset_signals = inst.reset_signals + else: + name = inst['name'] + reset_signals = ([inst.get('reset_primary', 'rst_ni')] + + inst.get('other_reset_list', [])) + + log.info("%s %s resets are %s" % + (prefix, name, reset_signals)) + + if len(top['reset_connections']) != len(reset_signals): + error += 1 + log.error("%s %s mismatched number of reset ports and nets" % + (prefix, name)) + + missing_port = [ + port for port in top['reset_connections'].keys() + if port not in reset_signals + ] + + if missing_port: + error += 1 + log.error("%s %s Following reset ports do not exist:" % + (prefix, name)) + [log.error("%s" % port) for port in missing_port] + + missing_net = [ + net for port, net in top['reset_connections'].items() + if net not in reset_nets + ] + + if missing_net: + error += 1 + log.error("%s %s Following reset nets do not exist:" % + (prefix, name)) + [log.error("%s" 
% net) for net in missing_net] + + return error + + +# Checks the following +# For each defined clock_src in top*.hjson, there exists a defined port at the destination +# and defined clock source +# There are the same number of defined connections as there are ports +def validate_clock(top, inst, clock_srcs, prefix=""): + # Gather inst port list + error = 0 + + # Handle either an IpBlock (generated by reggen) or an OrderedDict + # (generated by topgen for a crossbar) + if isinstance(inst, IpBlock): + name = inst.name + clock_signals = inst.clock_signals + else: + name = inst['name'] + clock_signals = ([inst.get('clock_primary', 'rst_ni')] + + inst.get('other_clock_list', [])) + + if len(top['clock_srcs']) != len(clock_signals): + error += 1 + log.error("%s %s mismatched number of clock ports and nets" % + (prefix, name)) + + missing_port = [ + port for port in top['clock_srcs'].keys() + if port not in clock_signals + ] + + if missing_port: + error += 1 + log.error("%s %s Following clock ports do not exist:" % + (prefix, name)) + [log.error("%s" % port) for port in missing_port] + + missing_net = [ + net for port, net in top['clock_srcs'].items() if net not in clock_srcs + ] + + if missing_net: + error += 1 + log.error("%s %s Following clock nets do not exist:" % + (prefix, name)) + [log.error("%s" % net) for net in missing_net] + + return error + + +def check_flash(top): + error = 0 + + for mem in top['memory']: + if mem['type'] == "eflash": + error = check_keys(mem, eflash_required, eflash_optional, + eflash_added, "Eflash") + + flash = Flash(mem) + error += 1 if not flash.check_values() else 0 + + if error: + log.error("Flash check failed") + else: + flash.populate(mem) + + return error + + +def check_power_domains(top): + error = 0 + + # check that the default domain is valid + if top['power']['default'] not in top['power']['domains']: + error += 1 + return error + + # check that power domain definition is consistent with reset and module definition + for reset in top['resets']['nodes']: + if reset['gen']: + if 'domains' not in reset: + log.error("{} missing domain definition".format(reset['name'])) + error += 1 + return error + else: + for domain in reset['domains']: + if domain not in top['power']['domains']: + log.error("{} defined invalid domain {}".format( + reset['name'], domain)) + error += 1 + return error + + # Check that each module, xbar, memory has a power domain defined. + # If not, give it a default. + # If there is one defined, check that it is a valid definition + for end_point in top['module'] + top['memory'] + top['xbar']: + if 'domain' not in end_point: + end_point['domain'] = top['power']['default'] + + if end_point['domain'] not in top['power']['domains']: + log.error("{} defined invalid domain {}" + .format(end_point['name'], + end_point['domain'])) + error += 1 + return error + + # arrived without incident, return + return error + + +def validate_top(top, ipobjs, xbarobjs): + # return as it is for now + error = check_keys(top, top_required, top_optional, top_added, "top") + + if error != 0: + log.error("Top HJSON has top level errors. 
Aborting") + return top, error + + component = top['name'] + + # MODULE check + err, ip_idxs = check_target(top, ipobjs, Target(TargetType.MODULE)) + error += err + + # XBAR check + err, xbar_idxs = check_target(top, xbarobjs, Target(TargetType.XBAR)) + error += err + + # MEMORY check + error += check_flash(top) + + # Power domain check + error += check_power_domains(top) + + # Clock / Reset check + error += check_clocks_resets(top, ipobjs, ip_idxs, xbarobjs, xbar_idxs) + + # Clock group check + error += check_clock_groups(top) + + # RV_PLIC check + + # Pinout, pinmux and target checks + # Note that these checks must happen in this order, as + # the pinmux and target configs depend on the pinout. + error += check_pinout(top, component) + error += check_pinmux(top, component) + error += check_implementation_targets(top, component) + + return top, error
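+
+# ---------------------------------------------------------------------------
+# Minimal usage sketch (illustrative only; topgen.py drives this module with
+# real IpBlock objects and xbar configs, so the empty lists and the file name
+# below are assumptions):
+#
+#   import hjson
+#
+#   with open('top_example.hjson') as f:
+#       top_cfg = hjson.load(f, use_decimal=True)
+#   top_cfg, err = validate_top(top_cfg, ipobjs=[], xbarobjs=[])
+#   if err:
+#       log.error("top validation failed with %d error(s)", err)
+# ---------------------------------------------------------------------------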