From 5c098866f2a8a817c3b009685a04c78e4ba63984 Mon Sep 17 00:00:00 2001 From: Luke Wren Date: Sat, 4 Dec 2021 20:46:39 +0000 Subject: [PATCH] Sketch in AMO support --- hdl/arith/hazard3_amo_alu.v | 14 +-- hdl/hazard3.f | 5 +- hdl/hazard3_core.v | 181 +++++++++++++++++++++++++++------ hdl/hazard3_decode.v | 193 +++++++++++++++++++----------------- 4 files changed, 262 insertions(+), 131 deletions(-) diff --git a/hdl/arith/hazard3_amo_alu.v b/hdl/arith/hazard3_amo_alu.v index 689a130..ed8884e 100644 --- a/hdl/arith/hazard3_amo_alu.v +++ b/hdl/arith/hazard3_amo_alu.v @@ -29,6 +29,8 @@ module hazard3_amo_alu #( output reg [W_DATA-1:0] result ); +`include "hazard3_ops.vh" + wire sub = op != MEMOP_AMOADD_W; wire cmp_unsigned = op == MEMOP_AMOMINU_W || op == MEMOP_AMOMAXU_W; @@ -41,12 +43,12 @@ wire rs1_lessthan_rs2 = always @ (*) begin case(op) - MEMOP_AMOADD_W : result = sum; - MEMOP_AMOXOR_W : result = op_rs1 ^ op_rs2; - MEMOP_AMOAND_W : result = op_rs1 & op_rs2; - MEMOP_AMOOR_W : result = op_rs1 | op_rs2; - MEMOP_AMOMIN_W : result = rs1_lessthan_rs2 ? op_rs1 : op_rs2; - MEMOP_AMOMAX_W : result = rs1_lessthan_rs2 ? op_rs2 : op_rs1; + MEMOP_AMOADD_W: result = sum; + MEMOP_AMOXOR_W: result = op_rs1 ^ op_rs2; + MEMOP_AMOAND_W: result = op_rs1 & op_rs2; + MEMOP_AMOOR_W: result = op_rs1 | op_rs2; + MEMOP_AMOMIN_W: result = rs1_lessthan_rs2 ? op_rs1 : op_rs2; + MEMOP_AMOMAX_W: result = rs1_lessthan_rs2 ? op_rs2 : op_rs1; MEMOP_AMOMINU_W: result = rs1_lessthan_rs2 ? op_rs1 : op_rs2; MEMOP_AMOMAXU_W: result = rs1_lessthan_rs2 ? 
op_rs2 : op_rs1; // AMOSWAP diff --git a/hdl/hazard3.f b/hdl/hazard3.f index ecd08b3..d87bf2f 100644 --- a/hdl/hazard3.f +++ b/hdl/hazard3.f @@ -2,10 +2,11 @@ file hazard3_core.v file hazard3_cpu_1port.v file hazard3_cpu_2port.v file arith/hazard3_alu.v -file arith/hazard3_shift_barrel.v -file arith/hazard3_priority_encode.v +file arith/hazard3_amo_alu.v file arith/hazard3_muldiv_seq.v file arith/hazard3_mul_fast.v +file arith/hazard3_priority_encode.v +file arith/hazard3_shift_barrel.v file hazard3_frontend.v file hazard3_instr_decompress.v file hazard3_decode.v diff --git a/hdl/hazard3_core.v b/hdl/hazard3_core.v index 5631e25..f85d9a1 100644 --- a/hdl/hazard3_core.v +++ b/hdl/hazard3_core.v @@ -274,11 +274,8 @@ reg [W_EXCEPT-1:0] xm_except; reg xm_wfi; reg xm_delay_irq_entry; - -reg x_stall_raw; -wire x_stall_muldiv; -wire x_jump_req; -wire m_wfi_stall_clear; +// ---------------------------------------------------------------------------- +// Stall logic // IRQs squeeze in between the instructions in X and M, so in this case X // stalls but M can continue. -> X always stalls on M trap, M *may* stall. @@ -299,37 +296,61 @@ wire x_stall_on_exclusive_overlap = |EXTENSION_A && ( (xm_memop == MEMOP_SC_W || xm_memop == MEMOP_LR_W) ); -assign x_stall = - m_stall || - x_stall_on_trap || - x_stall_on_exclusive_overlap || - x_stall_raw || x_stall_muldiv || - bus_aph_req_d && !bus_aph_ready_d || - x_jump_req && !f_jump_rdy; +// AMOs are issued completely from X. We keep X stalled, and pass bubbles into +// M. Otherwise the exception handling would be even more of a mess. Phases +// 0-3 are read/write address/data phases. Phase 4 is error, due to HRESP or +// due to low HEXOKAY response to read. + +// Also need to clear AMO if it follows an excepting instruction. 
+ +reg [2:0] x_amo_phase; +wire x_stall_on_amo = |EXTENSION_A && d_memop_is_amo && !m_trap_enter_soon && ( + x_amo_phase < 3'h3 || (x_amo_phase == 3'h3 && !bus_dph_ready_d) +); + +// Read-after-write hazard detection (e.g. load-use) wire m_fast_mul_result_vld; -wire m_generating_result = xm_memop < MEMOP_SW || m_fast_mul_result_vld; +wire m_generating_result = xm_memop < MEMOP_SW || xm_memop == MEMOP_LR_W || m_fast_mul_result_vld; -// Load-use hazard detection +reg x_stall_on_raw; always @ (*) begin - x_stall_raw = 1'b0; + x_stall_on_raw = 1'b0; if (REDUCED_BYPASS) begin - x_stall_raw = + x_stall_on_raw = |xm_rd && (xm_rd == d_rs1 || xm_rd == d_rs2) || |mw_rd && (mw_rd == d_rs1 || mw_rd == d_rs2); end else if (m_generating_result) begin // With the full bypass network, load-use (or fast multiply-use) is the only RAW stall if (|xm_rd && xm_rd == d_rs1) begin // Store addresses cannot be bypassed later, so there is no exception here. - x_stall_raw = 1'b1; + x_stall_on_raw = 1'b1; end else if (|xm_rd && xm_rd == d_rs2) begin // Store data can be bypassed in M. Any other instructions must stall. - x_stall_raw = !(d_memop == MEMOP_SW || d_memop == MEMOP_SH || d_memop == MEMOP_SB); + x_stall_on_raw = !(d_memop == MEMOP_SW || d_memop == MEMOP_SH || d_memop == MEMOP_SB); end end end +wire x_stall_muldiv; +wire x_jump_req; + +assign x_stall = + m_stall || + x_stall_on_trap || + x_stall_on_exclusive_overlap || + x_stall_on_amo || + x_stall_on_raw || + x_stall_muldiv || + bus_aph_req_d && !bus_aph_ready_d || + x_jump_req && !f_jump_rdy; + +wire m_wfi_stall_clear; + +// ---------------------------------------------------------------------------- +// Execution logic + // ALU, operand muxes and bypass always @ (*) begin @@ -376,15 +397,51 @@ hazard3_alu #( // AHB transaction request +// AMO stalls the pipe, then generates two bus transfers per 4-cycle +// iteration, unless it bails out due to a bus fault or failed load +// reservation. 
+always @ (posedge clk or negedge rst_n) begin + if (!rst_n) begin + x_amo_phase <= 3'h0; + end else if (|EXTENSION_A && !x_stall) begin + if (!d_memop_is_amo) begin + x_amo_phase <= 3'h0; + end else if (x_stall_on_raw) begin + // First address phase stalled due to address dependency on + // previous load/mul/etc. Shouldn't be possible in later phases. +`ifdef FORMAL + assert(x_amo_phase == 3'h0); +`endif + x_amo_phase <= 3'h0; + end else if (m_trap_enter_soon) begin + x_amo_phase <= 3'h0; + end else if (x_amo_phase == 3'h1 && !bus_dph_exokay_d) begin + // Load reserve fail indicates the memory region does not support + // exclusives, so we will never succeed at store. Exception. + x_amo_phase <= 3'h4; + end else if ((x_amo_phase == 3'h1 || x_amo_phase == 3'h3) && bus_dph_err_d) begin + // Bus fault. Exception. + x_amo_phase <= 3'h4; + end else if (x_amo_phase == 3'h3) begin + // We're done! + x_amo_phase <= 3'h0; + end else begin + x_amo_phase <= x_amo_phase + 3'h1; + end + end +end + reg mw_local_exclusive_reserved; wire x_memop_vld = d_memop != MEMOP_NONE && !( - |EXTENSION_A && d_memop == MEMOP_SC_W && !mw_local_exclusive_reserved + |EXTENSION_A && d_memop == MEMOP_SC_W && !mw_local_exclusive_reserved || + |EXTENSION_A && d_memop_is_amo && x_amo_phase != 3'h0 && x_amo_phase != 3'h2 ); wire x_memop_write = d_memop == MEMOP_SW || d_memop == MEMOP_SH || d_memop == MEMOP_SB || - |EXTENSION_A && d_memop == MEMOP_SC_W; + |EXTENSION_A && d_memop == MEMOP_SC_W || + |EXTENSION_A && d_memop_is_amo && x_amo_phase == 3'h2; wire x_unaligned_addr = d_memop != MEMOP_NONE && ( bus_hsize_d == HSIZE_WORD && |bus_haddr_d[1:0] || @@ -410,7 +467,7 @@ always @ (*) begin default: bus_hsize_d = HSIZE_WORD; endcase bus_aph_req_d = x_memop_vld && !( - x_stall_raw || + x_stall_on_raw || x_stall_on_exclusive_overlap || x_unaligned_addr || m_trap_enter_soon || @@ -448,7 +505,7 @@ if (EXTENSION_M) begin: has_muldiv ); assign x_muldiv_op_vld = (d_aluop == ALUOP_MULDIV && !x_use_fast_mul) - 
&& !(x_muldiv_posted || x_stall_raw || x_muldiv_kill); + && !(x_muldiv_posted || x_stall_on_raw || x_muldiv_kill); hazard3_muldiv_seq #( `include "hazard3_config_inst.vh" @@ -548,9 +605,11 @@ end wire [W_ADDR-1:0] m_exception_return_addr; wire [W_EXCEPT-1:0] x_except = - x_csr_illegal_access ? EXCEPT_INSTR_ILLEGAL : - x_unaligned_addr && x_memop_write ? EXCEPT_STORE_ALIGN : - x_unaligned_addr && !x_memop_write ? EXCEPT_LOAD_ALIGN : d_except; + x_csr_illegal_access ? EXCEPT_INSTR_ILLEGAL : + |EXTENSION_A && x_unaligned_addr && d_memop_is_amo ? EXCEPT_STORE_ALIGN : + |EXTENSION_A && x_amo_phase == 3'h4 ? EXCEPT_STORE_FAULT : + x_unaligned_addr && x_memop_write ? EXCEPT_STORE_ALIGN : + x_unaligned_addr && !x_memop_write ? EXCEPT_LOAD_ALIGN : d_except; // If an instruction causes an exceptional condition we do not consider it to have retired. wire x_except_counts_as_retire = x_except == EXCEPT_EBREAK || x_except == EXCEPT_MRET || x_except == EXCEPT_ECALL; @@ -647,15 +706,22 @@ always @ (posedge clk or negedge rst_n) begin end end -// No reset on datapath flops -always @ (posedge clk) - if (!m_stall) begin +// Datapath flops +always @ (posedge clk or negedge rst_n) begin + if (!rst_n) begin + xm_result <= {W_DATA{1'b0}}; + xm_store_data <= {W_DATA{1'b0}}; + end else if (!m_stall) begin xm_result <= d_csr_ren ? x_csr_rdata : EXTENSION_M && d_aluop == ALUOP_MULDIV ? x_muldiv_result : x_alu_result; xm_store_data <= x_rs2_bypass; + + end else if (d_memop_is_amo && x_amo_phase == 3'h1 && bus_dph_ready_d) begin + xm_store_data <= x_rs2_bypass; end +end // Branch handling @@ -663,7 +729,7 @@ always @ (posedge clk) wire [W_ADDR-1:0] x_jump_target = ((d_jump_is_regoffs ? 
x_rs1_bypass : d_pc) + d_jump_offs) & ~32'h1; // Be careful not to take branches whose comparisons depend on a load result -assign x_jump_req = !x_stall_raw && ( +assign x_jump_req = !x_stall_on_raw && ( d_branchcond == BCOND_ALWAYS || d_branchcond == BCOND_ZERO && !x_alu_cmp || d_branchcond == BCOND_NZERO && x_alu_cmp @@ -698,6 +764,51 @@ assign m_exception_return_addr = d_pc - ( prev_instr_was_32_bit ? 32'h4 : 32'h2 ); +// Load/store data handling + +wire [W_DATA-1:0] m_amo_wdata; +wire m_amo_wdata_valid; + +generate +if (EXTENSION_A) begin: has_amo_alu + + reg [W_MEMOP-1:0] amo_memop; + reg [W_DATA-1:0] amo_load_data; + reg m_amo_wdata_valid_r; + + assign m_amo_wdata_valid = m_amo_wdata_valid_r; + + always @ (posedge clk or negedge rst_n) begin + if (!rst_n) begin + amo_memop <= MEMOP_NONE; + amo_load_data <= {W_DATA{1'b0}}; + m_amo_wdata_valid_r <= 1'b0; + end else if (d_memop_is_amo && x_amo_phase == 3'h1 && bus_dph_ready_d) begin + amo_memop <= d_memop; + amo_load_data <= bus_rdata_d; + m_amo_wdata_valid_r <= 1'b1; + end else if (x_amo_phase == 3'h4 || (x_amo_phase == 3'h3 && bus_dph_ready_d) || m_trap_enter_soon) begin + m_amo_wdata_valid_r <= 1'b0; + end + end + + hazard3_amo_alu #( + `include "hazard3_config_inst.vh" + ) amo_alu ( + .op (amo_memop), + .op_rs1(amo_load_data), + .op_rs2(xm_store_data), + .result(m_amo_wdata) + ); + +end else begin: no_amo_alu + + assign m_amo_wdata = {W_DATA{1'b0}}; + assign m_amo_wdata_valid = 1'b0; + +end +endgenerate + always @ (*) begin // Local forwarding of store data if (|mw_rd && xm_rs2 == mw_rd && !REDUCED_BYPASS) begin @@ -709,8 +820,10 @@ always @ (*) begin case (xm_memop) MEMOP_SH: bus_wdata_d = {2{m_wdata[15:0]}}; MEMOP_SB: bus_wdata_d = {4{m_wdata[7:0]}}; - default: bus_wdata_d = m_wdata; // TODO worth it to mask when not writing? 
Costs LUTs, saves energy + default: bus_wdata_d = m_wdata; endcase + if (|EXTENSION_A && m_amo_wdata_valid) + bus_wdata_d = m_amo_wdata; casez ({xm_memop, xm_result[1:0]}) {MEMOP_LH , 2'b0z}: m_rdata_pick_sext = {{16{bus_rdata_d[15]}}, bus_rdata_d[15: 0]}; @@ -749,7 +862,13 @@ always @ (posedge clk or negedge rst_n) begin if (!rst_n) begin mw_local_exclusive_reserved <= 1'b0; end else if (|EXTENSION_A && !m_stall) begin - if (xm_memop == MEMOP_SC_W) begin +`ifdef FORMAL + // AMOs should handle the entire bus transfer in stage X. + assert(xm_memop != MEMOP_AMOADD_W); +`endif + if (d_memop_is_amo && |x_amo_phase) begin // TODO do AMOs clear reservation? + mw_local_exclusive_reserved <= 1'b0; + end else if (xm_memop == MEMOP_SC_W) begin mw_local_exclusive_reserved <= 1'b0; end else if (xm_memop == MEMOP_LR_W) begin mw_local_exclusive_reserved <= bus_dph_exokay_d; diff --git a/hdl/hazard3_decode.v b/hdl/hazard3_decode.v index bba5636..e1f613a 100644 --- a/hdl/hazard3_decode.v +++ b/hdl/hazard3_decode.v @@ -197,104 +197,113 @@ always @ (*) begin d_wfi = 1'b0; casez (d_instr) - RV_BEQ: begin d_invalid_32bit = DEBUG_SUPPORT && debug_mode; d_rd = X0; d_aluop = ALUOP_SUB; d_branchcond = BCOND_ZERO; end - RV_BNE: begin d_invalid_32bit = DEBUG_SUPPORT && debug_mode; d_rd = X0; d_aluop = ALUOP_SUB; d_branchcond = BCOND_NZERO; end - RV_BLT: begin d_invalid_32bit = DEBUG_SUPPORT && debug_mode; d_rd = X0; d_aluop = ALUOP_LT; d_branchcond = BCOND_NZERO; end - RV_BGE: begin d_invalid_32bit = DEBUG_SUPPORT && debug_mode; d_rd = X0; d_aluop = ALUOP_LT; d_branchcond = BCOND_ZERO; end - RV_BLTU: begin d_invalid_32bit = DEBUG_SUPPORT && debug_mode; d_rd = X0; d_aluop = ALUOP_LTU; d_branchcond = BCOND_NZERO; end - RV_BGEU: begin d_invalid_32bit = DEBUG_SUPPORT && debug_mode; d_rd = X0; d_aluop = ALUOP_LTU; d_branchcond = BCOND_ZERO; end - RV_JALR: begin d_invalid_32bit = DEBUG_SUPPORT && debug_mode; d_branchcond = BCOND_ALWAYS; d_jump_is_regoffs = 1'b1; d_rs2 = X0; d_aluop = ALUOP_ADD; 
d_alusrc_a = ALUSRCA_PC; d_alusrc_b = ALUSRCB_IMM; d_imm = d_instr_is_32bit ? 32'h4 : 32'h2; end - RV_JAL: begin d_invalid_32bit = DEBUG_SUPPORT && debug_mode; d_branchcond = BCOND_ALWAYS; d_rs1 = X0; d_rs2 = X0; d_aluop = ALUOP_ADD; d_alusrc_a = ALUSRCA_PC; d_alusrc_b = ALUSRCB_IMM; d_imm = d_instr_is_32bit ? 32'h4 : 32'h2; end - RV_LUI: begin d_aluop = ALUOP_ADD; d_imm = d_imm_u; d_alusrc_b = ALUSRCB_IMM; d_rs2 = X0; d_rs1 = X0; end - RV_AUIPC: begin d_invalid_32bit = DEBUG_SUPPORT && debug_mode; d_aluop = ALUOP_ADD; d_imm = d_imm_u; d_alusrc_b = ALUSRCB_IMM; d_rs2 = X0; d_alusrc_a = ALUSRCA_PC; d_rs1 = X0; end - RV_ADDI: begin d_aluop = ALUOP_ADD; d_imm = d_imm_i; d_alusrc_b = ALUSRCB_IMM; d_rs2 = X0; end - RV_SLLI: begin d_aluop = ALUOP_SLL; d_imm = d_imm_i; d_alusrc_b = ALUSRCB_IMM; d_rs2 = X0; end - RV_SLTI: begin d_aluop = ALUOP_LT; d_imm = d_imm_i; d_alusrc_b = ALUSRCB_IMM; d_rs2 = X0; end - RV_SLTIU: begin d_aluop = ALUOP_LTU; d_imm = d_imm_i; d_alusrc_b = ALUSRCB_IMM; d_rs2 = X0; end - RV_XORI: begin d_aluop = ALUOP_XOR; d_imm = d_imm_i; d_alusrc_b = ALUSRCB_IMM; d_rs2 = X0; end - RV_SRLI: begin d_aluop = ALUOP_SRL; d_imm = d_imm_i; d_alusrc_b = ALUSRCB_IMM; d_rs2 = X0; end - RV_SRAI: begin d_aluop = ALUOP_SRA; d_imm = d_imm_i; d_alusrc_b = ALUSRCB_IMM; d_rs2 = X0; end - RV_ORI: begin d_aluop = ALUOP_OR; d_imm = d_imm_i; d_alusrc_b = ALUSRCB_IMM; d_rs2 = X0; end - RV_ANDI: begin d_aluop = ALUOP_AND; d_imm = d_imm_i; d_alusrc_b = ALUSRCB_IMM; d_rs2 = X0; end - RV_ADD: begin d_aluop = ALUOP_ADD; end - RV_SUB: begin d_aluop = ALUOP_SUB; end - RV_SLL: begin d_aluop = ALUOP_SLL; end - RV_SLT: begin d_aluop = ALUOP_LT; end - RV_SLTU: begin d_aluop = ALUOP_LTU; end - RV_XOR: begin d_aluop = ALUOP_XOR; end - RV_SRL: begin d_aluop = ALUOP_SRL; end - RV_SRA: begin d_aluop = ALUOP_SRA; end - RV_OR: begin d_aluop = ALUOP_OR; end - RV_AND: begin d_aluop = ALUOP_AND; end - RV_LB: begin d_aluop = ALUOP_ADD; d_imm = d_imm_i; d_alusrc_b = ALUSRCB_IMM; d_rs2 = X0; d_memop 
= MEMOP_LB; end - RV_LH: begin d_aluop = ALUOP_ADD; d_imm = d_imm_i; d_alusrc_b = ALUSRCB_IMM; d_rs2 = X0; d_memop = MEMOP_LH; end - RV_LW: begin d_aluop = ALUOP_ADD; d_imm = d_imm_i; d_alusrc_b = ALUSRCB_IMM; d_rs2 = X0; d_memop = MEMOP_LW; end - RV_LBU: begin d_aluop = ALUOP_ADD; d_imm = d_imm_i; d_alusrc_b = ALUSRCB_IMM; d_rs2 = X0; d_memop = MEMOP_LBU; end - RV_LHU: begin d_aluop = ALUOP_ADD; d_imm = d_imm_i; d_alusrc_b = ALUSRCB_IMM; d_rs2 = X0; d_memop = MEMOP_LHU; end - RV_SB: begin d_aluop = ALUOP_ADD; d_imm = d_imm_s; d_alusrc_b = ALUSRCB_IMM; d_memop = MEMOP_SB; d_rd = X0; end - RV_SH: begin d_aluop = ALUOP_ADD; d_imm = d_imm_s; d_alusrc_b = ALUSRCB_IMM; d_memop = MEMOP_SH; d_rd = X0; end - RV_SW: begin d_aluop = ALUOP_ADD; d_imm = d_imm_s; d_alusrc_b = ALUSRCB_IMM; d_memop = MEMOP_SW; d_rd = X0; end + RV_BEQ: begin d_invalid_32bit = DEBUG_SUPPORT && debug_mode; d_rd = X0; d_aluop = ALUOP_SUB; d_branchcond = BCOND_ZERO; end + RV_BNE: begin d_invalid_32bit = DEBUG_SUPPORT && debug_mode; d_rd = X0; d_aluop = ALUOP_SUB; d_branchcond = BCOND_NZERO; end + RV_BLT: begin d_invalid_32bit = DEBUG_SUPPORT && debug_mode; d_rd = X0; d_aluop = ALUOP_LT; d_branchcond = BCOND_NZERO; end + RV_BGE: begin d_invalid_32bit = DEBUG_SUPPORT && debug_mode; d_rd = X0; d_aluop = ALUOP_LT; d_branchcond = BCOND_ZERO; end + RV_BLTU: begin d_invalid_32bit = DEBUG_SUPPORT && debug_mode; d_rd = X0; d_aluop = ALUOP_LTU; d_branchcond = BCOND_NZERO; end + RV_BGEU: begin d_invalid_32bit = DEBUG_SUPPORT && debug_mode; d_rd = X0; d_aluop = ALUOP_LTU; d_branchcond = BCOND_ZERO; end + RV_JALR: begin d_invalid_32bit = DEBUG_SUPPORT && debug_mode; d_branchcond = BCOND_ALWAYS; d_jump_is_regoffs = 1'b1; d_rs2 = X0; d_aluop = ALUOP_ADD; d_alusrc_a = ALUSRCA_PC; d_alusrc_b = ALUSRCB_IMM; d_imm = d_instr_is_32bit ? 
32'h4 : 32'h2; end + RV_JAL: begin d_invalid_32bit = DEBUG_SUPPORT && debug_mode; d_branchcond = BCOND_ALWAYS; d_rs1 = X0; d_rs2 = X0; d_aluop = ALUOP_ADD; d_alusrc_a = ALUSRCA_PC; d_alusrc_b = ALUSRCB_IMM; d_imm = d_instr_is_32bit ? 32'h4 : 32'h2; end + RV_LUI: begin d_aluop = ALUOP_ADD; d_imm = d_imm_u; d_alusrc_b = ALUSRCB_IMM; d_rs2 = X0; d_rs1 = X0; end + RV_AUIPC: begin d_invalid_32bit = DEBUG_SUPPORT && debug_mode; d_aluop = ALUOP_ADD; d_imm = d_imm_u; d_alusrc_b = ALUSRCB_IMM; d_rs2 = X0; d_alusrc_a = ALUSRCA_PC; d_rs1 = X0; end + RV_ADDI: begin d_aluop = ALUOP_ADD; d_imm = d_imm_i; d_alusrc_b = ALUSRCB_IMM; d_rs2 = X0; end + RV_SLLI: begin d_aluop = ALUOP_SLL; d_imm = d_imm_i; d_alusrc_b = ALUSRCB_IMM; d_rs2 = X0; end + RV_SLTI: begin d_aluop = ALUOP_LT; d_imm = d_imm_i; d_alusrc_b = ALUSRCB_IMM; d_rs2 = X0; end + RV_SLTIU: begin d_aluop = ALUOP_LTU; d_imm = d_imm_i; d_alusrc_b = ALUSRCB_IMM; d_rs2 = X0; end + RV_XORI: begin d_aluop = ALUOP_XOR; d_imm = d_imm_i; d_alusrc_b = ALUSRCB_IMM; d_rs2 = X0; end + RV_SRLI: begin d_aluop = ALUOP_SRL; d_imm = d_imm_i; d_alusrc_b = ALUSRCB_IMM; d_rs2 = X0; end + RV_SRAI: begin d_aluop = ALUOP_SRA; d_imm = d_imm_i; d_alusrc_b = ALUSRCB_IMM; d_rs2 = X0; end + RV_ORI: begin d_aluop = ALUOP_OR; d_imm = d_imm_i; d_alusrc_b = ALUSRCB_IMM; d_rs2 = X0; end + RV_ANDI: begin d_aluop = ALUOP_AND; d_imm = d_imm_i; d_alusrc_b = ALUSRCB_IMM; d_rs2 = X0; end + RV_ADD: begin d_aluop = ALUOP_ADD; end + RV_SUB: begin d_aluop = ALUOP_SUB; end + RV_SLL: begin d_aluop = ALUOP_SLL; end + RV_SLT: begin d_aluop = ALUOP_LT; end + RV_SLTU: begin d_aluop = ALUOP_LTU; end + RV_XOR: begin d_aluop = ALUOP_XOR; end + RV_SRL: begin d_aluop = ALUOP_SRL; end + RV_SRA: begin d_aluop = ALUOP_SRA; end + RV_OR: begin d_aluop = ALUOP_OR; end + RV_AND: begin d_aluop = ALUOP_AND; end + RV_LB: begin d_aluop = ALUOP_ADD; d_imm = d_imm_i; d_alusrc_b = ALUSRCB_IMM; d_rs2 = X0; d_memop = MEMOP_LB; end + RV_LH: begin d_aluop = ALUOP_ADD; d_imm = d_imm_i; 
d_alusrc_b = ALUSRCB_IMM; d_rs2 = X0; d_memop = MEMOP_LH; end + RV_LW: begin d_aluop = ALUOP_ADD; d_imm = d_imm_i; d_alusrc_b = ALUSRCB_IMM; d_rs2 = X0; d_memop = MEMOP_LW; end + RV_LBU: begin d_aluop = ALUOP_ADD; d_imm = d_imm_i; d_alusrc_b = ALUSRCB_IMM; d_rs2 = X0; d_memop = MEMOP_LBU; end + RV_LHU: begin d_aluop = ALUOP_ADD; d_imm = d_imm_i; d_alusrc_b = ALUSRCB_IMM; d_rs2 = X0; d_memop = MEMOP_LHU; end + RV_SB: begin d_aluop = ALUOP_ADD; d_imm = d_imm_s; d_alusrc_b = ALUSRCB_IMM; d_memop = MEMOP_SB; d_rd = X0; end + RV_SH: begin d_aluop = ALUOP_ADD; d_imm = d_imm_s; d_alusrc_b = ALUSRCB_IMM; d_memop = MEMOP_SH; d_rd = X0; end + RV_SW: begin d_aluop = ALUOP_ADD; d_imm = d_imm_s; d_alusrc_b = ALUSRCB_IMM; d_memop = MEMOP_SW; d_rd = X0; end - RV_MUL: if (EXTENSION_M) begin d_aluop = ALUOP_MULDIV; d_mulop = M_OP_MUL; end else begin d_invalid_32bit = 1'b1; end - RV_MULH: if (EXTENSION_M) begin d_aluop = ALUOP_MULDIV; d_mulop = M_OP_MULH; end else begin d_invalid_32bit = 1'b1; end - RV_MULHSU: if (EXTENSION_M) begin d_aluop = ALUOP_MULDIV; d_mulop = M_OP_MULHSU; end else begin d_invalid_32bit = 1'b1; end - RV_MULHU: if (EXTENSION_M) begin d_aluop = ALUOP_MULDIV; d_mulop = M_OP_MULHU; end else begin d_invalid_32bit = 1'b1; end - RV_DIV: if (EXTENSION_M) begin d_aluop = ALUOP_MULDIV; d_mulop = M_OP_DIV; end else begin d_invalid_32bit = 1'b1; end - RV_DIVU: if (EXTENSION_M) begin d_aluop = ALUOP_MULDIV; d_mulop = M_OP_DIVU; end else begin d_invalid_32bit = 1'b1; end - RV_REM: if (EXTENSION_M) begin d_aluop = ALUOP_MULDIV; d_mulop = M_OP_REM; end else begin d_invalid_32bit = 1'b1; end - RV_REMU: if (EXTENSION_M) begin d_aluop = ALUOP_MULDIV; d_mulop = M_OP_REMU; end else begin d_invalid_32bit = 1'b1; end + RV_MUL: if (EXTENSION_M) begin d_aluop = ALUOP_MULDIV; d_mulop = M_OP_MUL; end else begin d_invalid_32bit = 1'b1; end + RV_MULH: if (EXTENSION_M) begin d_aluop = ALUOP_MULDIV; d_mulop = M_OP_MULH; end else begin d_invalid_32bit = 1'b1; end + RV_MULHSU: if 
(EXTENSION_M) begin d_aluop = ALUOP_MULDIV; d_mulop = M_OP_MULHSU; end else begin d_invalid_32bit = 1'b1; end + RV_MULHU: if (EXTENSION_M) begin d_aluop = ALUOP_MULDIV; d_mulop = M_OP_MULHU; end else begin d_invalid_32bit = 1'b1; end + RV_DIV: if (EXTENSION_M) begin d_aluop = ALUOP_MULDIV; d_mulop = M_OP_DIV; end else begin d_invalid_32bit = 1'b1; end + RV_DIVU: if (EXTENSION_M) begin d_aluop = ALUOP_MULDIV; d_mulop = M_OP_DIVU; end else begin d_invalid_32bit = 1'b1; end + RV_REM: if (EXTENSION_M) begin d_aluop = ALUOP_MULDIV; d_mulop = M_OP_REM; end else begin d_invalid_32bit = 1'b1; end + RV_REMU: if (EXTENSION_M) begin d_aluop = ALUOP_MULDIV; d_mulop = M_OP_REMU; end else begin d_invalid_32bit = 1'b1; end - RV_LR_W: if (EXTENSION_A) begin d_imm = X0; d_alusrc_b = ALUSRCB_IMM; d_rs2 = X0; d_memop = MEMOP_LR_W; end - RV_SC_W: if (EXTENSION_A) begin d_imm = X0; d_alusrc_b = ALUSRCB_IMM; d_memop = MEMOP_SC_W; end + RV_LR_W: if (EXTENSION_A) begin d_imm = X0; d_alusrc_b = ALUSRCB_IMM; d_rs2 = X0; d_memop = MEMOP_LR_W; end + RV_SC_W: if (EXTENSION_A) begin d_imm = X0; d_alusrc_b = ALUSRCB_IMM; d_memop = MEMOP_SC_W; end + RV_AMOSWAP_W: if (EXTENSION_A) begin d_imm = X0; d_alusrc_b = ALUSRCB_IMM; d_memop = MEMOP_AMOSWAP_W; end + RV_AMOADD_W: if (EXTENSION_A) begin d_imm = X0; d_alusrc_b = ALUSRCB_IMM; d_memop = MEMOP_AMOADD_W; end + RV_AMOXOR_W: if (EXTENSION_A) begin d_imm = X0; d_alusrc_b = ALUSRCB_IMM; d_memop = MEMOP_AMOXOR_W; end + RV_AMOAND_W: if (EXTENSION_A) begin d_imm = X0; d_alusrc_b = ALUSRCB_IMM; d_memop = MEMOP_AMOAND_W; end + RV_AMOOR_W: if (EXTENSION_A) begin d_imm = X0; d_alusrc_b = ALUSRCB_IMM; d_memop = MEMOP_AMOOR_W; end + RV_AMOMIN_W: if (EXTENSION_A) begin d_imm = X0; d_alusrc_b = ALUSRCB_IMM; d_memop = MEMOP_AMOMIN_W; end + RV_AMOMAX_W: if (EXTENSION_A) begin d_imm = X0; d_alusrc_b = ALUSRCB_IMM; d_memop = MEMOP_AMOMAX_W; end + RV_AMOMINU_W: if (EXTENSION_A) begin d_imm = X0; d_alusrc_b = ALUSRCB_IMM; d_memop = MEMOP_AMOMINU_W; end + RV_AMOMAXU_W: 
if (EXTENSION_A) begin d_imm = X0; d_alusrc_b = ALUSRCB_IMM; d_memop = MEMOP_AMOMAXU_W; end - RV_SH1ADD: if (EXTENSION_ZBA) begin d_aluop = ALUOP_SH1ADD; end else begin d_invalid_32bit = 1'b1; end - RV_SH2ADD: if (EXTENSION_ZBA) begin d_aluop = ALUOP_SH2ADD; end else begin d_invalid_32bit = 1'b1; end - RV_SH3ADD: if (EXTENSION_ZBA) begin d_aluop = ALUOP_SH3ADD; end else begin d_invalid_32bit = 1'b1; end + RV_SH1ADD: if (EXTENSION_ZBA) begin d_aluop = ALUOP_SH1ADD; end else begin d_invalid_32bit = 1'b1; end + RV_SH2ADD: if (EXTENSION_ZBA) begin d_aluop = ALUOP_SH2ADD; end else begin d_invalid_32bit = 1'b1; end + RV_SH3ADD: if (EXTENSION_ZBA) begin d_aluop = ALUOP_SH3ADD; end else begin d_invalid_32bit = 1'b1; end - RV_ANDN: if (EXTENSION_ZBB) begin d_aluop = ALUOP_ANDN; end else begin d_invalid_32bit = 1'b1; end - RV_CLZ: if (EXTENSION_ZBB) begin d_aluop = ALUOP_CLZ; d_rs2 = X0; end else begin d_invalid_32bit = 1'b1; end - RV_CPOP: if (EXTENSION_ZBB) begin d_aluop = ALUOP_CPOP; d_rs2 = X0; end else begin d_invalid_32bit = 1'b1; end - RV_CTZ: if (EXTENSION_ZBB) begin d_aluop = ALUOP_CTZ; d_rs2 = X0; end else begin d_invalid_32bit = 1'b1; end - RV_MAX: if (EXTENSION_ZBB) begin d_aluop = ALUOP_MAX; end else begin d_invalid_32bit = 1'b1; end - RV_MAXU: if (EXTENSION_ZBB) begin d_aluop = ALUOP_MAXU; end else begin d_invalid_32bit = 1'b1; end - RV_MIN: if (EXTENSION_ZBB) begin d_aluop = ALUOP_MIN; end else begin d_invalid_32bit = 1'b1; end - RV_MINU: if (EXTENSION_ZBB) begin d_aluop = ALUOP_MINU; end else begin d_invalid_32bit = 1'b1; end - RV_ORC_B: if (EXTENSION_ZBB) begin d_aluop = ALUOP_ORC_B; d_rs2 = X0; end else begin d_invalid_32bit = 1'b1; end - RV_ORN: if (EXTENSION_ZBB) begin d_aluop = ALUOP_ORN; end else begin d_invalid_32bit = 1'b1; end - RV_REV8: if (EXTENSION_ZBB) begin d_aluop = ALUOP_REV8; d_rs2 = X0; end else begin d_invalid_32bit = 1'b1; end - RV_ROL: if (EXTENSION_ZBB) begin d_aluop = ALUOP_ROL; end else begin d_invalid_32bit = 1'b1; end - RV_ROR: if 
(EXTENSION_ZBB) begin d_aluop = ALUOP_ROR; end else begin d_invalid_32bit = 1'b1; end - RV_RORI: if (EXTENSION_ZBB) begin d_aluop = ALUOP_ROR; d_rs2 = X0; d_imm = d_imm_i; d_alusrc_b = ALUSRCB_IMM; end else begin d_invalid_32bit = 1'b1; end - RV_SEXT_B: if (EXTENSION_ZBB) begin d_aluop = ALUOP_SEXT_B; d_rs2 = X0; end else begin d_invalid_32bit = 1'b1; end - RV_SEXT_H: if (EXTENSION_ZBB) begin d_aluop = ALUOP_SEXT_H; d_rs2 = X0; end else begin d_invalid_32bit = 1'b1; end - RV_XNOR: if (EXTENSION_ZBB) begin d_aluop = ALUOP_XNOR; end else begin d_invalid_32bit = 1'b1; end - RV_ZEXT_H: if (EXTENSION_ZBB) begin d_aluop = ALUOP_ZEXT_H; d_rs2 = X0; end else begin d_invalid_32bit = 1'b1; end + RV_ANDN: if (EXTENSION_ZBB) begin d_aluop = ALUOP_ANDN; end else begin d_invalid_32bit = 1'b1; end + RV_CLZ: if (EXTENSION_ZBB) begin d_aluop = ALUOP_CLZ; d_rs2 = X0; end else begin d_invalid_32bit = 1'b1; end + RV_CPOP: if (EXTENSION_ZBB) begin d_aluop = ALUOP_CPOP; d_rs2 = X0; end else begin d_invalid_32bit = 1'b1; end + RV_CTZ: if (EXTENSION_ZBB) begin d_aluop = ALUOP_CTZ; d_rs2 = X0; end else begin d_invalid_32bit = 1'b1; end + RV_MAX: if (EXTENSION_ZBB) begin d_aluop = ALUOP_MAX; end else begin d_invalid_32bit = 1'b1; end + RV_MAXU: if (EXTENSION_ZBB) begin d_aluop = ALUOP_MAXU; end else begin d_invalid_32bit = 1'b1; end + RV_MIN: if (EXTENSION_ZBB) begin d_aluop = ALUOP_MIN; end else begin d_invalid_32bit = 1'b1; end + RV_MINU: if (EXTENSION_ZBB) begin d_aluop = ALUOP_MINU; end else begin d_invalid_32bit = 1'b1; end + RV_ORC_B: if (EXTENSION_ZBB) begin d_aluop = ALUOP_ORC_B; d_rs2 = X0; end else begin d_invalid_32bit = 1'b1; end + RV_ORN: if (EXTENSION_ZBB) begin d_aluop = ALUOP_ORN; end else begin d_invalid_32bit = 1'b1; end + RV_REV8: if (EXTENSION_ZBB) begin d_aluop = ALUOP_REV8; d_rs2 = X0; end else begin d_invalid_32bit = 1'b1; end + RV_ROL: if (EXTENSION_ZBB) begin d_aluop = ALUOP_ROL; end else begin d_invalid_32bit = 1'b1; end + RV_ROR: if (EXTENSION_ZBB) begin d_aluop = 
ALUOP_ROR; end else begin d_invalid_32bit = 1'b1; end + RV_RORI: if (EXTENSION_ZBB) begin d_aluop = ALUOP_ROR; d_rs2 = X0; d_imm = d_imm_i; d_alusrc_b = ALUSRCB_IMM; end else begin d_invalid_32bit = 1'b1; end + RV_SEXT_B: if (EXTENSION_ZBB) begin d_aluop = ALUOP_SEXT_B; d_rs2 = X0; end else begin d_invalid_32bit = 1'b1; end + RV_SEXT_H: if (EXTENSION_ZBB) begin d_aluop = ALUOP_SEXT_H; d_rs2 = X0; end else begin d_invalid_32bit = 1'b1; end + RV_XNOR: if (EXTENSION_ZBB) begin d_aluop = ALUOP_XNOR; end else begin d_invalid_32bit = 1'b1; end + RV_ZEXT_H: if (EXTENSION_ZBB) begin d_aluop = ALUOP_ZEXT_H; d_rs2 = X0; end else begin d_invalid_32bit = 1'b1; end - RV_CLMUL: if (EXTENSION_ZBC) begin d_aluop = ALUOP_CLMUL; end else begin d_invalid_32bit = 1'b1; end - RV_CLMULH: if (EXTENSION_ZBC) begin d_aluop = ALUOP_CLMULH; end else begin d_invalid_32bit = 1'b1; end - RV_CLMULR: if (EXTENSION_ZBC) begin d_aluop = ALUOP_CLMULR; end else begin d_invalid_32bit = 1'b1; end - RV_BCLR: if (EXTENSION_ZBC) begin d_aluop = ALUOP_BCLR; end else begin d_invalid_32bit = 1'b1; end - RV_BCLRI: if (EXTENSION_ZBC) begin d_aluop = ALUOP_BCLR; d_rs2 = X0; d_imm = d_imm_i; d_alusrc_b = ALUSRCB_IMM; end else begin d_invalid_32bit = 1'b1; end - RV_BEXT: if (EXTENSION_ZBC) begin d_aluop = ALUOP_BEXT; end else begin d_invalid_32bit = 1'b1; end - RV_BEXTI: if (EXTENSION_ZBC) begin d_aluop = ALUOP_BEXT; d_rs2 = X0; d_imm = d_imm_i; d_alusrc_b = ALUSRCB_IMM; end else begin d_invalid_32bit = 1'b1; end - RV_BINV: if (EXTENSION_ZBC) begin d_aluop = ALUOP_BINV; end else begin d_invalid_32bit = 1'b1; end - RV_BINVI: if (EXTENSION_ZBC) begin d_aluop = ALUOP_BINV; d_rs2 = X0; d_imm = d_imm_i; d_alusrc_b = ALUSRCB_IMM; end else begin d_invalid_32bit = 1'b1; end - RV_BSET: if (EXTENSION_ZBC) begin d_aluop = ALUOP_BSET; end else begin d_invalid_32bit = 1'b1; end - RV_BSETI: if (EXTENSION_ZBC) begin d_aluop = ALUOP_BSET; d_rs2 = X0; d_imm = d_imm_i; d_alusrc_b = ALUSRCB_IMM; end else begin d_invalid_32bit = 
1'b1; end + RV_CLMUL: if (EXTENSION_ZBC) begin d_aluop = ALUOP_CLMUL; end else begin d_invalid_32bit = 1'b1; end + RV_CLMULH: if (EXTENSION_ZBC) begin d_aluop = ALUOP_CLMULH; end else begin d_invalid_32bit = 1'b1; end + RV_CLMULR: if (EXTENSION_ZBC) begin d_aluop = ALUOP_CLMULR; end else begin d_invalid_32bit = 1'b1; end + RV_BCLR: if (EXTENSION_ZBC) begin d_aluop = ALUOP_BCLR; end else begin d_invalid_32bit = 1'b1; end + RV_BCLRI: if (EXTENSION_ZBC) begin d_aluop = ALUOP_BCLR; d_rs2 = X0; d_imm = d_imm_i; d_alusrc_b = ALUSRCB_IMM; end else begin d_invalid_32bit = 1'b1; end + RV_BEXT: if (EXTENSION_ZBC) begin d_aluop = ALUOP_BEXT; end else begin d_invalid_32bit = 1'b1; end + RV_BEXTI: if (EXTENSION_ZBC) begin d_aluop = ALUOP_BEXT; d_rs2 = X0; d_imm = d_imm_i; d_alusrc_b = ALUSRCB_IMM; end else begin d_invalid_32bit = 1'b1; end + RV_BINV: if (EXTENSION_ZBC) begin d_aluop = ALUOP_BINV; end else begin d_invalid_32bit = 1'b1; end + RV_BINVI: if (EXTENSION_ZBC) begin d_aluop = ALUOP_BINV; d_rs2 = X0; d_imm = d_imm_i; d_alusrc_b = ALUSRCB_IMM; end else begin d_invalid_32bit = 1'b1; end + RV_BSET: if (EXTENSION_ZBC) begin d_aluop = ALUOP_BSET; end else begin d_invalid_32bit = 1'b1; end + RV_BSETI: if (EXTENSION_ZBC) begin d_aluop = ALUOP_BSET; d_rs2 = X0; d_imm = d_imm_i; d_alusrc_b = ALUSRCB_IMM; end else begin d_invalid_32bit = 1'b1; end - RV_FENCE: begin d_rd = X0; end // NOP - RV_FENCE_I: begin d_invalid_32bit = DEBUG_SUPPORT && debug_mode; d_rd = X0; d_rs1 = X0; d_rs2 = X0; d_branchcond = BCOND_NZERO; d_imm[31] = 1'b1; end // FIXME this is probably busted now. Maybe implement as an exception? 
- RV_CSRRW: if (HAVE_CSR) begin d_imm = d_imm_i; d_csr_wen = 1'b1 ; d_csr_ren = |d_rd; d_csr_wtype = CSR_WTYPE_W; end else begin d_invalid_32bit = 1'b1; end - RV_CSRRS: if (HAVE_CSR) begin d_imm = d_imm_i; d_csr_wen = |d_rs1; d_csr_ren = 1'b1 ; d_csr_wtype = CSR_WTYPE_S; end else begin d_invalid_32bit = 1'b1; end - RV_CSRRC: if (HAVE_CSR) begin d_imm = d_imm_i; d_csr_wen = |d_rs1; d_csr_ren = 1'b1 ; d_csr_wtype = CSR_WTYPE_C; end else begin d_invalid_32bit = 1'b1; end - RV_CSRRWI: if (HAVE_CSR) begin d_imm = d_imm_i; d_csr_wen = 1'b1 ; d_csr_ren = |d_rd; d_csr_wtype = CSR_WTYPE_W; d_csr_w_imm = 1'b1; end else begin d_invalid_32bit = 1'b1; end - RV_CSRRSI: if (HAVE_CSR) begin d_imm = d_imm_i; d_csr_wen = |d_rs1; d_csr_ren = 1'b1 ; d_csr_wtype = CSR_WTYPE_S; d_csr_w_imm = 1'b1; end else begin d_invalid_32bit = 1'b1; end - RV_CSRRCI: if (HAVE_CSR) begin d_imm = d_imm_i; d_csr_wen = |d_rs1; d_csr_ren = 1'b1 ; d_csr_wtype = CSR_WTYPE_C; d_csr_w_imm = 1'b1; end else begin d_invalid_32bit = 1'b1; end - RV_ECALL: if (HAVE_CSR) begin d_except = EXCEPT_ECALL; d_rs2 = X0; d_rs1 = X0; d_rd = X0; end else begin d_invalid_32bit = 1'b1; end - RV_EBREAK: if (HAVE_CSR) begin d_except = EXCEPT_EBREAK; d_rs2 = X0; d_rs1 = X0; d_rd = X0; end else begin d_invalid_32bit = 1'b1; end - RV_MRET: if (HAVE_CSR) begin d_except = EXCEPT_MRET; d_rs2 = X0; d_rs1 = X0; d_rd = X0; end else begin d_invalid_32bit = 1'b1; end - RV_WFI: if (HAVE_CSR) begin d_wfi = 1'b1; d_rs2 = X0; d_rs1 = X0; d_rd = X0; end else begin d_invalid_32bit = 1'b1; end - default: begin d_invalid_32bit = 1'b1; end + RV_FENCE: begin d_rd = X0; end // NOP + RV_FENCE_I: begin d_invalid_32bit = DEBUG_SUPPORT && debug_mode; d_rd = X0; d_rs1 = X0; d_rs2 = X0; d_branchcond = BCOND_NZERO; d_imm[31] = 1'b1; end // FIXME this is probably busted now. Maybe implement as an exception? 
+ RV_CSRRW: if (HAVE_CSR) begin d_imm = d_imm_i; d_csr_wen = 1'b1 ; d_csr_ren = |d_rd; d_csr_wtype = CSR_WTYPE_W; end else begin d_invalid_32bit = 1'b1; end + RV_CSRRS: if (HAVE_CSR) begin d_imm = d_imm_i; d_csr_wen = |d_rs1; d_csr_ren = 1'b1 ; d_csr_wtype = CSR_WTYPE_S; end else begin d_invalid_32bit = 1'b1; end + RV_CSRRC: if (HAVE_CSR) begin d_imm = d_imm_i; d_csr_wen = |d_rs1; d_csr_ren = 1'b1 ; d_csr_wtype = CSR_WTYPE_C; end else begin d_invalid_32bit = 1'b1; end + RV_CSRRWI: if (HAVE_CSR) begin d_imm = d_imm_i; d_csr_wen = 1'b1 ; d_csr_ren = |d_rd; d_csr_wtype = CSR_WTYPE_W; d_csr_w_imm = 1'b1; end else begin d_invalid_32bit = 1'b1; end + RV_CSRRSI: if (HAVE_CSR) begin d_imm = d_imm_i; d_csr_wen = |d_rs1; d_csr_ren = 1'b1 ; d_csr_wtype = CSR_WTYPE_S; d_csr_w_imm = 1'b1; end else begin d_invalid_32bit = 1'b1; end + RV_CSRRCI: if (HAVE_CSR) begin d_imm = d_imm_i; d_csr_wen = |d_rs1; d_csr_ren = 1'b1 ; d_csr_wtype = CSR_WTYPE_C; d_csr_w_imm = 1'b1; end else begin d_invalid_32bit = 1'b1; end + RV_ECALL: if (HAVE_CSR) begin d_except = EXCEPT_ECALL; d_rs2 = X0; d_rs1 = X0; d_rd = X0; end else begin d_invalid_32bit = 1'b1; end + RV_EBREAK: if (HAVE_CSR) begin d_except = EXCEPT_EBREAK; d_rs2 = X0; d_rs1 = X0; d_rd = X0; end else begin d_invalid_32bit = 1'b1; end + RV_MRET: if (HAVE_CSR) begin d_except = EXCEPT_MRET; d_rs2 = X0; d_rs1 = X0; d_rd = X0; end else begin d_invalid_32bit = 1'b1; end + RV_WFI: if (HAVE_CSR) begin d_wfi = 1'b1; d_rs2 = X0; d_rs1 = X0; d_rd = X0; end else begin d_invalid_32bit = 1'b1; end + default: begin d_invalid_32bit = 1'b1; end endcase if (d_invalid || d_starved || d_except_instr_bus_fault) begin