Every day I'm ~~shuffling~~ refactoring.

Some big refactors:
* improve scheduler performance by using a BinaryHeap
* refactor the scheduler API
* arm7tdmi
	* Change struct arm7tdmi::Core struct layout so frequently accessed fields would benefit from CPU cache
	* Simplify and cleanup cycle counting by implementing a MemoryInterface trait
	* Still not passing many cycle accuracy tests, but I believe it's because I don't have the prefetch buffer yet.
* Timer overflows are now scheduled
	* This fixes #111 and fixes #112
*


Former-commit-id: 17989e841a1ea88c2a7e14f4c99b31790a43c023
Former-commit-id: 109d98d824a464de347f6590a6ffe9af86b4b4ea
This commit is contained in:
Michel Heily 2020-10-17 06:22:31 -07:00 committed by MishMish
parent 85db28dac6
commit b6e2d55550
15 changed files with 1245 additions and 996 deletions

View file

@ -227,27 +227,10 @@ fn arm_format_to_handler(arm_fmt: &str) -> &'static str {
} }
fn generate_thumb_lut(file: &mut fs::File) -> Result<(), std::io::Error> { fn generate_thumb_lut(file: &mut fs::File) -> Result<(), std::io::Error> {
writeln!(file, "impl<I: MemoryInterface> Core<I> {{")?;
writeln!( writeln!(
file, file,
"/// This file is auto-generated from the build script " pub const THUMB_LUT: [ThumbInstructionInfo<I>; 1024] = ["
#[cfg(feature = \"debugger\")]
use super::thumb::ThumbFormat;
pub type ThumbInstructionHandler = fn(&mut Core, &mut SysBus, insn: u16) -> CpuAction;
#[cfg_attr(not(feature = \"debugger\"), repr(transparent))]
pub struct ThumbInstructionInfo {{
pub handler_fn: ThumbInstructionHandler,
#[cfg(feature = \"debugger\")]
pub fmt: ThumbFormat,
}}
"
)?;
writeln!(
file,
"pub const THUMB_LUT: [ThumbInstructionInfo; 1024] = ["
)?; )?;
for i in 0..1024 { for i in 0..1024 {
@ -255,56 +238,44 @@ pub struct ThumbInstructionInfo {{
let handler_name = thumb_format_to_handler(thumb_fmt); let handler_name = thumb_format_to_handler(thumb_fmt);
writeln!( writeln!(
file, file,
" /* {:#x} */ " /* {:#x} */
ThumbInstructionInfo {{ ThumbInstructionInfo {{
handler_fn: Core::{}, handler_fn: Core::{},
#[cfg(feature = \"debugger\")] #[cfg(feature = \"debugger\")]
fmt: ThumbFormat::{}, fmt: ThumbFormat::{},
}},", }},",
i, handler_name, thumb_fmt i, handler_name, thumb_fmt
)?; )?;
} }
writeln!(file, "];")?; writeln!(file, " ];")?;
writeln!(file, "}}")?;
Ok(()) Ok(())
} }
fn generate_arm_lut(file: &mut fs::File) -> Result<(), std::io::Error> { fn generate_arm_lut(file: &mut fs::File) -> Result<(), std::io::Error> {
writeln!(file, "impl<I: MemoryInterface> Core<I> {{")?;
writeln!( writeln!(
file, file,
"/// This file is auto-generated from the build script " pub const ARM_LUT: [ArmInstructionInfo<I>; 4096] = ["
#[cfg(feature = \"debugger\")]
use super::arm::ArmFormat;
pub type ArmInstructionHandler = fn(&mut Core, &mut SysBus, insn: u32) -> CpuAction;
#[cfg_attr(not(feature = \"debugger\"), repr(transparent))]
pub struct ArmInstructionInfo {{
pub handler_fn: ArmInstructionHandler,
#[cfg(feature = \"debugger\")]
pub fmt: ArmFormat,
}}
"
)?; )?;
writeln!(file, "pub const ARM_LUT: [ArmInstructionInfo; 4096] = [")?;
for i in 0..4096 { for i in 0..4096 {
let arm_fmt = arm_decode(((i & 0xff0) << 16) | ((i & 0x00f) << 4)); let arm_fmt = arm_decode(((i & 0xff0) << 16) | ((i & 0x00f) << 4));
let handler_name = arm_format_to_handler(arm_fmt); let handler_name = arm_format_to_handler(arm_fmt);
writeln!( writeln!(
file, file,
" /* {:#x} */ " /* {:#x} */
ArmInstructionInfo {{ ArmInstructionInfo {{
handler_fn: Core::{}, handler_fn: Core::{},
#[cfg(feature = \"debugger\")] #[cfg(feature = \"debugger\")]
fmt: ArmFormat::{}, fmt: ArmFormat::{},
}} ,", }} ,",
i, handler_name, arm_fmt i, handler_name, arm_fmt
)?; )?;
} }
writeln!(file, "];")?; writeln!(file, " ];")?;
writeln!(file, "}}")?;
Ok(()) Ok(())
} }

View file

@ -1,5 +1,6 @@
use bit::BitIndex; use bit::BitIndex;
use super::memory::MemoryInterface;
use super::{Core, REG_PC}; use super::{Core, REG_PC};
#[derive(Debug, Primitive, PartialEq)] #[derive(Debug, Primitive, PartialEq)]
@ -109,7 +110,7 @@ impl BarrelShifterValue {
} }
} }
impl Core { impl<I: MemoryInterface> Core<I> {
pub fn lsl(&mut self, val: u32, amount: u32, carry_in: bool) -> u32 { pub fn lsl(&mut self, val: u32, amount: u32, carry_in: bool) -> u32 {
match amount { match amount {
0 => { 0 => {
@ -215,6 +216,7 @@ impl Core {
} }
/// Performs a generic barrel shifter operation /// Performs a generic barrel shifter operation
#[inline]
pub fn barrel_shift_op( pub fn barrel_shift_op(
&mut self, &mut self,
shift: BarrelShiftOpCode, shift: BarrelShiftOpCode,
@ -253,6 +255,7 @@ impl Core {
} }
} }
#[inline]
pub fn shift_by_register( pub fn shift_by_register(
&mut self, &mut self,
bs_op: BarrelShiftOpCode, bs_op: BarrelShiftOpCode,
@ -261,7 +264,6 @@ impl Core {
carry: bool, carry: bool,
) -> u32 { ) -> u32 {
let mut val = self.get_reg(reg); let mut val = self.get_reg(reg);
self.add_cycle(); // +1I
if reg == REG_PC { if reg == REG_PC {
val += 4; // PC prefetching val += 4; // PC prefetching
} }

View file

@ -4,37 +4,36 @@ use super::super::alu::*;
use crate::arm7tdmi::psr::RegPSR; use crate::arm7tdmi::psr::RegPSR;
use crate::arm7tdmi::CpuAction; use crate::arm7tdmi::CpuAction;
use crate::arm7tdmi::{Addr, Core, CpuMode, CpuState, REG_LR, REG_PC}; use crate::arm7tdmi::{Addr, Core, CpuMode, CpuState, REG_LR, REG_PC};
use crate::sysbus::SysBus;
use crate::Bus; use super::super::memory::{MemoryAccess, MemoryInterface};
use MemoryAccess::*;
use super::ArmDecodeHelper; use super::ArmDecodeHelper;
use super::*; use super::*;
impl Core { impl<I: MemoryInterface> Core<I> {
#[cfg(not(feature = "arm7tdmi_dispatch_table"))] #[cfg(not(feature = "arm7tdmi_dispatch_table"))]
pub fn exec_arm(&mut self, bus: &mut SysBus, insn: u32, fmt: ArmFormat) -> CpuAction { pub fn exec_arm(&mut self, insn: u32, fmt: ArmFormat) -> CpuAction {
match fmt { match fmt {
ArmFormat::BranchExchange => self.exec_arm_bx(bus, insn), ArmFormat::BranchExchange => self.exec_arm_bx(insn),
ArmFormat::BranchLink => self.exec_arm_b_bl(bus, insn), ArmFormat::BranchLink => self.exec_arm_b_bl(insn),
ArmFormat::DataProcessing => self.exec_arm_data_processing(bus, insn), ArmFormat::DataProcessing => self.exec_arm_data_processing(insn),
ArmFormat::SoftwareInterrupt => self.exec_arm_swi(bus, insn), ArmFormat::SoftwareInterrupt => self.exec_arm_swi(insn),
ArmFormat::SingleDataTransfer => self.exec_arm_ldr_str(bus, insn), ArmFormat::SingleDataTransfer => self.exec_arm_ldr_str(insn),
ArmFormat::HalfwordDataTransferImmediateOffset => { ArmFormat::HalfwordDataTransferImmediateOffset => self.exec_arm_ldr_str_hs_imm(insn),
self.exec_arm_ldr_str_hs_imm(bus, insn) ArmFormat::HalfwordDataTransferRegOffset => self.exec_arm_ldr_str_hs_reg(insn),
} ArmFormat::BlockDataTransfer => self.exec_arm_ldm_stm(insn),
ArmFormat::HalfwordDataTransferRegOffset => self.exec_arm_ldr_str_hs_reg(bus, insn), ArmFormat::MoveFromStatus => self.exec_arm_mrs(insn),
ArmFormat::BlockDataTransfer => self.exec_arm_ldm_stm(bus, insn), ArmFormat::MoveToStatus => self.exec_arm_transfer_to_status(insn),
ArmFormat::MoveFromStatus => self.exec_arm_mrs(bus, insn), ArmFormat::MoveToFlags => self.exec_arm_transfer_to_status(insn),
ArmFormat::MoveToStatus => self.exec_arm_transfer_to_status(bus, insn), ArmFormat::Multiply => self.exec_arm_mul_mla(insn),
ArmFormat::MoveToFlags => self.exec_arm_transfer_to_status(bus, insn), ArmFormat::MultiplyLong => self.exec_arm_mull_mlal(insn),
ArmFormat::Multiply => self.exec_arm_mul_mla(bus, insn), ArmFormat::SingleDataSwap => self.exec_arm_swp(insn),
ArmFormat::MultiplyLong => self.exec_arm_mull_mlal(bus, insn), ArmFormat::Undefined => self.arm_undefined(insn),
ArmFormat::SingleDataSwap => self.exec_arm_swp(bus, insn),
ArmFormat::Undefined => self.arm_undefined(bus, insn),
} }
} }
pub fn arm_undefined(&mut self, _: &mut SysBus, insn: u32) -> CpuAction { pub fn arm_undefined(&mut self, insn: u32) -> CpuAction {
panic!( panic!(
"executing undefined arm instruction {:08x} at @{:08x}", "executing undefined arm instruction {:08x} at @{:08x}",
insn, insn,
@ -42,63 +41,51 @@ impl Core {
) )
} }
/// Cycles 2S+1N /// Branch and Branch with Link (B, BL)
pub fn exec_arm_b_bl(&mut self, sb: &mut SysBus, insn: u32) -> CpuAction { /// Execution Time: 2S + 1N
self.S_cycle32(sb, self.pc); pub fn exec_arm_b_bl(&mut self, insn: u32) -> CpuAction {
if insn.link_flag() { if insn.link_flag() {
self.set_reg(REG_LR, (self.pc_arm() + (self.word_size() as u32)) & !0b1); self.set_reg(REG_LR, (self.pc_arm() + (self.word_size() as u32)) & !0b1);
} }
self.pc = (self.pc as i32).wrapping_add(insn.branch_offset()) as u32 & !1; self.pc = (self.pc as i32).wrapping_add(insn.branch_offset()) as u32 & !1;
self.reload_pipeline32(sb); self.reload_pipeline32(); // Implies 2S + 1N
CpuAction::FlushPipeline CpuAction::PipelineFlushed
} }
pub fn branch_exchange(&mut self, sb: &mut SysBus, mut addr: Addr) -> CpuAction { pub fn branch_exchange(&mut self, mut addr: Addr) -> CpuAction {
match self.cpsr.state() {
CpuState::ARM => self.S_cycle32(sb, self.pc),
CpuState::THUMB => self.S_cycle16(sb, self.pc),
}
if addr.bit(0) { if addr.bit(0) {
addr = addr & !0x1; addr = addr & !0x1;
self.cpsr.set_state(CpuState::THUMB); self.cpsr.set_state(CpuState::THUMB);
self.pc = addr; self.pc = addr;
self.reload_pipeline16(sb); self.reload_pipeline16();
} else { } else {
addr = addr & !0x3; addr = addr & !0x3;
self.cpsr.set_state(CpuState::ARM); self.cpsr.set_state(CpuState::ARM);
self.pc = addr; self.pc = addr;
self.reload_pipeline32(sb); self.reload_pipeline32();
} }
CpuAction::PipelineFlushed
CpuAction::FlushPipeline
} }
/// Branch and Exchange (BX)
/// Cycles 2S+1N /// Cycles 2S+1N
pub fn exec_arm_bx(&mut self, sb: &mut SysBus, insn: u32) -> CpuAction { pub fn exec_arm_bx(&mut self, insn: u32) -> CpuAction {
self.branch_exchange(sb, self.get_reg(insn.bit_range(0..4) as usize)) self.branch_exchange(self.get_reg(insn.bit_range(0..4) as usize))
} }
fn move_from_status_register( /// Move from status register
&mut self, /// 1S
sb: &mut SysBus, pub fn exec_arm_mrs(&mut self, insn: u32) -> CpuAction {
rd: usize, let rd = insn.bit_range(12..16) as usize;
is_spsr: bool, let result = if insn.spsr_flag() {
) -> CpuAction {
let result = if is_spsr {
self.spsr.get() self.spsr.get()
} else { } else {
self.cpsr.get() self.cpsr.get()
}; };
self.set_reg(rd, result); self.set_reg(rd, result);
self.S_cycle32(sb, self.pc);
CpuAction::AdvancePC CpuAction::AdvancePC(Seq)
}
pub fn exec_arm_mrs(&mut self, sb: &mut SysBus, insn: u32) -> CpuAction {
self.move_from_status_register(sb, insn.bit_range(12..16) as usize, insn.spsr_flag())
} }
#[inline(always)] #[inline(always)]
@ -112,8 +99,9 @@ impl Core {
} }
} }
// #[cfg(feature = "arm7tdmi_dispatch_table")] /// Move to status register
pub fn exec_arm_transfer_to_status(&mut self, sb: &mut SysBus, insn: u32) -> CpuAction { /// 1S
pub fn exec_arm_transfer_to_status(&mut self, insn: u32) -> CpuAction {
let value = self.decode_msr_param(insn); let value = self.decode_msr_param(insn);
let f = insn.bit(19); let f = insn.bit(19);
@ -158,9 +146,8 @@ impl Core {
} }
} }
} }
self.S_cycle32(sb, self.pc);
CpuAction::AdvancePC CpuAction::AdvancePC(Seq)
} }
fn transfer_spsr_mode(&mut self) { fn transfer_spsr_mode(&mut self) {
@ -175,11 +162,9 @@ impl Core {
/// ///
/// Cycles: 1S+x+y (from GBATEK) /// Cycles: 1S+x+y (from GBATEK)
/// Add x=1I cycles if Op2 shifted-by-register. Add y=1S+1N cycles if Rd=R15. /// Add x=1I cycles if Op2 shifted-by-register. Add y=1S+1N cycles if Rd=R15.
pub fn exec_arm_data_processing(&mut self, sb: &mut SysBus, insn: u32) -> CpuAction { pub fn exec_arm_data_processing(&mut self, insn: u32) -> CpuAction {
use AluOpCode::*; use AluOpCode::*;
self.S_cycle32(sb, self.pc);
let rn = insn.bit_range(16..20) as usize; let rn = insn.bit_range(16..20) as usize;
let rd = insn.bit_range(12..16) as usize; let rd = insn.bit_range(12..16) as usize;
let mut op1 = if rn == REG_PC { let mut op1 = if rn == REG_PC {
@ -204,7 +189,7 @@ impl Core {
if rn == REG_PC { if rn == REG_PC {
op1 += 4; op1 += 4;
} }
self.idle_cycle();
let rs = insn.bit_range(8..12) as usize; let rs = insn.bit_range(8..12) as usize;
ShiftRegisterBy::ByRegister(rs) ShiftRegisterBy::ByRegister(rs)
} else { } else {
@ -270,16 +255,16 @@ impl Core {
}) })
}; };
let mut result = CpuAction::AdvancePC; let mut result = CpuAction::AdvancePC(Seq);
if let Some(alu_res) = alu_res { if let Some(alu_res) = alu_res {
self.set_reg(rd, alu_res as u32); self.set_reg(rd, alu_res as u32);
if rd == REG_PC { if rd == REG_PC {
// T bit might have changed // T bit might have changed
match self.cpsr.state() { match self.cpsr.state() {
CpuState::ARM => self.reload_pipeline32(sb), CpuState::ARM => self.reload_pipeline32(),
CpuState::THUMB => self.reload_pipeline16(sb), CpuState::THUMB => self.reload_pipeline16(),
}; };
result = CpuAction::FlushPipeline; result = CpuAction::PipelineFlushed;
} }
} }
@ -293,8 +278,8 @@ impl Core {
/// STR{cond}{B}{T} Rd,<Address> | 2N | ---- | [Rn+/-<offset>]=Rd /// STR{cond}{B}{T} Rd,<Address> | 2N | ---- | [Rn+/-<offset>]=Rd
/// ------------------------------------------------------------------------------ /// ------------------------------------------------------------------------------
/// For LDR, add y=1S+1N if Rd=R15. /// For LDR, add y=1S+1N if Rd=R15.
pub fn exec_arm_ldr_str(&mut self, sb: &mut SysBus, insn: u32) -> CpuAction { pub fn exec_arm_ldr_str(&mut self, insn: u32) -> CpuAction {
let mut result = CpuAction::AdvancePC; let mut result = CpuAction::AdvancePC(NonSeq);
let load = insn.load_flag(); let load = insn.load_flag();
let pre_index = insn.pre_index_flag(); let pre_index = insn.pre_index_flag();
@ -305,7 +290,7 @@ impl Core {
if base_reg == REG_PC { if base_reg == REG_PC {
addr = self.pc_arm() + 8; // prefetching addr = self.pc_arm() + 8; // prefetching
} }
let offset = self.get_barrel_shifted_value(&insn.ldr_str_offset()); let offset = self.get_barrel_shifted_value(&insn.ldr_str_offset()); // TODO: wrong to use in here
let effective_addr = (addr as i32).wrapping_add(offset as i32) as Addr; let effective_addr = (addr as i32).wrapping_add(offset as i32) as Addr;
// TODO - confirm this // TODO - confirm this
@ -321,23 +306,20 @@ impl Core {
}; };
if load { if load {
self.S_cycle32(sb, self.pc);
let data = if insn.transfer_size() == 1 { let data = if insn.transfer_size() == 1 {
self.N_cycle8(sb, addr); self.load_8(addr, NonSeq) as u32
sb.read_8(addr) as u32
} else { } else {
self.N_cycle32(sb, addr); self.ldr_word(addr, NonSeq)
self.ldr_word(addr, sb)
}; };
self.set_reg(dest_reg, data); self.set_reg(dest_reg, data);
// +1I // +1I
self.add_cycle(); self.idle_cycle();
if dest_reg == REG_PC { if dest_reg == REG_PC {
self.reload_pipeline32(sb); self.reload_pipeline32();
result = CpuAction::FlushPipeline; result = CpuAction::PipelineFlushed;
} }
} else { } else {
let value = if dest_reg == REG_PC { let value = if dest_reg == REG_PC {
@ -346,13 +328,10 @@ impl Core {
self.get_reg(dest_reg) self.get_reg(dest_reg)
}; };
if insn.transfer_size() == 1 { if insn.transfer_size() == 1 {
self.N_cycle8(sb, addr); self.store_8(addr, value as u8, NonSeq);
self.write_8(addr, value as u8, sb);
} else { } else {
self.N_cycle32(sb, addr); self.store_aligned_32(addr & !0x3, value, NonSeq);
self.write_32(addr & !0x3, value, sb);
}; };
self.N_cycle32(sb, self.pc);
} }
if !load || base_reg != dest_reg { if !load || base_reg != dest_reg {
@ -370,9 +349,8 @@ impl Core {
result result
} }
pub fn exec_arm_ldr_str_hs_reg(&mut self, sb: &mut SysBus, insn: u32) -> CpuAction { pub fn exec_arm_ldr_str_hs_reg(&mut self, insn: u32) -> CpuAction {
self.ldr_str_hs( self.ldr_str_hs(
sb,
insn, insn,
BarrelShifterValue::ShiftedRegister(ShiftedRegister { BarrelShifterValue::ShiftedRegister(ShiftedRegister {
reg: (insn & 0xf) as usize, reg: (insn & 0xf) as usize,
@ -383,24 +361,19 @@ impl Core {
) )
} }
pub fn exec_arm_ldr_str_hs_imm(&mut self, sb: &mut SysBus, insn: u32) -> CpuAction { pub fn exec_arm_ldr_str_hs_imm(&mut self, insn: u32) -> CpuAction {
let offset8 = (insn.bit_range(8..12) << 4) + insn.bit_range(0..4); let offset8 = (insn.bit_range(8..12) << 4) + insn.bit_range(0..4);
let offset8 = if insn.add_offset_flag() { let offset8 = if insn.add_offset_flag() {
offset8 offset8
} else { } else {
(-(offset8 as i32)) as u32 (-(offset8 as i32)) as u32
}; };
self.ldr_str_hs(sb, insn, BarrelShifterValue::ImmediateValue(offset8)) self.ldr_str_hs(insn, BarrelShifterValue::ImmediateValue(offset8))
} }
#[inline(always)] #[inline(always)]
pub fn ldr_str_hs( pub fn ldr_str_hs(&mut self, insn: u32, offset: BarrelShifterValue) -> CpuAction {
&mut self, let mut result = CpuAction::AdvancePC(NonSeq);
sb: &mut SysBus,
insn: u32,
offset: BarrelShifterValue,
) -> CpuAction {
let mut result = CpuAction::AdvancePC;
let load = insn.load_flag(); let load = insn.load_flag();
let pre_index = insn.pre_index_flag(); let pre_index = insn.pre_index_flag();
@ -428,30 +401,20 @@ impl Core {
}; };
if load { if load {
self.S_cycle32(sb, self.pc);
let data = match insn.halfword_data_transfer_type() { let data = match insn.halfword_data_transfer_type() {
ArmHalfwordTransferType::SignedByte => { ArmHalfwordTransferType::SignedByte => self.load_8(addr, NonSeq) as u8 as i8 as u32,
self.N_cycle8(sb, addr); ArmHalfwordTransferType::SignedHalfwords => self.ldr_sign_half(addr, NonSeq),
sb.read_8(addr) as u8 as i8 as u32 ArmHalfwordTransferType::UnsignedHalfwords => self.ldr_half(addr, NonSeq),
}
ArmHalfwordTransferType::SignedHalfwords => {
self.N_cycle16(sb, addr);
self.ldr_sign_half(addr, sb)
}
ArmHalfwordTransferType::UnsignedHalfwords => {
self.N_cycle16(sb, addr);
self.ldr_half(addr, sb)
}
}; };
self.set_reg(dest_reg, data); self.set_reg(dest_reg, data);
// +1I // +1I
self.add_cycle(); self.idle_cycle();
if dest_reg == REG_PC { if dest_reg == REG_PC {
self.reload_pipeline32(sb); self.reload_pipeline32();
result = CpuAction::FlushPipeline; result = CpuAction::PipelineFlushed;
} }
} else { } else {
let value = if dest_reg == REG_PC { let value = if dest_reg == REG_PC {
@ -462,9 +425,7 @@ impl Core {
match insn.halfword_data_transfer_type() { match insn.halfword_data_transfer_type() {
ArmHalfwordTransferType::UnsignedHalfwords => { ArmHalfwordTransferType::UnsignedHalfwords => {
self.N_cycle32(sb, addr); self.store_aligned_16(addr, value as u16, NonSeq);
self.write_16(addr, value as u16, sb);
self.N_cycle32(sb, self.pc);
} }
_ => panic!("invalid HS flags for L=0"), _ => panic!("invalid HS flags for L=0"),
}; };
@ -481,8 +442,8 @@ impl Core {
result result
} }
pub fn exec_arm_ldm_stm(&mut self, sb: &mut SysBus, insn: u32) -> CpuAction { pub fn exec_arm_ldm_stm(&mut self, insn: u32) -> CpuAction {
let mut result = CpuAction::AdvancePC; let mut result = CpuAction::AdvancePC(NonSeq);
let mut full = insn.pre_index_flag(); let mut full = insn.pre_index_flag();
let ascending = insn.add_offset_flag(); let ascending = insn.add_offset_flag();
@ -537,8 +498,7 @@ impl Core {
if rlist != 0 { if rlist != 0 {
if is_load { if is_load {
self.add_cycle(); let mut access = NonSeq;
self.N_cycle32(sb, self.pc);
for r in 0..16 { for r in 0..16 {
if rlist.bit(r) { if rlist.bit(r) {
if r == base_reg { if r == base_reg {
@ -547,27 +507,25 @@ impl Core {
if full { if full {
addr = addr.wrapping_add(4); addr = addr.wrapping_add(4);
} }
let val = self.load_32(addr, access);
let val = sb.read_32(addr); access = Seq;
self.S_cycle32(sb, self.pc);
self.set_reg(r, val); self.set_reg(r, val);
if r == REG_PC { if r == REG_PC {
if psr_transfer { if psr_transfer {
self.transfer_spsr_mode(); self.transfer_spsr_mode();
} }
self.reload_pipeline32(sb); self.reload_pipeline32();
result = CpuAction::FlushPipeline; result = CpuAction::PipelineFlushed;
} }
if !full { if !full {
addr = addr.wrapping_add(4); addr = addr.wrapping_add(4);
} }
} }
} }
self.idle_cycle();
} else { } else {
let mut first = true; let mut first = true;
let mut access = NonSeq;
for r in 0..16 { for r in 0..16 {
if rlist.bit(r) { if rlist.bit(r) {
let val = if r != base_reg { let val = if r != base_reg {
@ -593,27 +551,22 @@ impl Core {
addr = addr.wrapping_add(4); addr = addr.wrapping_add(4);
} }
if first { first = false;
self.N_cycle32(sb, addr);
first = false;
} else {
self.S_cycle32(sb, addr);
}
self.write_32(addr, val, sb);
self.store_aligned_32(addr, val, access);
access = Seq;
if !full { if !full {
addr = addr.wrapping_add(4); addr = addr.wrapping_add(4);
} }
} }
} }
self.N_cycle32(sb, self.pc);
} }
} else { } else {
if is_load { if is_load {
let val = self.ldr_word(addr, sb); let val = self.ldr_word(addr, NonSeq);
self.set_reg(REG_PC, val & !3); self.set_reg(REG_PC, val & !3);
self.reload_pipeline32(sb); self.reload_pipeline32();
result = CpuAction::FlushPipeline; result = CpuAction::PipelineFlushed;
} else { } else {
// block data store with empty rlist // block data store with empty rlist
let addr = match (ascending, full) { let addr = match (ascending, full) {
@ -622,7 +575,7 @@ impl Core {
(true, false) => addr, (true, false) => addr,
(true, true) => addr.wrapping_add(4), (true, true) => addr.wrapping_add(4),
}; };
self.write_32(addr, self.pc + 4, sb); self.store_aligned_32(addr, self.pc + 4, NonSeq);
} }
addr = if ascending { addr = if ascending {
addr.wrapping_add(0x40) addr.wrapping_add(0x40)
@ -642,7 +595,9 @@ impl Core {
result result
} }
pub fn exec_arm_mul_mla(&mut self, sb: &mut SysBus, insn: u32) -> CpuAction { /// Multiply and Multiply-Accumulate (MUL, MLA)
/// Execution Time: 1S+mI for MUL, and 1S+(m+1)I for MLA.
pub fn exec_arm_mul_mla(&mut self, insn: u32) -> CpuAction {
let rd = insn.bit_range(16..20) as usize; let rd = insn.bit_range(16..20) as usize;
let rn = insn.bit_range(12..16) as usize; let rn = insn.bit_range(12..16) as usize;
let rs = insn.rs(); let rs = insn.rs();
@ -658,14 +613,14 @@ impl Core {
if insn.accumulate_flag() { if insn.accumulate_flag() {
result = result.wrapping_add(self.get_reg(rn)); result = result.wrapping_add(self.get_reg(rn));
self.add_cycle(); self.idle_cycle();
} }
self.set_reg(rd, result); self.set_reg(rd, result);
let m = self.get_required_multipiler_array_cycles(op2); let m = self.get_required_multipiler_array_cycles(op2);
for _ in 0..m { for _ in 0..m {
self.add_cycle(); self.idle_cycle();
} }
if insn.set_cond_flag() { if insn.set_cond_flag() {
@ -675,12 +630,12 @@ impl Core {
self.cpsr.set_V(false); self.cpsr.set_V(false);
} }
self.S_cycle32(sb, self.pc); CpuAction::AdvancePC(Seq)
CpuAction::AdvancePC
} }
pub fn exec_arm_mull_mlal(&mut self, sb: &mut SysBus, insn: u32) -> CpuAction { /// Multiply Long and Multiply-Accumulate Long (MULL, MLAL)
/// Execution Time: 1S+(m+1)I for MULL, and 1S+(m+2)I for MLAL
pub fn exec_arm_mull_mlal(&mut self, insn: u32) -> CpuAction {
let rd_hi = insn.rd_hi(); let rd_hi = insn.rd_hi();
let rd_lo = insn.rd_lo(); let rd_lo = insn.rd_lo();
let rs = insn.rs(); let rs = insn.rs();
@ -694,21 +649,18 @@ impl Core {
} else { } else {
(op1 as u64).wrapping_mul(op2 as u64) (op1 as u64).wrapping_mul(op2 as u64)
}; };
self.add_cycle();
if insn.accumulate_flag() { if insn.accumulate_flag() {
let hi = self.get_reg(rd_hi) as u64; let hi = self.get_reg(rd_hi) as u64;
let lo = self.get_reg(rd_lo) as u64; let lo = self.get_reg(rd_lo) as u64;
result = result.wrapping_add(hi << 32 | lo); result = result.wrapping_add(hi << 32 | lo);
self.add_cycle(); self.idle_cycle();
} }
self.set_reg(rd_hi, (result >> 32) as i32 as u32); self.set_reg(rd_hi, (result >> 32) as i32 as u32);
self.set_reg(rd_lo, (result & 0xffffffff) as i32 as u32); self.set_reg(rd_lo, (result & 0xffffffff) as i32 as u32);
self.idle_cycle();
let m = self.get_required_multipiler_array_cycles(self.get_reg(rs)); let m = self.get_required_multipiler_array_cycles(self.get_reg(rs));
for _ in 0..m { for _ in 0..m {
self.add_cycle(); self.idle_cycle();
} }
if insn.set_cond_flag() { if insn.set_cond_flag() {
@ -718,35 +670,32 @@ impl Core {
self.cpsr.set_V(false); self.cpsr.set_V(false);
} }
self.S_cycle32(sb, self.pc); CpuAction::AdvancePC(Seq)
CpuAction::AdvancePC
} }
pub fn exec_arm_swp(&mut self, sb: &mut SysBus, insn: u32) -> CpuAction { /// ARM Opcodes: Memory: Single Data Swap (SWP)
/// Execution Time: 1S+2N+1I. That is, 2N data cycles, 1S code cycle, plus 1I.
pub fn exec_arm_swp(&mut self, insn: u32) -> CpuAction {
let base_addr = self.get_reg(insn.bit_range(16..20) as usize); let base_addr = self.get_reg(insn.bit_range(16..20) as usize);
let rd = insn.bit_range(12..16) as usize; let rd = insn.bit_range(12..16) as usize;
if insn.transfer_size() == 1 { if insn.transfer_size() == 1 {
let t = sb.read_8(base_addr); let t = self.load_8(base_addr, NonSeq);
self.N_cycle8(sb, base_addr); self.store_8(base_addr, self.get_reg(insn.rm()) as u8, Seq);
sb.write_8(base_addr, self.get_reg(insn.rm()) as u8);
self.S_cycle8(sb, base_addr);
self.set_reg(rd, t as u32); self.set_reg(rd, t as u32);
} else { } else {
let t = self.ldr_word(base_addr, sb); let t = self.ldr_word(base_addr, NonSeq);
self.N_cycle32(sb, base_addr); self.store_aligned_32(base_addr, self.get_reg(insn.rm()), Seq);
self.write_32(base_addr, self.get_reg(insn.rm()), sb);
self.S_cycle32(sb, base_addr);
self.set_reg(rd, t as u32); self.set_reg(rd, t as u32);
} }
self.add_cycle(); self.idle_cycle();
self.N_cycle32(sb, self.pc);
CpuAction::AdvancePC CpuAction::AdvancePC(NonSeq)
} }
pub fn exec_arm_swi(&mut self, sb: &mut SysBus, insn: u32) -> CpuAction { /// ARM Software Interrupt
self.software_interrupt(sb, self.pc - 4, insn.swi_comment()); /// Execution Time: 2S+1N
CpuAction::FlushPipeline pub fn exec_arm_swi(&mut self, insn: u32) -> CpuAction {
self.software_interrupt(self.pc - 4, insn.swi_comment()); // Implies 2S + 1N
CpuAction::PipelineFlushed
} }
} }

View file

@ -2,15 +2,37 @@ use serde::{Deserialize, Serialize};
pub use super::exception::Exception; pub use super::exception::Exception;
use super::CpuAction; use super::{arm::ArmCond, psr::RegPSR, Addr, CpuMode, CpuState};
use super::{psr::RegPSR, Addr, CpuMode, CpuState, arm::ArmCond};
use crate::util::Shared;
use super::memory::{MemoryAccess, MemoryInterface};
use MemoryAccess::*;
use cfg_if::cfg_if;
cfg_if! { cfg_if! {
if #[cfg(feature = "arm7tdmi_dispatch_table")] { if #[cfg(feature = "arm7tdmi_dispatch_table")] {
// Include files that are auto-generated by the build script
// See `build.rs` #[cfg(feature = "debugger")]
include!(concat!(env!("OUT_DIR"), "/arm_lut.rs")); use super::thumb::ThumbFormat;
include!(concat!(env!("OUT_DIR"), "/thumb_lut.rs"));
#[cfg(feature = "debugger")]
use super::arm::ArmFormat;
#[cfg_attr(not(feature = "debugger"), repr(transparent))]
pub struct ThumbInstructionInfo<I: MemoryInterface> {
pub handler_fn: fn(&mut Core<I>, insn: u16) -> CpuAction,
#[cfg(feature = "debugger")]
pub fmt: ThumbFormat,
}
#[cfg_attr(not(feature = "debugger"), repr(transparent))]
pub struct ArmInstructionInfo<I: MemoryInterface> {
pub handler_fn: fn(&mut Core<I>, insn: u32) -> CpuAction,
#[cfg(feature = "debugger")]
pub fmt: ArmFormat,
}
} else { } else {
use super::arm::ArmFormat; use super::arm::ArmFormat;
use super::thumb::ThumbFormat; use super::thumb::ThumbFormat;
@ -31,59 +53,160 @@ cfg_if! {
} }
} }
use crate::bus::Bus;
use crate::sysbus::{MemoryAccessType::*, MemoryAccessWidth::*, SysBus};
use bit::BitIndex; use bit::BitIndex;
use num::FromPrimitive; use num::FromPrimitive;
pub enum CpuAction {
AdvancePC(MemoryAccess),
PipelineFlushed,
}
#[derive(Serialize, Deserialize, Clone, Debug, Default)] #[derive(Serialize, Deserialize, Clone, Debug, Default)]
pub struct Core { pub(super) struct BankedRegisters {
pub pc: u32,
pub gpr: [u32; 15],
// r13 and r14 are banked for all modes. System&User mode share them // r13 and r14 are banked for all modes. System&User mode share them
pub(super) gpr_banked_r13: [u32; 6], pub(super) gpr_banked_r13: [u32; 6],
pub(super) gpr_banked_r14: [u32; 6], pub(super) gpr_banked_r14: [u32; 6],
// r8-r12 are banked for fiq mode // r8-r12 are banked for fiq mode
pub(super) gpr_banked_old_r8_12: [u32; 5], pub(super) gpr_banked_old_r8_12: [u32; 5],
pub(super) gpr_banked_fiq_r8_12: [u32; 5], pub(super) gpr_banked_fiq_r8_12: [u32; 5],
pub(super) spsr_bank: [RegPSR; 6],
}
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct SavedCpuState {
pub pc: u32,
pub gpr: [u32; 15],
next_fetch_access: MemoryAccess,
pipeline: [u32; 2],
pub cpsr: RegPSR, pub cpsr: RegPSR,
pub(super) spsr: RegPSR, pub(super) spsr: RegPSR,
pub(super) spsr_bank: [RegPSR; 6],
pub(super) banks: Box<BankedRegisters>,
pub(super) bs_carry_out: bool, pub(super) bs_carry_out: bool,
}
#[derive(Clone, Debug)]
pub struct Core<I: MemoryInterface> {
pub(super) bus: Shared<I>,
next_fetch_access: MemoryAccess,
pipeline: [u32; 2], pipeline: [u32; 2],
pub pc: u32,
pub gpr: [u32; 15],
pub cpsr: RegPSR,
pub(super) spsr: RegPSR,
// Todo - do I still need this?
pub(super) bs_carry_out: bool,
pub(super) banks: Box<BankedRegisters>, // Putting these in a box so the most-used Cpu fields in the same cacheline
#[cfg(feature = "debugger")] #[cfg(feature = "debugger")]
pub last_executed: Option<DecodedInstruction>, pub last_executed: Option<DecodedInstruction>,
/// store the gpr before executing an instruction to show diff in the Display impl
pub cycles: usize, #[cfg(feature = "debugger")]
// store the gpr before executing an instruction to show diff in the Display impl
gpr_previous: [u32; 15], gpr_previous: [u32; 15],
#[cfg(feature = "debugger")]
memreq: Addr,
pub breakpoints: Vec<u32>, pub breakpoints: Vec<u32>,
#[cfg(feature = "debugger")]
pub verbose: bool, pub verbose: bool,
#[cfg(feature = "debugger")]
pub trace_opcodes: bool, pub trace_opcodes: bool,
#[cfg(feature = "debugger")]
pub trace_exceptions: bool, pub trace_exceptions: bool,
} }
impl Core { impl<I: MemoryInterface> Core<I> {
pub fn new() -> Core { pub fn new(bus: Shared<I>) -> Core<I> {
let cpsr = RegPSR::new(0x0000_00D3); let cpsr = RegPSR::new(0x0000_00D3);
Core { Core {
memreq: 0xffff_0000, // set memreq to an invalid addr so the first load cycle will be non-sequential bus,
cpsr: cpsr, pc: 0,
..Default::default() gpr: [0; 15],
pipeline: [0; 2],
next_fetch_access: MemoryAccess::NonSeq,
cpsr,
spsr: Default::default(),
banks: Box::new(BankedRegisters::default()),
bs_carry_out: false,
#[cfg(feature = "debugger")]
last_executed: None,
#[cfg(feature = "debugger")]
gpr_previous: [0; 15],
#[cfg(feature = "debugger")]
breakpoints: Vec::new(),
#[cfg(feature = "debugger")]
verbose: false,
#[cfg(feature = "debugger")]
trace_opcodes: false,
#[cfg(feature = "debugger")]
trace_exceptions: false,
} }
} }
pub fn from_saved_state(bus: Shared<I>, state: SavedCpuState) -> Core<I> {
Core {
bus,
pc: state.pc,
cpsr: state.cpsr,
gpr: state.gpr,
banks: state.banks,
spsr: state.spsr,
bs_carry_out: state.bs_carry_out,
pipeline: state.pipeline,
next_fetch_access: state.next_fetch_access,
// savestate does not keep debugger related information, so just reinitialize to default
#[cfg(feature = "debugger")]
last_executed: None,
#[cfg(feature = "debugger")]
gpr_previous: [0; 15],
#[cfg(feature = "debugger")]
breakpoints: Vec::new(),
#[cfg(feature = "debugger")]
verbose: false,
#[cfg(feature = "debugger")]
trace_opcodes: false,
#[cfg(feature = "debugger")]
trace_exceptions: false,
}
}
pub fn save_state(&self) -> SavedCpuState {
SavedCpuState {
cpsr: self.cpsr,
pc: self.pc,
gpr: self.gpr.clone(),
spsr: self.spsr,
banks: self.banks.clone(),
bs_carry_out: self.bs_carry_out,
pipeline: self.pipeline.clone(),
next_fetch_access: self.next_fetch_access,
}
}
pub fn restore_state(&mut self, state: SavedCpuState) {
self.pc = state.pc;
self.cpsr = state.cpsr;
self.gpr = state.gpr;
self.spsr = state.spsr;
self.banks = state.banks;
self.bs_carry_out = state.bs_carry_out;
self.pipeline = state.pipeline;
self.next_fetch_access = state.next_fetch_access;
}
pub fn set_memory_interface(&mut self, i: Shared<I>) {
self.bus = i;
}
#[cfg(feature = "debugger")]
pub fn set_verbose(&mut self, v: bool) { pub fn set_verbose(&mut self, v: bool) {
self.verbose = v; self.verbose = v;
} }
@ -115,11 +238,11 @@ impl Core {
if self.cpsr.mode() == CpuMode::Fiq { if self.cpsr.mode() == CpuMode::Fiq {
self.gpr[r] self.gpr[r]
} else { } else {
self.gpr_banked_old_r8_12[r - 8] self.banks.gpr_banked_old_r8_12[r - 8]
} }
} }
13 => self.gpr_banked_r13[0], 13 => self.banks.gpr_banked_r13[0],
14 => self.gpr_banked_r14[0], 14 => self.banks.gpr_banked_r14[0],
_ => panic!("invalid register"), _ => panic!("invalid register"),
} }
} }
@ -146,62 +269,19 @@ impl Core {
if self.cpsr.mode() == CpuMode::Fiq { if self.cpsr.mode() == CpuMode::Fiq {
self.gpr[r] = val; self.gpr[r] = val;
} else { } else {
self.gpr_banked_old_r8_12[r - 8] = val; self.banks.gpr_banked_old_r8_12[r - 8] = val;
} }
} }
13 => { 13 => {
self.gpr_banked_r13[0] = val; self.banks.gpr_banked_r13[0] = val;
} }
14 => { 14 => {
self.gpr_banked_r14[0] = val; self.banks.gpr_banked_r14[0] = val;
} }
_ => panic!("invalid register"), _ => panic!("invalid register"),
} }
} }
/// Word store with forced 4-byte alignment (the low two address bits
/// are dropped before hitting the bus).
pub(super) fn write_32(&mut self, addr: Addr, value: u32, bus: &mut SysBus) {
bus.write_32(addr & !0x3, value);
}
/// Halfword store with forced 2-byte alignment (low address bit dropped).
pub(super) fn write_16(&mut self, addr: Addr, value: u16, bus: &mut SysBus) {
bus.write_16(addr & !0x1, value);
}
/// Byte store; bytes have no alignment requirement, so the address is
/// passed through unchanged.
pub(super) fn write_8(&mut self, addr: Addr, value: u8, bus: &mut SysBus) {
bus.write_8(addr, value);
}
/// Helper function for "ldr" instruction that handles misaligned addresses:
/// reads the aligned word and rotates it right by 8 bits per byte of
/// misalignment (rotation = (addr & 3) * 8).
pub(super) fn ldr_word(&mut self, addr: Addr, bus: &SysBus) -> u32 {
if addr & 0x3 != 0 {
let rotation = (addr & 0x3) << 3;
let value = bus.read_32(addr & !0x3);
// `false, false` suppress flag updates / register writeback in `ror`
// — NOTE(review): confirm against `ror`'s signature.
self.ror(value, rotation, self.cpsr.C(), false, false)
} else {
bus.read_32(addr)
}
}
/// Helper function for "ldrh" instruction that handles misaligned addresses:
/// reads the aligned halfword and rotates it right by 8 bits when the
/// address is odd.
pub(super) fn ldr_half(&mut self, addr: Addr, bus: &SysBus) -> u32 {
if addr & 0x1 != 0 {
let rotation = (addr & 0x1) << 3;
let value = bus.read_16(addr & !0x1);
self.ror(value as u32, rotation, self.cpsr.C(), false, false)
} else {
bus.read_16(addr) as u32
}
}
/// Helper function for "ldrsh" instruction that handles misaligned addresses:
/// an odd address degrades to a sign-extended *byte* load; an even address
/// sign-extends the halfword.
pub(super) fn ldr_sign_half(&mut self, addr: Addr, bus: &SysBus) -> u32 {
if addr & 0x1 != 0 {
bus.read_8(addr) as i8 as i32 as u32
} else {
bus.read_16(addr) as i16 as i32 as u32
}
}
pub fn get_registers(&self) -> [u32; 15] { pub fn get_registers(&self) -> [u32; 15] {
self.gpr.clone() self.gpr.clone()
} }
@ -214,31 +294,33 @@ impl Core {
return; return;
} }
self.spsr_bank[old_index] = self.spsr; let banks = &mut self.banks;
self.gpr_banked_r13[old_index] = self.gpr[13];
self.gpr_banked_r14[old_index] = self.gpr[14];
self.spsr = self.spsr_bank[new_index]; banks.spsr_bank[old_index] = self.spsr;
self.gpr[13] = self.gpr_banked_r13[new_index]; banks.gpr_banked_r13[old_index] = self.gpr[13];
self.gpr[14] = self.gpr_banked_r14[new_index]; banks.gpr_banked_r14[old_index] = self.gpr[14];
self.spsr = banks.spsr_bank[new_index];
self.gpr[13] = banks.gpr_banked_r13[new_index];
self.gpr[14] = banks.gpr_banked_r14[new_index];
if new_mode == CpuMode::Fiq { if new_mode == CpuMode::Fiq {
for r in 0..5 { for r in 0..5 {
self.gpr_banked_old_r8_12[r] = self.gpr[r + 8]; banks.gpr_banked_old_r8_12[r] = self.gpr[r + 8];
self.gpr[r + 8] = self.gpr_banked_fiq_r8_12[r]; self.gpr[r + 8] = banks.gpr_banked_fiq_r8_12[r];
} }
} else if old_mode == CpuMode::Fiq { } else if old_mode == CpuMode::Fiq {
for r in 0..5 { for r in 0..5 {
self.gpr_banked_fiq_r8_12[r] = self.gpr[r + 8]; banks.gpr_banked_fiq_r8_12[r] = self.gpr[r + 8];
self.gpr[r + 8] = self.gpr_banked_old_r8_12[r]; self.gpr[r + 8] = banks.gpr_banked_old_r8_12[r];
} }
} }
self.cpsr.set_mode(new_mode); self.cpsr.set_mode(new_mode);
} }
/// Resets the cpu /// Resets the cpu
pub fn reset(&mut self, sb: &mut SysBus) { pub fn reset(&mut self) {
self.exception(sb, Exception::Reset, 0); self.exception(Exception::Reset, 0);
} }
pub fn word_size(&self) -> usize { pub fn word_size(&self) -> usize {
@ -248,15 +330,6 @@ impl Core {
} }
} }
pub fn cycles(&self) -> usize {
self.cycles
}
pub(super) fn add_cycle(&mut self) {
// println!("<cycle I-Cyclel> total: {}", self.cycles);
self.cycles += 1;
}
pub(super) fn get_required_multipiler_array_cycles(&self, rs: u32) -> usize { pub(super) fn get_required_multipiler_array_cycles(&self, rs: u32) -> usize {
if rs & 0xff == rs { if rs & 0xff == rs {
1 1
@ -269,42 +342,6 @@ impl Core {
} }
} }
#[allow(non_snake_case)]
#[inline(always)]
pub(super) fn S_cycle32(&mut self, sb: &SysBus, addr: u32) {
self.cycles += sb.get_cycles(addr, Seq, MemoryAccess32);
}
#[allow(non_snake_case)]
#[inline(always)]
pub(super) fn S_cycle16(&mut self, sb: &SysBus, addr: u32) {
self.cycles += sb.get_cycles(addr, Seq, MemoryAccess16);
}
#[allow(non_snake_case)]
#[inline(always)]
pub(super) fn S_cycle8(&mut self, sb: &SysBus, addr: u32) {
self.cycles += sb.get_cycles(addr, Seq, MemoryAccess8);
}
#[allow(non_snake_case)]
#[inline(always)]
pub(super) fn N_cycle32(&mut self, sb: &SysBus, addr: u32) {
self.cycles += sb.get_cycles(addr, NonSeq, MemoryAccess32);
}
#[allow(non_snake_case)]
#[inline(always)]
pub(super) fn N_cycle16(&mut self, sb: &SysBus, addr: u32) {
self.cycles += sb.get_cycles(addr, NonSeq, MemoryAccess16);
}
#[allow(non_snake_case)]
#[inline(always)]
pub(super) fn N_cycle8(&mut self, sb: &SysBus, addr: u32) {
self.cycles += sb.get_cycles(addr, NonSeq, MemoryAccess8);
}
#[inline] #[inline]
pub(super) fn check_arm_cond(&self, cond: ArmCond) -> bool { pub(super) fn check_arm_cond(&self, cond: ArmCond) -> bool {
use ArmCond::*; use ArmCond::*;
@ -337,80 +374,73 @@ impl Core {
self.last_executed = Some(d); self.last_executed = Some(d);
} }
#[cfg(feature = "arm7tdmi_dispatch_table")] cfg_if! {
fn step_arm_exec(&mut self, insn: u32, sb: &mut SysBus) -> CpuAction { if #[cfg(feature = "arm7tdmi_dispatch_table")] {
let hash = (((insn >> 16) & 0xff0) | ((insn >> 4) & 0x00f)) as usize; fn step_arm_exec(&mut self, insn: u32) -> CpuAction {
let arm_info = &ARM_LUT[hash]; let hash = (((insn >> 16) & 0xff0) | ((insn >> 4) & 0x00f)) as usize;
let arm_info = &Self::ARM_LUT[hash];
#[cfg(feature = "debugger")]
self.debugger_record_step(DecodedInstruction::Arm(ArmInstruction::new(
insn,
self.pc.wrapping_sub(8),
arm_info.fmt,
)));
(arm_info.handler_fn)(self, insn)
}
#[cfg(feature = "debugger")] fn step_thumb_exec(&mut self, insn: u16) -> CpuAction {
self.debugger_record_step(DecodedInstruction::Arm(ArmInstruction::new( let thumb_info = &Self::THUMB_LUT[(insn >> 6) as usize];
insn, #[cfg(feature = "debugger")]
self.pc.wrapping_sub(8), self.debugger_record_step(DecodedInstruction::Thumb(ThumbInstruction::new(
arm_info.fmt, insn,
))); self.pc.wrapping_sub(4),
thumb_info.fmt,
)));
(thumb_info.handler_fn)(self, insn)
}
} else {
(arm_info.handler_fn)(self, sb, insn) fn step_arm_exec(&mut self, insn: u32) -> CpuAction {
} let arm_fmt = ArmFormat::from(insn);
#[cfg(feature = "debugger")]
#[cfg(feature = "arm7tdmi_dispatch_table")] self.debugger_record_step(DecodedInstruction::Arm(ArmInstruction::new(
fn step_thumb_exec(&mut self, insn: u16, sb: &mut SysBus) -> CpuAction { insn,
let thumb_info = &THUMB_LUT[(insn >> 6) as usize]; self.pc.wrapping_sub(8),
arm_fmt,
#[cfg(feature = "debugger")] )));
self.debugger_record_step(DecodedInstruction::Thumb(ThumbInstruction::new( self.exec_arm(insn, arm_fmt)
insn, }
self.pc.wrapping_sub(4), fn step_thumb_exec(&mut self, insn: u16) -> CpuAction {
thumb_info.fmt, let thumb_fmt = ThumbFormat::from(insn);
))); #[cfg(feature = "debugger")]
self.debugger_record_step(DecodedInstruction::Thumb(ThumbInstruction::new(
(thumb_info.handler_fn)(self, sb, insn) insn,
} self.pc.wrapping_sub(4),
thumb_fmt,
#[cfg(not(feature = "arm7tdmi_dispatch_table"))] )));
fn step_arm_exec(&mut self, insn: u32, sb: &mut SysBus) -> CpuAction { self.exec_thumb(insn, thumb_fmt)
let arm_fmt = ArmFormat::from(insn); }
#[cfg(feature = "debugger")] }
self.debugger_record_step(DecodedInstruction::Arm(ArmInstruction::new(
insn,
self.pc.wrapping_sub(8),
arm_fmt,
)));
self.exec_arm(sb, insn, arm_fmt)
}
#[cfg(not(feature = "arm7tdmi_dispatch_table"))]
fn step_thumb_exec(&mut self, insn: u16, sb: &mut SysBus) -> CpuAction {
let thumb_fmt = ThumbFormat::from(insn);
#[cfg(feature = "debugger")]
self.debugger_record_step(DecodedInstruction::Thumb(ThumbInstruction::new(
insn,
self.pc.wrapping_sub(4),
thumb_fmt,
)));
self.exec_thumb(sb, insn, thumb_fmt)
} }
/// 2S + 1N
#[inline(always)] #[inline(always)]
pub fn reload_pipeline16(&mut self, sb: &mut SysBus) { pub fn reload_pipeline16(&mut self) {
self.pipeline[0] = sb.read_16(self.pc) as u32; self.pipeline[0] = self.load_16(self.pc, NonSeq) as u32;
self.N_cycle16(sb, self.pc);
self.advance_thumb(); self.advance_thumb();
self.pipeline[1] = sb.read_16(self.pc) as u32; self.pipeline[1] = self.load_16(self.pc, Seq) as u32;
self.S_cycle16(sb, self.pc);
self.advance_thumb(); self.advance_thumb();
self.next_fetch_access = Seq;
} }
/// 2S + 1N
#[inline(always)] #[inline(always)]
pub fn reload_pipeline32(&mut self, sb: &mut SysBus) { pub fn reload_pipeline32(&mut self) {
self.pipeline[0] = sb.read_32(self.pc); self.pipeline[0] = self.load_32(self.pc, NonSeq);
self.N_cycle16(sb, self.pc);
self.advance_arm(); self.advance_arm();
self.pipeline[1] = sb.read_32(self.pc); self.pipeline[1] = self.load_32(self.pc, Seq);
self.S_cycle16(sb, self.pc);
self.advance_arm(); self.advance_arm();
self.next_fetch_access = Seq;
} }
#[inline] #[inline]
@ -425,12 +455,12 @@ impl Core {
/// Perform a pipeline step /// Perform a pipeline step
/// If an instruction was executed in this step, return it. /// If an instruction was executed in this step, return it.
pub fn step(&mut self, bus: &mut SysBus) { pub fn step(&mut self) {
let pc = self.pc;
match self.cpsr.state() { match self.cpsr.state() {
CpuState::ARM => { CpuState::ARM => {
let fetched_now = bus.read_32(pc); let pc = self.pc & !3;
let fetched_now = self.load_32(pc, self.next_fetch_access);
let insn = self.pipeline[0]; let insn = self.pipeline[0];
self.pipeline[0] = self.pipeline[1]; self.pipeline[0] = self.pipeline[1];
self.pipeline[1] = fetched_now; self.pipeline[1] = fetched_now;
@ -438,24 +468,32 @@ impl Core {
.unwrap_or_else(|| unsafe { std::hint::unreachable_unchecked() }); .unwrap_or_else(|| unsafe { std::hint::unreachable_unchecked() });
if cond != ArmCond::AL { if cond != ArmCond::AL {
if !self.check_arm_cond(cond) { if !self.check_arm_cond(cond) {
self.S_cycle32(bus, self.pc);
self.advance_arm(); self.advance_arm();
self.next_fetch_access = MemoryAccess::NonSeq;
return; return;
} }
} }
match self.step_arm_exec(insn, bus) { match self.step_arm_exec(insn) {
CpuAction::AdvancePC => self.advance_arm(), CpuAction::AdvancePC(access) => {
CpuAction::FlushPipeline => {} self.next_fetch_access = access;
self.advance_arm();
}
CpuAction::PipelineFlushed => {}
} }
} }
CpuState::THUMB => { CpuState::THUMB => {
let fetched_now = bus.read_16(pc); let pc = self.pc & !1;
let fetched_now = self.load_16(pc, self.next_fetch_access);
let insn = self.pipeline[0]; let insn = self.pipeline[0];
self.pipeline[0] = self.pipeline[1]; self.pipeline[0] = self.pipeline[1];
self.pipeline[1] = fetched_now as u32; self.pipeline[1] = fetched_now as u32;
match self.step_thumb_exec(insn as u16, bus) { match self.step_thumb_exec(insn as u16) {
CpuAction::AdvancePC => self.advance_thumb(), CpuAction::AdvancePC(access) => {
CpuAction::FlushPipeline => {} self.advance_thumb();
self.next_fetch_access = access;
}
CpuAction::PipelineFlushed => {}
} }
} }
} }
@ -472,12 +510,12 @@ impl Core {
} }
pub fn skip_bios(&mut self) { pub fn skip_bios(&mut self) {
self.gpr_banked_r13[0] = 0x0300_7f00; // USR/SYS self.banks.gpr_banked_r13[0] = 0x0300_7f00; // USR/SYS
self.gpr_banked_r13[1] = 0x0300_7f00; // FIQ self.banks.gpr_banked_r13[1] = 0x0300_7f00; // FIQ
self.gpr_banked_r13[2] = 0x0300_7fa0; // IRQ self.banks.gpr_banked_r13[2] = 0x0300_7fa0; // IRQ
self.gpr_banked_r13[3] = 0x0300_7fe0; // SVC self.banks.gpr_banked_r13[3] = 0x0300_7fe0; // SVC
self.gpr_banked_r13[4] = 0x0300_7f00; // ABT self.banks.gpr_banked_r13[4] = 0x0300_7f00; // ABT
self.gpr_banked_r13[5] = 0x0300_7f00; // UND self.banks.gpr_banked_r13[5] = 0x0300_7f00; // UND
self.gpr[13] = 0x0300_7f00; self.gpr[13] = 0x0300_7f00;
self.pc = 0x0800_0000; self.pc = 0x0800_0000;
@ -487,10 +525,9 @@ impl Core {
} }
#[cfg(feature = "debugger")] #[cfg(feature = "debugger")]
impl fmt::Display for Core { impl<I: MemoryInterface> fmt::Display for Core<I> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
writeln!(f, "ARM7TDMI Core Status:")?; writeln!(f, "ARM7TDMI Core Status:")?;
writeln!(f, "\tCycles: {}", self.cycles)?;
writeln!(f, "\tCPSR: {}", self.cpsr)?; writeln!(f, "\tCPSR: {}", self.cpsr)?;
writeln!(f, "\tGeneral Purpose Registers:")?; writeln!(f, "\tGeneral Purpose Registers:")?;
let reg_normal_style = Style::new().bold(); let reg_normal_style = Style::new().bold();
@ -519,3 +556,8 @@ impl fmt::Display for Core {
writeln!(f, "{}", reg_normal_style.paint(pc)) writeln!(f, "{}", reg_normal_style.paint(pc))
} }
} }
// The ARM/Thumb dispatch lookup tables are generated by the build script
// into OUT_DIR and spliced in here when the dispatch-table feature is on.
#[cfg(feature = "arm7tdmi_dispatch_table")]
include!(concat!(env!("OUT_DIR"), "/arm_lut.rs"));
#[cfg(feature = "arm7tdmi_dispatch_table")]
include!(concat!(env!("OUT_DIR"), "/thumb_lut.rs"));

View file

@ -1,7 +1,6 @@
use super::super::sysbus::SysBus;
use super::cpu::Core; use super::cpu::Core;
use super::memory::MemoryInterface;
use super::{CpuMode, CpuState}; use super::{CpuMode, CpuState};
use colored::*;
#[derive(Debug, Clone, Copy, PartialEq)] #[derive(Debug, Clone, Copy, PartialEq)]
#[allow(dead_code)] #[allow(dead_code)]
@ -17,8 +16,8 @@ pub enum Exception {
Fiq = 0x1c, Fiq = 0x1c,
} }
impl Core { impl<I: MemoryInterface> Core<I> {
pub fn exception(&mut self, sb: &mut SysBus, e: Exception, lr: u32) { pub fn exception(&mut self, e: Exception, lr: u32) {
use Exception::*; use Exception::*;
let (new_mode, irq_disable, fiq_disable) = match e { let (new_mode, irq_disable, fiq_disable) = match e {
Reset => (CpuMode::Supervisor, true, true), Reset => (CpuMode::Supervisor, true, true),
@ -30,18 +29,9 @@ impl Core {
Irq => (CpuMode::Irq, true, false), Irq => (CpuMode::Irq, true, false),
Fiq => (CpuMode::Fiq, true, true), Fiq => (CpuMode::Fiq, true, true),
}; };
trace!(
"{}: {:?}, pc: {:#x}, new_mode: {:?} old_mode: {:?}",
"Exception".cyan(),
e,
self.pc,
new_mode,
self.cpsr.mode(),
);
let new_bank = new_mode.bank_index(); let new_bank = new_mode.bank_index();
self.spsr_bank[new_bank] = self.cpsr; self.banks.spsr_bank[new_bank] = self.cpsr;
self.gpr_banked_r14[new_bank] = lr; self.banks.gpr_banked_r14[new_bank] = lr;
self.change_mode(self.cpsr.mode(), new_mode); self.change_mode(self.cpsr.mode(), new_mode);
// Set appropriate CPSR bits // Set appropriate CPSR bits
@ -56,21 +46,19 @@ impl Core {
// Set PC to vector address // Set PC to vector address
self.pc = e as u32; self.pc = e as u32;
self.reload_pipeline32(sb); self.reload_pipeline32();
} }
pub fn irq(&mut self, sb: &mut SysBus) { #[inline]
pub fn irq(&mut self) {
if !self.cpsr.irq_disabled() { if !self.cpsr.irq_disabled() {
let lr = self.get_next_pc() + 4; let lr = self.get_next_pc() + 4;
self.exception(sb, Exception::Irq, lr); self.exception(Exception::Irq, lr);
} }
} }
pub fn software_interrupt(&mut self, sb: &mut SysBus, lr: u32, _cmt: u32) { #[inline]
match self.cpsr.state() { pub fn software_interrupt(&mut self, lr: u32, _cmt: u32) {
CpuState::ARM => self.N_cycle32(sb, self.pc), self.exception(Exception::SoftwareInterrupt, lr);
CpuState::THUMB => self.N_cycle16(sb, self.pc),
};
self.exception(sb, Exception::SoftwareInterrupt, lr);
} }
} }

164
core/src/arm7tdmi/memory.rs Normal file
View file

@ -0,0 +1,164 @@
use super::cpu::Core;
use super::Addr;
use std::fmt;
/// Bus access classification used for cycle accounting, following the
/// ARM7TDMI bus protocol's N-cycle / S-cycle distinction.
#[derive(Serialize, Deserialize, Debug, Copy, Clone)]
pub enum MemoryAccess {
/// Non-sequential access ("N-cycle")
NonSeq = 0,
/// Sequential access ("S-cycle")
Seq,
}
impl Default for MemoryAccess {
fn default() -> MemoryAccess {
MemoryAccess::NonSeq
}
}
impl fmt::Display for MemoryAccess {
    /// Renders the access kind in the conventional single-letter
    /// notation: "N" for non-sequential, "S" for sequential.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let tag = match self {
            MemoryAccess::NonSeq => "N",
            MemoryAccess::Seq => "S",
        };
        f.write_str(tag)
    }
}
/// Width of a bus transfer; the discriminants (0/1/2) are stable and
/// may be used as indices by cycle-lookup tables.
#[derive(Debug, PartialEq, Copy, Clone)]
#[repr(u8)]
pub enum MemoryAccessWidth {
/// 8-bit transfer
MemoryAccess8 = 0,
/// 16-bit transfer
MemoryAccess16,
/// 32-bit transfer
MemoryAccess32,
}
/// A trait meant to abstract memory accesses and report the access type back
/// to the user of the arm7tdmi::Core.
///
/// Illustrative (not compiled) example implementation:
///
/// struct Memory {
/// data: [u8; 0x4000]
/// }
///
/// impl MemoryInterface for Memory {
/// fn load_8(&mut self, addr: u32, access: MemoryAccess) -> u8 {
/// debug!("CPU read {:?} cycle", access);
/// self.data[(addr & 0x3fff) as usize]
/// }
///
/// fn store_8(&mut self, addr: u32, value: u8, access: MemoryAccess) {
/// debug!("CPU write {:?} cycle", access);
/// self.data[(addr & 0x3fff) as usize] = value;
/// }
///
/// fn idle_cycle(&mut self) {
/// debug!("CPU idle cycle");
/// }
///
/// // implement rest of trait methods
/// }
///
/// let mem = Shared::new(Memory { ... });
/// let cpu = arm7tdmi::Core::new(mem.clone());
///
pub trait MemoryInterface {
/// Read a byte
fn load_8(&mut self, addr: u32, access: MemoryAccess) -> u8;
/// Read a halfword
fn load_16(&mut self, addr: u32, access: MemoryAccess) -> u16;
/// Read a word
fn load_32(&mut self, addr: u32, access: MemoryAccess) -> u32;
/// Write a byte
fn store_8(&mut self, addr: u32, value: u8, access: MemoryAccess);
/// Write a halfword
fn store_16(&mut self, addr: u32, value: u16, access: MemoryAccess);
/// Write a word
fn store_32(&mut self, addr: u32, value: u32, access: MemoryAccess);
/// Burn an internal (I) cycle with no bus activity
fn idle_cycle(&mut self);
}
/// `Core` forwards the entire `MemoryInterface` to its bus, so instruction
/// handlers can call `self.load_*` / `self.store_*` directly and the bus
/// implementation sees every access (and its N/S classification) for
/// cycle counting.
impl<I: MemoryInterface> MemoryInterface for Core<I> {
#[inline]
fn load_8(&mut self, addr: u32, access: MemoryAccess) -> u8 {
self.bus.load_8(addr, access)
}
#[inline]
fn load_16(&mut self, addr: u32, access: MemoryAccess) -> u16 {
self.bus.load_16(addr, access)
}
#[inline]
fn load_32(&mut self, addr: u32, access: MemoryAccess) -> u32 {
self.bus.load_32(addr, access)
}
#[inline]
fn store_8(&mut self, addr: u32, value: u8, access: MemoryAccess) {
self.bus.store_8(addr, value, access);
}
#[inline]
fn store_16(&mut self, addr: u32, value: u16, access: MemoryAccess) {
self.bus.store_16(addr, value, access);
}
#[inline]
fn store_32(&mut self, addr: u32, value: u32, access: MemoryAccess) {
self.bus.store_32(addr, value, access);
}
#[inline]
fn idle_cycle(&mut self) {
self.bus.idle_cycle();
}
}
/// Memory-access helpers shared by the ARM and Thumb instruction handlers.
impl<I: MemoryInterface> Core<I> {
    /// Word store with forced 4-byte alignment: the low two address bits
    /// are dropped, matching ARM7TDMI behavior for 32-bit stores.
    #[inline]
    pub(super) fn store_aligned_32(&mut self, addr: Addr, value: u32, access: MemoryAccess) {
        let aligned = addr & !0x3;
        self.store_32(aligned, value, access);
    }

    /// Halfword store with forced 2-byte alignment (low address bit dropped).
    #[inline]
    pub(super) fn store_aligned_16(&mut self, addr: Addr, value: u16, access: MemoryAccess) {
        let aligned = addr & !0x1;
        self.store_16(aligned, value, access);
    }

    /// Helper function for "ldr" instruction that handles misaligned addresses:
    /// the aligned word is read and rotated right by 8 bits per byte of
    /// misalignment.
    #[inline]
    pub(super) fn ldr_word(&mut self, addr: Addr, access: MemoryAccess) -> u32 {
        let misalignment = addr & 0x3;
        if misalignment == 0 {
            self.load_32(addr, access)
        } else {
            let word = self.load_32(addr & !0x3, access);
            self.ror(word, misalignment << 3, self.cpsr.C(), false, false)
        }
    }

    /// Helper function for "ldrh" instruction that handles misaligned addresses:
    /// an odd address reads the aligned halfword rotated right by 8 bits.
    #[inline]
    pub(super) fn ldr_half(&mut self, addr: Addr, access: MemoryAccess) -> u32 {
        let misalignment = addr & 0x1;
        if misalignment == 0 {
            self.load_16(addr, access) as u32
        } else {
            let half = self.load_16(addr & !0x1, access);
            self.ror(half as u32, misalignment << 3, self.cpsr.C(), false, false)
        }
    }

    /// Helper function for "ldrsh" instruction that handles misaligned addresses:
    /// an odd address degrades to a sign-extended *byte* load, an even one
    /// sign-extends the halfword.
    #[inline]
    pub(super) fn ldr_sign_half(&mut self, addr: Addr, access: MemoryAccess) -> u32 {
        if addr & 0x1 == 0 {
            self.load_16(addr, access) as i16 as i32 as u32
        } else {
            self.load_8(addr, access) as i8 as i32 as u32
        }
    }
}

View file

@ -12,6 +12,7 @@ use thumb::ThumbInstruction;
pub mod cpu; pub mod cpu;
pub use cpu::*; pub use cpu::*;
pub mod alu; pub mod alu;
pub mod memory;
pub use alu::*; pub use alu::*;
pub mod exception; pub mod exception;
pub mod psr; pub mod psr;
@ -23,11 +24,6 @@ pub const REG_SP: usize = 13;
pub(self) use crate::Addr; pub(self) use crate::Addr;
pub enum CpuAction {
AdvancePC,
FlushPipeline,
}
#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] #[derive(Serialize, Deserialize, Debug, PartialEq, Clone)]
pub enum DecodedInstruction { pub enum DecodedInstruction {
Arm(ArmInstruction), Arm(ArmInstruction),

View file

@ -1,30 +1,16 @@
use crate::arm7tdmi::*; use crate::arm7tdmi::*;
use crate::sysbus::SysBus;
use crate::Bus;
use crate::bit::BitIndex; use crate::bit::BitIndex;
use super::super::memory::{MemoryAccess, MemoryInterface};
use super::ThumbDecodeHelper; use super::ThumbDecodeHelper;
use super::*; use super::*;
use MemoryAccess::*;
fn push(cpu: &mut Core, bus: &mut SysBus, r: usize) { impl<I: MemoryInterface> Core<I> {
cpu.gpr[REG_SP] -= 4;
let stack_addr = cpu.gpr[REG_SP] & !3;
bus.write_32(stack_addr, cpu.get_reg(r))
}
fn pop(cpu: &mut Core, bus: &mut SysBus, r: usize) {
let val = bus.read_32(cpu.gpr[REG_SP] & !3);
cpu.set_reg(r, val);
cpu.gpr[REG_SP] += 4;
}
impl Core {
/// Format 1 /// Format 1
pub(in super::super) fn exec_thumb_move_shifted_reg( /// Execution Time: 1S
&mut self, pub(in super::super) fn exec_thumb_move_shifted_reg(&mut self, insn: u16) -> CpuAction {
sb: &mut SysBus,
insn: u16,
) -> CpuAction {
let rd = (insn & 0b111) as usize; let rd = (insn & 0b111) as usize;
let rs = insn.bit_range(3..6) as usize; let rs = insn.bit_range(3..6) as usize;
@ -39,13 +25,12 @@ impl Core {
self.gpr[rd] = op2; self.gpr[rd] = op2;
self.alu_update_flags(op2, false, self.bs_carry_out, self.cpsr.V()); self.alu_update_flags(op2, false, self.bs_carry_out, self.cpsr.V());
self.S_cycle16(sb, self.pc + 2); CpuAction::AdvancePC(Seq)
CpuAction::AdvancePC
} }
/// Format 2 /// Format 2
pub(in super::super) fn exec_thumb_add_sub(&mut self, sb: &mut SysBus, insn: u16) -> CpuAction { /// Execution Time: 1S
pub(in super::super) fn exec_thumb_add_sub(&mut self, insn: u16) -> CpuAction {
let rd = (insn & 0b111) as usize; let rd = (insn & 0b111) as usize;
let op1 = self.get_reg(insn.rs()); let op1 = self.get_reg(insn.rs());
let op2 = if insn.is_immediate_operand() { let op2 = if insn.is_immediate_operand() {
@ -64,17 +49,12 @@ impl Core {
self.alu_update_flags(result, true, carry, overflow); self.alu_update_flags(result, true, carry, overflow);
self.set_reg(rd, result as u32); self.set_reg(rd, result as u32);
self.S_cycle16(sb, self.pc + 2); CpuAction::AdvancePC(Seq)
CpuAction::AdvancePC
} }
/// Format 3 /// Format 3
pub(in super::super) fn exec_thumb_data_process_imm( /// Execution Time: 1S
&mut self, pub(in super::super) fn exec_thumb_data_process_imm(&mut self, insn: u16) -> CpuAction {
sb: &mut SysBus,
insn: u16,
) -> CpuAction {
use OpFormat3::*; use OpFormat3::*;
let op = insn.format3_op(); let op = insn.format3_op();
let rd = insn.bit_range(8..11) as usize; let rd = insn.bit_range(8..11) as usize;
@ -92,13 +72,16 @@ impl Core {
if op != CMP { if op != CMP {
self.gpr[rd] = result as u32; self.gpr[rd] = result as u32;
} }
self.S_cycle16(sb, self.pc + 2);
CpuAction::AdvancePC CpuAction::AdvancePC(Seq)
} }
/// Format 4 /// Format 4
pub(in super::super) fn exec_thumb_alu_ops(&mut self, sb: &mut SysBus, insn: u16) -> CpuAction { /// Execution Time:
/// 1S for AND,EOR,ADC,SBC,TST,NEG,CMP,CMN,ORR,BIC,MVN
/// 1S+1I for LSL,LSR,ASR,ROR
/// 1S+mI for MUL on ARMv4 (m=1..4; depending on MSBs of incoming Rd value)
pub(in super::super) fn exec_thumb_alu_ops(&mut self, insn: u16) -> CpuAction {
let rd = (insn & 0b111) as usize; let rd = (insn & 0b111) as usize;
let rs = insn.rs(); let rs = insn.rs();
let dst = self.get_reg(rd); let dst = self.get_reg(rd);
@ -109,22 +92,23 @@ impl Core {
use ThumbAluOps::*; use ThumbAluOps::*;
let op = insn.format4_alu_op(); let op = insn.format4_alu_op();
macro_rules! shifter_op {
($bs_op:expr) => {{
let result = self.shift_by_register($bs_op, rd, rs, carry);
self.idle_cycle();
carry = self.bs_carry_out;
result
}};
}
let result = match op { let result = match op {
AND | TST => dst & src, AND | TST => dst & src,
EOR => dst ^ src, EOR => dst ^ src,
LSL | LSR | ASR | ROR => { LSL => shifter_op!(BarrelShiftOpCode::LSL),
// TODO optimize this second match, keeping it here for code clearity LSR => shifter_op!(BarrelShiftOpCode::LSR),
let bs_op = match op { ASR => shifter_op!(BarrelShiftOpCode::ASR),
LSL => BarrelShiftOpCode::LSL, ROR => shifter_op!(BarrelShiftOpCode::ROR),
LSR => BarrelShiftOpCode::LSR,
ASR => BarrelShiftOpCode::ASR,
ROR => BarrelShiftOpCode::ROR,
_ => unreachable!(),
};
let result = self.shift_by_register(bs_op, rd, rs, carry);
carry = self.bs_carry_out;
result
}
ADC => self.alu_adc_flags(dst, src, &mut carry, &mut overflow), ADC => self.alu_adc_flags(dst, src, &mut carry, &mut overflow),
SBC => self.alu_sbc_flags(dst, src, &mut carry, &mut overflow), SBC => self.alu_sbc_flags(dst, src, &mut carry, &mut overflow),
NEG => self.alu_sub_flags(0, src, &mut carry, &mut overflow), NEG => self.alu_sub_flags(0, src, &mut carry, &mut overflow),
@ -134,7 +118,7 @@ impl Core {
MUL => { MUL => {
let m = self.get_required_multipiler_array_cycles(src); let m = self.get_required_multipiler_array_cycles(src);
for _ in 0..m { for _ in 0..m {
self.add_cycle(); self.idle_cycle();
} }
// TODO - meaningless values? // TODO - meaningless values?
carry = false; carry = false;
@ -149,17 +133,15 @@ impl Core {
if !op.is_setting_flags() { if !op.is_setting_flags() {
self.set_reg(rd, result as u32); self.set_reg(rd, result as u32);
} }
self.S_cycle16(sb, self.pc + 2);
CpuAction::AdvancePC CpuAction::AdvancePC(Seq)
} }
/// Format 5 /// Format 5
pub(in super::super) fn exec_thumb_hi_reg_op_or_bx( /// Execution Time:
&mut self, /// 1S for ADD/MOV/CMP
sb: &mut SysBus, /// 2S+1N for ADD/MOV with Rd=R15, and for BX
insn: u16, pub(in super::super) fn exec_thumb_hi_reg_op_or_bx(&mut self, insn: u16) -> CpuAction {
) -> CpuAction {
let op = insn.format5_op(); let op = insn.format5_op();
let rd = (insn & 0b111) as usize; let rd = (insn & 0b111) as usize;
let dst_reg = if insn.bit(consts::flags::FLAG_H1) { let dst_reg = if insn.bit(consts::flags::FLAG_H1) {
@ -175,16 +157,16 @@ impl Core {
let op1 = self.get_reg(dst_reg); let op1 = self.get_reg(dst_reg);
let op2 = self.get_reg(src_reg); let op2 = self.get_reg(src_reg);
let mut result = CpuAction::AdvancePC; let mut result = CpuAction::AdvancePC(Seq);
match op { match op {
OpFormat5::BX => { OpFormat5::BX => {
return self.branch_exchange(sb, self.get_reg(src_reg)); return self.branch_exchange(self.get_reg(src_reg));
} }
OpFormat5::ADD => { OpFormat5::ADD => {
self.set_reg(dst_reg, op1.wrapping_add(op2)); self.set_reg(dst_reg, op1.wrapping_add(op2));
if dst_reg == REG_PC { if dst_reg == REG_PC {
result = CpuAction::FlushPipeline; self.reload_pipeline16();
self.reload_pipeline16(sb); result = CpuAction::PipelineFlushed;
} }
} }
OpFormat5::CMP => { OpFormat5::CMP => {
@ -196,38 +178,35 @@ impl Core {
OpFormat5::MOV => { OpFormat5::MOV => {
self.set_reg(dst_reg, op2 as u32); self.set_reg(dst_reg, op2 as u32);
if dst_reg == REG_PC { if dst_reg == REG_PC {
result = CpuAction::FlushPipeline; self.reload_pipeline16();
self.reload_pipeline16(sb); result = CpuAction::PipelineFlushed;
} }
} }
} }
self.S_cycle16(sb, self.pc + 2);
result result
} }
/// Format 6 /// Format 6 load PC-relative (for loading immediates from literal pool)
pub(in super::super) fn exec_thumb_ldr_pc(&mut self, sb: &mut SysBus, insn: u16) -> CpuAction { /// Execution Time: 1S+1N+1I
pub(in super::super) fn exec_thumb_ldr_pc(&mut self, insn: u16) -> CpuAction {
let rd = insn.bit_range(8..11) as usize; let rd = insn.bit_range(8..11) as usize;
let ofs = insn.word8() as Addr; let ofs = insn.word8() as Addr;
let addr = (self.pc & !3) + ofs; let addr = (self.pc & !3) + ofs;
self.S_cycle16(sb, self.pc + 2); self.gpr[rd] = self.load_32(addr, NonSeq);
let data = self.ldr_word(addr, sb);
self.N_cycle16(sb, addr);
self.gpr[rd] = data;
// +1I // +1I
self.add_cycle(); self.idle_cycle();
CpuAction::AdvancePC CpuAction::AdvancePC(NonSeq)
} }
/// Helper function for various ldr/str handler
/// Execution Time: 1S+1N+1I for LDR, or 2N for STR
fn do_exec_thumb_ldr_str( fn do_exec_thumb_ldr_str(
&mut self, &mut self,
sb: &mut SysBus,
insn: u16, insn: u16,
addr: Addr, addr: Addr,
@ -236,50 +215,38 @@ impl Core {
let rd = (insn & 0b111) as usize; let rd = (insn & 0b111) as usize;
if insn.is_load() { if insn.is_load() {
let data = if is_transferring_bytes { let data = if is_transferring_bytes {
self.S_cycle8(sb, addr); self.load_8(addr, NonSeq) as u32
sb.read_8(addr) as u32
} else { } else {
self.S_cycle32(sb, addr); self.ldr_word(addr, NonSeq)
self.ldr_word(addr, sb)
}; };
self.gpr[rd] = data; self.gpr[rd] = data;
// +1I // +1I
self.add_cycle(); self.idle_cycle();
CpuAction::AdvancePC(Seq)
} else { } else {
let value = self.get_reg(rd); let value = self.get_reg(rd);
if is_transferring_bytes { if is_transferring_bytes {
self.N_cycle8(sb, addr); self.store_8(addr, value as u8, NonSeq);
self.write_8(addr, value as u8, sb);
} else { } else {
self.N_cycle32(sb, addr); self.store_aligned_32(addr, value, NonSeq);
self.write_32(addr, value, sb);
}; };
CpuAction::AdvancePC(NonSeq)
} }
self.N_cycle16(sb, self.pc + 2);
CpuAction::AdvancePC
} }
/// Format 7 /// Format 7 load/store with register offset
pub(in super::super) fn exec_thumb_ldr_str_reg_offset( /// Execution Time: 1S+1N+1I for LDR, or 2N for STR
&mut self, pub(in super::super) fn exec_thumb_ldr_str_reg_offset(&mut self, insn: u16) -> CpuAction {
bus: &mut SysBus,
insn: u16,
) -> CpuAction {
let rb = insn.bit_range(3..6) as usize; let rb = insn.bit_range(3..6) as usize;
let addr = self.gpr[rb].wrapping_add(self.gpr[insn.ro()]); let addr = self.gpr[rb].wrapping_add(self.gpr[insn.ro()]);
self.do_exec_thumb_ldr_str(bus, insn, addr, insn.bit(10)) self.do_exec_thumb_ldr_str(insn, addr, insn.bit(10))
} }
/// Format 8 /// Format 8 load/store sign-extended byte/halfword
pub(in super::super) fn exec_thumb_ldr_str_shb( /// Execution Time: 1S+1N+1I for LDR, or 2N for STR
&mut self, pub(in super::super) fn exec_thumb_ldr_str_shb(&mut self, insn: u16) -> CpuAction {
sb: &mut SysBus,
insn: u16,
) -> CpuAction {
let rb = insn.bit_range(3..6) as usize; let rb = insn.bit_range(3..6) as usize;
let rd = (insn & 0b111) as usize; let rd = (insn & 0b111) as usize;
@ -291,45 +258,36 @@ impl Core {
(false, false) => (false, false) =>
/* strh */ /* strh */
{ {
self.write_16(addr, self.gpr[rd] as u16, sb); self.store_aligned_16(addr, self.gpr[rd] as u16, NonSeq);
self.N_cycle16(sb, addr);
} }
(false, true) => (false, true) =>
/* ldrh */ /* ldrh */
{ {
self.gpr[rd] = self.ldr_half(addr, sb); self.gpr[rd] = self.ldr_half(addr, NonSeq);
self.S_cycle16(sb, addr); self.idle_cycle();
self.add_cycle();
} }
(true, false) => (true, false) =>
/* ldsb */ /* ldself */
{ {
let val = sb.read_8(addr) as i8 as i32 as u32; let val = self.load_8(addr, NonSeq) as i8 as i32 as u32;
self.gpr[rd] = val; self.gpr[rd] = val;
self.S_cycle8(sb, addr); self.idle_cycle();
self.add_cycle();
} }
(true, true) => (true, true) =>
/* ldsh */ /* ldsh */
{ {
let val = self.ldr_sign_half(addr, sb); let val = self.ldr_sign_half(addr, NonSeq);
self.gpr[rd] = val; self.gpr[rd] = val;
self.S_cycle16(sb, addr); self.idle_cycle();
self.add_cycle();
} }
} }
self.N_cycle16(sb, self.pc + 2); CpuAction::AdvancePC(NonSeq)
CpuAction::AdvancePC
} }
/// Format 9 /// Format 9
pub(in super::super) fn exec_thumb_ldr_str_imm_offset( /// Execution Time: 1S+1N+1I for LDR, or 2N for STR
&mut self, pub(in super::super) fn exec_thumb_ldr_str_imm_offset(&mut self, insn: u16) -> CpuAction {
sb: &mut SysBus,
insn: u16,
) -> CpuAction {
let rb = insn.bit_range(3..6) as usize; let rb = insn.bit_range(3..6) as usize;
let offset = if insn.bit(12) { let offset = if insn.bit(12) {
@ -338,129 +296,117 @@ impl Core {
(insn.offset5() << 3) >> 1 (insn.offset5() << 3) >> 1
}; };
let addr = self.gpr[rb].wrapping_add(offset as u32); let addr = self.gpr[rb].wrapping_add(offset as u32);
self.do_exec_thumb_ldr_str(sb, insn, addr, insn.bit(12)) self.do_exec_thumb_ldr_str(insn, addr, insn.bit(12))
} }
/// Format 10 /// Format 10
pub(in super::super) fn exec_thumb_ldr_str_halfword( /// Execution Time: 1S+1N+1I for LDR, or 2N for STR
&mut self, pub(in super::super) fn exec_thumb_ldr_str_halfword(&mut self, insn: u16) -> CpuAction {
sb: &mut SysBus,
insn: u16,
) -> CpuAction {
let rb = insn.bit_range(3..6) as usize; let rb = insn.bit_range(3..6) as usize;
let rd = (insn & 0b111) as usize; let rd = (insn & 0b111) as usize;
let base = self.gpr[rb] as i32; let base = self.gpr[rb] as i32;
let addr = base.wrapping_add((insn.offset5() << 1) as i32) as Addr; let addr = base.wrapping_add((insn.offset5() << 1) as i32) as Addr;
if insn.is_load() { if insn.is_load() {
let data = self.ldr_half(addr, sb); let data = self.ldr_half(addr, NonSeq);
self.S_cycle16(sb, addr); self.idle_cycle();
self.add_cycle();
self.gpr[rd] = data as u32; self.gpr[rd] = data as u32;
CpuAction::AdvancePC(Seq)
} else { } else {
self.write_16(addr, self.gpr[rd] as u16, sb); self.store_aligned_16(addr, self.gpr[rd] as u16, NonSeq);
self.N_cycle16(sb, addr); CpuAction::AdvancePC(NonSeq)
} }
self.N_cycle16(sb, self.pc + 2);
CpuAction::AdvancePC
} }
/// Format 11 /// Format 11 load/store SP-relative
pub(in super::super) fn exec_thumb_ldr_str_sp( /// Execution Time: 1S+1N+1I for LDR, or 2N for STR
&mut self, pub(in super::super) fn exec_thumb_ldr_str_sp(&mut self, insn: u16) -> CpuAction {
sb: &mut SysBus,
insn: u16,
) -> CpuAction {
let addr = self.gpr[REG_SP] + (insn.word8() as Addr); let addr = self.gpr[REG_SP] + (insn.word8() as Addr);
let rd = insn.bit_range(8..11) as usize; let rd = insn.bit_range(8..11) as usize;
if insn.is_load() { if insn.is_load() {
let data = self.ldr_word(addr, sb); let data = self.ldr_word(addr, NonSeq);
self.S_cycle16(sb, addr); self.idle_cycle();
self.add_cycle();
self.gpr[rd] = data; self.gpr[rd] = data;
CpuAction::AdvancePC(Seq)
} else { } else {
self.write_32(addr, self.gpr[rd], sb); self.store_aligned_32(addr, self.gpr[rd], NonSeq);
self.N_cycle16(sb, addr); CpuAction::AdvancePC(NonSeq)
} }
self.N_cycle16(sb, self.pc + 2);
CpuAction::AdvancePC
} }
/// Format 12 /// Format 12
pub(in super::super) fn exec_thumb_load_address( /// Execution Time: 1S
&mut self, pub(in super::super) fn exec_thumb_load_address(&mut self, insn: u16) -> CpuAction {
sb: &mut SysBus,
insn: u16,
) -> CpuAction {
let rd = insn.bit_range(8..11) as usize; let rd = insn.bit_range(8..11) as usize;
let result = if insn.bit(consts::flags::FLAG_SP) {
self.gpr[rd] = if insn.bit(consts::flags::FLAG_SP) {
self.gpr[REG_SP] + (insn.word8() as Addr) self.gpr[REG_SP] + (insn.word8() as Addr)
} else { } else {
(self.pc_thumb() & !0b10) + 4 + (insn.word8() as Addr) (self.pc_thumb() & !0b10) + 4 + (insn.word8() as Addr)
}; };
self.gpr[rd] = result;
self.S_cycle16(sb, self.pc + 2);
CpuAction::AdvancePC CpuAction::AdvancePC(Seq)
} }
/// Format 13 /// Format 13
pub(in super::super) fn exec_thumb_add_sp(&mut self, sb: &mut SysBus, insn: u16) -> CpuAction { /// Execution Time: 1S
pub(in super::super) fn exec_thumb_add_sp(&mut self, insn: u16) -> CpuAction {
let op1 = self.gpr[REG_SP] as i32; let op1 = self.gpr[REG_SP] as i32;
let op2 = insn.sword7(); let op2 = insn.sword7();
self.gpr[REG_SP] = op1.wrapping_add(op2) as u32; self.gpr[REG_SP] = op1.wrapping_add(op2) as u32;
self.S_cycle16(sb, self.pc + 2);
CpuAction::AdvancePC CpuAction::AdvancePC(Seq)
} }
/// Format 14 /// Format 14
pub(in super::super) fn exec_thumb_push_pop( /// Execution Time: nS+1N+1I (POP), (n+1)S+2N+1I (POP PC), or (n-1)S+2N (PUSH).
&mut self, pub(in super::super) fn exec_thumb_push_pop(&mut self, insn: u16) -> CpuAction {
sb: &mut SysBus, macro_rules! push {
insn: u16, ($r:expr, $access:ident) => {
) -> CpuAction { self.gpr[REG_SP] -= 4;
let mut result = CpuAction::AdvancePC; let stack_addr = self.gpr[REG_SP] & !3;
self.store_32(stack_addr, self.get_reg($r), $access);
// (From GBATEK) Execution Time: nS+1N+1I (POP), (n+1)S+2N+1I (POP PC), or (n-1)S+2N (PUSH). $access = Seq;
};
}
macro_rules! pop {
($r:expr) => {
let val = self.load_32(self.gpr[REG_SP] & !3, Seq);
self.set_reg($r, val);
self.gpr[REG_SP] += 4;
};
($r:expr, $access:ident) => {
let val = self.load_32(self.gpr[REG_SP] & !3, $access);
$access = Seq;
self.set_reg($r, val);
self.gpr[REG_SP] += 4;
};
}
let mut result = CpuAction::AdvancePC(NonSeq);
let is_pop = insn.is_load(); let is_pop = insn.is_load();
let pc_lr_flag = insn.bit(consts::flags::FLAG_R); let pc_lr_flag = insn.bit(consts::flags::FLAG_R);
let rlist = insn.register_list(); let rlist = insn.register_list();
self.N_cycle16(sb, self.pc); let mut access = MemoryAccess::NonSeq;
let mut first = true;
if is_pop { if is_pop {
for r in 0..8 { for r in 0..8 {
if rlist.bit(r) { if rlist.bit(r) {
pop(self, sb, r); pop!(r, access);
if first {
self.add_cycle();
first = false;
} else {
self.S_cycle16(sb, self.gpr[REG_SP]);
}
} }
} }
if pc_lr_flag { if pc_lr_flag {
pop(self, sb, REG_PC); pop!(REG_PC);
self.pc = self.pc & !1; self.pc = self.pc & !1;
result = CpuAction::FlushPipeline; result = CpuAction::PipelineFlushed;
self.reload_pipeline16(sb); self.reload_pipeline16();
} }
self.S_cycle16(sb, self.pc + 2); // Idle 1 cycle
self.idle_cycle();
} else { } else {
if pc_lr_flag { if pc_lr_flag {
push(self, sb, REG_LR); push!(REG_LR, access);
} }
for r in (0..8).rev() { for r in (0..8).rev() {
if rlist.bit(r) { if rlist.bit(r) {
push(self, sb, r); push!(r, access);
if first {
first = false;
} else {
self.S_cycle16(sb, self.gpr[REG_SP]);
}
} }
} }
} }
@ -469,10 +415,9 @@ impl Core {
} }
/// Format 15 /// Format 15
pub(in super::super) fn exec_thumb_ldm_stm(&mut self, sb: &mut SysBus, insn: u16) -> CpuAction { /// Execution Time: nS+1N+1I for LDM, or (n-1)S+2N for STM.
let mut result = CpuAction::AdvancePC; pub(in super::super) fn exec_thumb_ldm_stm(&mut self, insn: u16) -> CpuAction {
let mut result = CpuAction::AdvancePC(NonSeq);
// (From GBATEK) Execution Time: nS+1N+1I (POP), (n+1)S+2N+1I (POP PC), or (n-1)S+2N (PUSH).
let rb = insn.bit_range(8..11) as usize; let rb = insn.bit_range(8..11) as usize;
let base_reg = rb; let base_reg = rb;
@ -481,31 +426,25 @@ impl Core {
let align_preserve = self.gpr[base_reg] & 3; let align_preserve = self.gpr[base_reg] & 3;
let mut addr = self.gpr[base_reg] & !3; let mut addr = self.gpr[base_reg] & !3;
let rlist = insn.register_list(); let rlist = insn.register_list();
self.N_cycle16(sb, self.pc); // let mut first = true;
let mut first = true;
if rlist != 0 { if rlist != 0 {
if is_load { if is_load {
let writeback = !rlist.bit(base_reg); let mut access = NonSeq;
for r in 0..8 { for r in 0..8 {
if rlist.bit(r) { if rlist.bit(r) {
let val = sb.read_32(addr); let val = self.load_32(addr, access);
if first { access = Seq;
first = false;
self.add_cycle();
} else {
self.S_cycle16(sb, addr);
}
addr += 4; addr += 4;
self.add_cycle();
self.set_reg(r, val); self.set_reg(r, val);
} }
} }
self.S_cycle16(sb, self.pc + 2); self.idle_cycle();
if writeback { if !rlist.bit(base_reg) {
self.gpr[base_reg] = addr + align_preserve; self.gpr[base_reg] = addr + align_preserve;
} }
} else { } else {
let mut first = true;
let mut access = NonSeq;
for r in 0..8 { for r in 0..8 {
if rlist.bit(r) { if rlist.bit(r) {
let v = if r != base_reg { let v = if r != base_reg {
@ -519,10 +458,9 @@ impl Core {
}; };
if first { if first {
first = false; first = false;
} else {
self.S_cycle16(sb, addr);
} }
sb.write_32(addr, v); self.store_32(addr, v, access);
access = Seq;
addr += 4; addr += 4;
} }
self.gpr[base_reg] = addr + align_preserve; self.gpr[base_reg] = addr + align_preserve;
@ -531,12 +469,12 @@ impl Core {
} else { } else {
// From gbatek.htm: Empty Rlist: R15 loaded/stored (ARMv4 only), and Rb=Rb+40h (ARMv4-v5). // From gbatek.htm: Empty Rlist: R15 loaded/stored (ARMv4 only), and Rb=Rb+40h (ARMv4-v5).
if is_load { if is_load {
let val = sb.read_32(addr); let val = self.load_32(addr, NonSeq);
self.set_reg(REG_PC, val & !1); self.pc = val & !1;
result = CpuAction::FlushPipeline; result = CpuAction::PipelineFlushed;
self.reload_pipeline16(sb); self.reload_pipeline16();
} else { } else {
sb.write_32(addr, self.pc + 2); self.store_32(addr, self.pc + 2, NonSeq);
} }
addr += 0x40; addr += 0x40;
self.gpr[base_reg] = addr + align_preserve; self.gpr[base_reg] = addr + align_preserve;
@ -546,64 +484,55 @@ impl Core {
} }
/// Format 16 /// Format 16
pub(in super::super) fn exec_thumb_branch_with_cond( /// Execution Time:
&mut self, /// 2S+1N if condition true (jump executed)
sb: &mut SysBus, /// 1S if condition false
insn: u16, pub(in super::super) fn exec_thumb_branch_with_cond(&mut self, insn: u16) -> CpuAction {
) -> CpuAction {
if !self.check_arm_cond(insn.cond()) { if !self.check_arm_cond(insn.cond()) {
self.S_cycle16(sb, self.pc + 2); CpuAction::AdvancePC(Seq)
CpuAction::AdvancePC
} else { } else {
let offset = insn.bcond_offset(); let offset = insn.bcond_offset();
self.S_cycle16(sb, self.pc);
self.pc = (self.pc as i32).wrapping_add(offset) as u32; self.pc = (self.pc as i32).wrapping_add(offset) as u32;
self.reload_pipeline16(sb); self.reload_pipeline16();
CpuAction::FlushPipeline CpuAction::PipelineFlushed
} }
} }
/// Format 17 /// Format 17
pub(in super::super) fn exec_thumb_swi(&mut self, sb: &mut SysBus, _insn: u16) -> CpuAction { /// Execution Time: 2S+1N
self.N_cycle16(sb, self.pc); pub(in super::super) fn exec_thumb_swi(&mut self, _insn: u16) -> CpuAction {
self.exception(sb, Exception::SoftwareInterrupt, self.pc - 2); self.exception(Exception::SoftwareInterrupt, self.pc - 2); // implies pipeline reload
CpuAction::FlushPipeline CpuAction::PipelineFlushed
} }
/// Format 18 /// Format 18
pub(in super::super) fn exec_thumb_branch(&mut self, sb: &mut SysBus, insn: u16) -> CpuAction { /// Execution Time: 2S+1N
pub(in super::super) fn exec_thumb_branch(&mut self, insn: u16) -> CpuAction {
let offset = ((insn.offset11() << 21) >> 20) as i32; let offset = ((insn.offset11() << 21) >> 20) as i32;
self.pc = (self.pc as i32).wrapping_add(offset) as u32; self.pc = (self.pc as i32).wrapping_add(offset) as u32;
self.S_cycle16(sb, self.pc); self.reload_pipeline16(); // 2S + 1N
self.reload_pipeline16(sb); CpuAction::PipelineFlushed
CpuAction::FlushPipeline
} }
/// Format 19 /// Format 19
pub(in super::super) fn exec_thumb_branch_long_with_link( /// Execution Time: 3S+1N (first opcode 1S, second opcode 2S+1N).
&mut self, pub(in super::super) fn exec_thumb_branch_long_with_link(&mut self, insn: u16) -> CpuAction {
sb: &mut SysBus,
insn: u16,
) -> CpuAction {
let mut off = insn.offset11(); let mut off = insn.offset11();
if insn.bit(consts::flags::FLAG_LOW_OFFSET) { if insn.bit(consts::flags::FLAG_LOW_OFFSET) {
self.S_cycle16(sb, self.pc);
off = off << 1; off = off << 1;
let next_pc = (self.pc - 2) | 1; let next_pc = (self.pc - 2) | 1;
self.pc = ((self.gpr[REG_LR] & !1) as i32).wrapping_add(off) as u32; self.pc = ((self.gpr[REG_LR] & !1) as i32).wrapping_add(off) as u32;
self.gpr[REG_LR] = next_pc; self.gpr[REG_LR] = next_pc;
self.reload_pipeline16(sb); self.reload_pipeline16(); // implies 2S + 1N
CpuAction::FlushPipeline CpuAction::PipelineFlushed
} else { } else {
off = (off << 21) >> 9; off = (off << 21) >> 9;
self.gpr[REG_LR] = (self.pc as i32).wrapping_add(off) as u32; self.gpr[REG_LR] = (self.pc as i32).wrapping_add(off) as u32;
self.S_cycle16(sb, self.pc); CpuAction::AdvancePC(Seq) // 1S
CpuAction::AdvancePC
} }
} }
pub fn thumb_undefined(&mut self, _: &mut SysBus, insn: u16) -> CpuAction { pub fn thumb_undefined(&mut self, insn: u16) -> CpuAction {
panic!( panic!(
"executing undefind thumb instruction {:04x} at @{:08x}", "executing undefind thumb instruction {:04x} at @{:08x}",
insn, insn,
@ -612,28 +541,28 @@ impl Core {
} }
#[cfg(not(feature = "arm7tdmi_dispatch_table"))] #[cfg(not(feature = "arm7tdmi_dispatch_table"))]
pub fn exec_thumb(&mut self, bus: &mut SysBus, insn: u16, fmt: ThumbFormat) -> CpuAction { pub fn exec_thumb(&mut self, insn: u16, fmt: ThumbFormat) -> CpuAction {
match fmt { match fmt {
ThumbFormat::MoveShiftedReg => self.exec_thumb_move_shifted_reg(bus, insn), ThumbFormat::MoveShiftedReg => self.exec_thumb_move_shifted_reg(insn),
ThumbFormat::AddSub => self.exec_thumb_add_sub(bus, insn), ThumbFormat::AddSub => self.exec_thumb_add_sub(insn),
ThumbFormat::DataProcessImm => self.exec_thumb_data_process_imm(bus, insn), ThumbFormat::DataProcessImm => self.exec_thumb_data_process_imm(insn),
ThumbFormat::AluOps => self.exec_thumb_alu_ops(bus, insn), ThumbFormat::AluOps => self.exec_thumb_alu_ops(insn),
ThumbFormat::HiRegOpOrBranchExchange => self.exec_thumb_hi_reg_op_or_bx(bus, insn), ThumbFormat::HiRegOpOrBranchExchange => self.exec_thumb_hi_reg_op_or_bx(insn),
ThumbFormat::LdrPc => self.exec_thumb_ldr_pc(bus, insn), ThumbFormat::LdrPc => self.exec_thumb_ldr_pc(insn),
ThumbFormat::LdrStrRegOffset => self.exec_thumb_ldr_str_reg_offset(bus, insn), ThumbFormat::LdrStrRegOffset => self.exec_thumb_ldr_str_reg_offset(insn),
ThumbFormat::LdrStrSHB => self.exec_thumb_ldr_str_shb(bus, insn), ThumbFormat::LdrStrSHB => self.exec_thumb_ldr_str_shb(insn),
ThumbFormat::LdrStrImmOffset => self.exec_thumb_ldr_str_imm_offset(bus, insn), ThumbFormat::LdrStrImmOffset => self.exec_thumb_ldr_str_imm_offset(insn),
ThumbFormat::LdrStrHalfWord => self.exec_thumb_ldr_str_halfword(bus, insn), ThumbFormat::LdrStrHalfWord => self.exec_thumb_ldr_str_halfword(insn),
ThumbFormat::LdrStrSp => self.exec_thumb_ldr_str_sp(bus, insn), ThumbFormat::LdrStrSp => self.exec_thumb_ldr_str_sp(insn),
ThumbFormat::LoadAddress => self.exec_thumb_load_address(bus, insn), ThumbFormat::LoadAddress => self.exec_thumb_load_address(insn),
ThumbFormat::AddSp => self.exec_thumb_add_sp(bus, insn), ThumbFormat::AddSp => self.exec_thumb_add_sp(insn),
ThumbFormat::PushPop => self.exec_thumb_push_pop(bus, insn), ThumbFormat::PushPop => self.exec_thumb_push_pop(insn),
ThumbFormat::LdmStm => self.exec_thumb_ldm_stm(bus, insn), ThumbFormat::LdmStm => self.exec_thumb_ldm_stm(insn),
ThumbFormat::BranchConditional => self.exec_thumb_branch_with_cond(bus, insn), ThumbFormat::BranchConditional => self.exec_thumb_branch_with_cond(insn),
ThumbFormat::Swi => self.exec_thumb_swi(bus, insn), ThumbFormat::Swi => self.exec_thumb_swi(insn),
ThumbFormat::Branch => self.exec_thumb_branch(bus, insn), ThumbFormat::Branch => self.exec_thumb_branch(insn),
ThumbFormat::BranchLongWithLink => self.exec_thumb_branch_long_with_link(bus, insn), ThumbFormat::BranchLongWithLink => self.exec_thumb_branch_long_with_link(insn),
ThumbFormat::Undefined => self.thumb_undefined(bus, insn), ThumbFormat::Undefined => self.thumb_undefined(insn),
} }
} }
} }

View file

@ -105,17 +105,17 @@ impl Debugger {
} }
println!("{}", self.gba.cpu); println!("{}", self.gba.cpu);
println!("IME={}", self.gba.sysbus.io.intc.interrupt_master_enable); println!("IME={}", self.gba.io_devs.intc.interrupt_master_enable);
println!("IE={:#?}", self.gba.sysbus.io.intc.interrupt_enable); println!("IE={:#?}", self.gba.io_devs.intc.interrupt_enable);
println!("IF={:#?}", self.gba.sysbus.io.intc.interrupt_flags); println!("IF={:#?}", self.gba.io_devs.intc.interrupt_flags);
} }
GpuInfo => println!("GPU: {:#?}", self.gba.sysbus.io.gpu), GpuInfo => println!("GPU: {:#?}", self.gba.io_devs.gpu),
GpioInfo => println!("GPIO: {:#?}", self.gba.sysbus.cartridge.get_gpio()), GpioInfo => println!("GPIO: {:#?}", self.gba.sysbus.cartridge.get_gpio()),
Step(count) => { Step(count) => {
for _ in 0..count { for _ in 0..count {
self.gba.cpu.step(&mut self.gba.sysbus); self.gba.cpu.step();
while self.gba.cpu.last_executed.is_none() { while self.gba.cpu.last_executed.is_none() {
self.gba.cpu.step(&mut self.gba.sysbus); self.gba.cpu.step();
} }
if let Some(last_executed) = &self.gba.cpu.last_executed { if let Some(last_executed) = &self.gba.cpu.last_executed {
let pc = last_executed.get_pc(); let pc = last_executed.get_pc();
@ -143,6 +143,7 @@ impl Debugger {
); );
} }
} }
println!("cycles: {}", self.gba.scheduler.timestamp());
println!("{}\n", self.gba.cpu); println!("{}\n", self.gba.cpu);
} }
Continue => 'running: loop { Continue => 'running: loop {
@ -218,7 +219,7 @@ impl Debugger {
// TileView(bg) => create_tile_view(bg, &self.gba), // TileView(bg) => create_tile_view(bg, &self.gba),
Reset => { Reset => {
println!("resetting cpu..."); println!("resetting cpu...");
self.gba.cpu.reset(&mut self.gba.sysbus); self.gba.cpu.reset();
println!("cpu is restarted!") println!("cpu is restarted!")
} }
TraceToggle(flags) => { TraceToggle(flags) => {

View file

@ -11,7 +11,7 @@ use super::dma::DmaController;
use super::gpu::*; use super::gpu::*;
use super::interrupt::*; use super::interrupt::*;
use super::iodev::*; use super::iodev::*;
use super::sched::{EventHandler, EventType, Scheduler, SharedScheduler}; use super::sched::{EventType, Scheduler, SharedScheduler};
use super::sound::SoundController; use super::sound::SoundController;
use super::sysbus::SysBus; use super::sysbus::SysBus;
use super::timer::Timers; use super::timer::Timers;
@ -22,21 +22,15 @@ use super::VideoInterface;
use super::{AudioInterface, InputInterface}; use super::{AudioInterface, InputInterface};
pub struct GameBoyAdvance { pub struct GameBoyAdvance {
pub cpu: arm7tdmi::Core, pub cpu: Box<arm7tdmi::Core<SysBus>>,
pub sysbus: Box<SysBus>, pub sysbus: Shared<SysBus>,
io_devs: Shared<IoDevices>, pub io_devs: Shared<IoDevices>,
pub scheduler: SharedScheduler,
interrupt_flags: SharedInterruptFlags,
#[cfg(not(feature = "no_video_interface"))] #[cfg(not(feature = "no_video_interface"))]
pub video_device: Rc<RefCell<dyn VideoInterface>>, pub video_device: Rc<RefCell<dyn VideoInterface>>,
pub audio_device: Rc<RefCell<dyn AudioInterface>>, pub audio_device: Rc<RefCell<dyn AudioInterface>>,
pub input_device: Rc<RefCell<dyn InputInterface>>, pub input_device: Rc<RefCell<dyn InputInterface>>,
pub cycles_to_next_event: usize,
scheduler: SharedScheduler,
overshoot_cycles: usize,
interrupt_flags: SharedInterruptFlags,
} }
#[derive(Serialize, Deserialize)] #[derive(Serialize, Deserialize)]
@ -47,7 +41,13 @@ struct SaveState {
ewram: Box<[u8]>, ewram: Box<[u8]>,
iwram: Box<[u8]>, iwram: Box<[u8]>,
interrupt_flags: u16, interrupt_flags: u16,
cpu: arm7tdmi::Core, cpu_state: arm7tdmi::SavedCpuState,
}
#[derive(Debug, PartialEq)]
enum BusMaster {
Dma,
Cpu,
} }
/// Checks if the bios provided is the real one /// Checks if the bios provided is the real one
@ -83,20 +83,20 @@ impl GameBoyAdvance {
let intc = InterruptController::new(interrupt_flags.clone()); let intc = InterruptController::new(interrupt_flags.clone());
let gpu = Box::new(Gpu::new(scheduler.clone(), interrupt_flags.clone())); let gpu = Box::new(Gpu::new(scheduler.clone(), interrupt_flags.clone()));
let dmac = DmaController::new(interrupt_flags.clone(), scheduler.clone()); let dmac = DmaController::new(interrupt_flags.clone(), scheduler.clone());
let timers = Timers::new(interrupt_flags.clone()); let timers = Timers::new(interrupt_flags.clone(), scheduler.clone());
let sound_controller = Box::new(SoundController::new( let sound_controller = Box::new(SoundController::new(
scheduler.clone(), scheduler.clone(),
audio_device.borrow().get_sample_rate() as f32, audio_device.borrow().get_sample_rate() as f32,
)); ));
let io_devs = Shared::new(IoDevices::new(intc, gpu, dmac, timers, sound_controller)); let io_devs = Shared::new(IoDevices::new(intc, gpu, dmac, timers, sound_controller));
let sysbus = Box::new(SysBus::new( let sysbus = Shared::new(SysBus::new(
scheduler.clone(), scheduler.clone(),
io_devs.clone(), io_devs.clone(),
bios_rom, bios_rom,
gamepak, gamepak,
)); ));
let cpu = arm7tdmi::Core::new(); let cpu = Box::new(arm7tdmi::Core::new(sysbus.clone()));
let mut gba = GameBoyAdvance { let mut gba = GameBoyAdvance {
cpu, cpu,
@ -110,8 +110,6 @@ impl GameBoyAdvance {
scheduler: scheduler, scheduler: scheduler,
cycles_to_next_event: 1,
overshoot_cycles: 0,
interrupt_flags: interrupt_flags, interrupt_flags: interrupt_flags,
}; };
@ -130,7 +128,6 @@ impl GameBoyAdvance {
) -> bincode::Result<GameBoyAdvance> { ) -> bincode::Result<GameBoyAdvance> {
let decoded: Box<SaveState> = bincode::deserialize_from(savestate)?; let decoded: Box<SaveState> = bincode::deserialize_from(savestate)?;
let arm7tdmi = decoded.cpu;
let interrupts = Rc::new(Cell::new(IrqBitmask(decoded.interrupt_flags))); let interrupts = Rc::new(Cell::new(IrqBitmask(decoded.interrupt_flags)));
let scheduler = decoded.scheduler.make_shared(); let scheduler = decoded.scheduler.make_shared();
let mut io_devs = Shared::new(decoded.io_devs); let mut io_devs = Shared::new(decoded.io_devs);
@ -139,8 +136,7 @@ impl GameBoyAdvance {
io_devs.connect_irq(interrupts.clone()); io_devs.connect_irq(interrupts.clone());
io_devs.gpu.set_scheduler(scheduler.clone()); io_devs.gpu.set_scheduler(scheduler.clone());
io_devs.sound.set_scheduler(scheduler.clone()); io_devs.sound.set_scheduler(scheduler.clone());
let sysbus = Shared::new(SysBus::new_with_memories(
let sysbus = Box::new(SysBus::new_with_memories(
scheduler.clone(), scheduler.clone(),
io_devs.clone(), io_devs.clone(),
cartridge, cartridge,
@ -148,6 +144,10 @@ impl GameBoyAdvance {
decoded.ewram, decoded.ewram,
decoded.iwram, decoded.iwram,
)); ));
let arm7tdmi = Box::new(arm7tdmi::Core::from_saved_state(
sysbus.clone(),
decoded.cpu_state,
));
Ok(GameBoyAdvance { Ok(GameBoyAdvance {
cpu: arm7tdmi, cpu: arm7tdmi,
@ -161,17 +161,13 @@ impl GameBoyAdvance {
audio_device: audio_device, audio_device: audio_device,
input_device: input_device, input_device: input_device,
cycles_to_next_event: 1,
overshoot_cycles: 0,
scheduler, scheduler,
}) })
} }
pub fn save_state(&self) -> bincode::Result<Vec<u8>> { pub fn save_state(&self) -> bincode::Result<Vec<u8>> {
let s = SaveState { let s = SaveState {
cpu: self.cpu.clone(), cpu_state: self.cpu.save_state(),
io_devs: self.io_devs.clone_inner(), io_devs: self.io_devs.clone_inner(),
cartridge: self.sysbus.cartridge.thin_copy(), cartridge: self.sysbus.cartridge.thin_copy(),
iwram: Box::from(self.sysbus.get_iwram()), iwram: Box::from(self.sysbus.get_iwram()),
@ -186,11 +182,12 @@ impl GameBoyAdvance {
pub fn restore_state(&mut self, bytes: &[u8], bios: Box<[u8]>) -> bincode::Result<()> { pub fn restore_state(&mut self, bytes: &[u8], bios: Box<[u8]>) -> bincode::Result<()> {
let decoded: Box<SaveState> = bincode::deserialize_from(bytes)?; let decoded: Box<SaveState> = bincode::deserialize_from(bytes)?;
self.cpu = decoded.cpu; self.cpu.restore_state(decoded.cpu_state);
self.scheduler = Scheduler::make_shared(decoded.scheduler); self.scheduler = Scheduler::make_shared(decoded.scheduler);
self.interrupt_flags = Rc::new(Cell::new(IrqBitmask(decoded.interrupt_flags))); self.interrupt_flags = Rc::new(Cell::new(IrqBitmask(decoded.interrupt_flags)));
self.io_devs = Shared::new(decoded.io_devs); self.io_devs = Shared::new(decoded.io_devs);
// Restore memory state // Restore memory state
self.cpu.set_memory_interface(self.sysbus.clone());
self.sysbus.set_bios(bios); self.sysbus.set_bios(bios);
self.sysbus.set_iwram(decoded.iwram); self.sysbus.set_iwram(decoded.iwram);
self.sysbus.set_ewram(decoded.ewram); self.sysbus.set_ewram(decoded.ewram);
@ -202,7 +199,6 @@ impl GameBoyAdvance {
self.sysbus.set_io_devices(self.io_devs.clone()); self.sysbus.set_io_devices(self.io_devs.clone());
self.sysbus.cartridge.update_from(decoded.cartridge); self.sysbus.cartridge.update_from(decoded.cartridge);
self.sysbus.created(); self.sysbus.created();
self.cycles_to_next_event = 1;
Ok(()) Ok(())
} }
@ -222,24 +218,113 @@ impl GameBoyAdvance {
pub fn frame(&mut self) { pub fn frame(&mut self) {
self.key_poll(); self.key_poll();
static mut OVERSHOOT: usize = 0;
unsafe {
OVERSHOOT = self.run(CYCLES_FULL_REFRESH - OVERSHOOT);
}
}
let mut scheduler = self.scheduler.clone(); #[inline]
fn dma_step(&mut self) {
self.io_devs.dmac.perform_work(&mut self.sysbus);
}
let mut remaining_cycles = CYCLES_FULL_REFRESH - self.overshoot_cycles; #[inline]
pub fn cpu_step(&mut self) {
if self.io_devs.intc.irq_pending() {
self.cpu.irq();
self.io_devs.haltcnt = HaltState::Running;
}
self.cpu.step();
}
while remaining_cycles > 0 { #[inline]
let cycles = self.step(&mut scheduler); fn get_bus_master(&mut self) -> Option<BusMaster> {
if remaining_cycles >= cycles { match (self.io_devs.dmac.is_active(), self.io_devs.haltcnt) {
remaining_cycles -= cycles; (true, _) => Some(BusMaster::Dma),
} else { (false, HaltState::Running) => Some(BusMaster::Cpu),
self.overshoot_cycles = cycles - remaining_cycles; (false, _) => None,
return; }
}
/// Runs the emulation for a given amount of cycles
/// @return number of extra cycle ran in this iteration
#[inline]
fn run(&mut self, cycles_to_run: usize) -> usize {
let run_start_time = self.scheduler.timestamp();
// Register an event to mark the end of this run
self.scheduler
.push(EventType::RunLimitReached, cycles_to_run);
let mut running = true;
while running {
// The tricky part is to avoid unnecessary calls for Scheduler::process_pending,
// performance-wise it would be best to run as many cycles as fast as possible while we know there are no pending events.
// Fast forward emulation until an event occurs
while self.scheduler.timestamp() <= self.scheduler.timestamp_of_next_event() {
// 3 Options:
// 1. DMA is active - thus CPU is blocked
// 2. DMA inactive and halt state is RUN - CPU can run
// 3. DMA inactive and halt state is HALT - CPU is blocked
match self.get_bus_master() {
Some(BusMaster::Dma) => self.dma_step(),
Some(BusMaster::Cpu) => self.cpu_step(),
None => {
if self.io_devs.intc.irq_pending() {
self.io_devs.haltcnt = HaltState::Running;
} else {
self.scheduler.fast_forward_to_next();
let (event, cycles_late) = self
.scheduler
.pop_pending_event()
.unwrap_or_else(|| unreachable!());
self.handle_event(event, cycles_late, &mut running);
}
}
}
}
while let Some((event, cycles_late)) = self.scheduler.pop_pending_event() {
self.handle_event(event, cycles_late, &mut running);
} }
} }
self.overshoot_cycles = 0; let total_cycles_ran = self.scheduler.timestamp() - run_start_time;
total_cycles_ran - cycles_to_run
} }
#[inline]
fn handle_event(&mut self, event: EventType, cycles_late: usize, running: &mut bool) {
let io = &mut (*self.io_devs);
match event {
EventType::RunLimitReached => {
*running = false;
}
EventType::DmaActivateChannel(channel_id) => io.dmac.activate_channel(channel_id),
EventType::TimerOverflow(channel_id) => {
let timers = &mut io.timers;
let dmac = &mut io.dmac;
let apu = &mut io.sound;
timers.handle_overflow_event(channel_id, cycles_late, apu, dmac);
}
EventType::Gpu(event) => io.gpu.on_event(
event,
cycles_late,
&mut *self.sysbus,
#[cfg(not(feature = "no_video_interface"))]
&self.video_device,
),
EventType::Apu(event) => io.sound.on_event(event, cycles_late, &self.audio_device),
}
}
pub fn skip_bios(&mut self) {
self.cpu.skip_bios();
self.sysbus.io.gpu.skip_bios();
}
#[cfg(feature = "debugger")]
pub fn add_breakpoint(&mut self, addr: u32) -> Option<usize> { pub fn add_breakpoint(&mut self, addr: u32) -> Option<usize> {
if !self.cpu.breakpoints.contains(&addr) { if !self.cpu.breakpoints.contains(&addr) {
let new_index = self.cpu.breakpoints.len(); let new_index = self.cpu.breakpoints.len();
@ -250,6 +335,7 @@ impl GameBoyAdvance {
} }
} }
#[cfg(feature = "debugger")]
pub fn check_breakpoint(&self) -> Option<u32> { pub fn check_breakpoint(&self) -> Option<u32> {
let next_pc = self.cpu.get_next_pc(); let next_pc = self.cpu.get_next_pc();
for bp in &self.cpu.breakpoints { for bp in &self.cpu.breakpoints {
@ -261,82 +347,23 @@ impl GameBoyAdvance {
None None
} }
pub fn skip_bios(&mut self) {
self.cpu.skip_bios();
self.sysbus.io.gpu.skip_bios();
}
pub fn step_cpu(&mut self, io: &mut IoDevices) -> usize {
if io.intc.irq_pending() {
self.cpu.irq(&mut self.sysbus);
io.haltcnt = HaltState::Running;
}
let previous_cycles = self.cpu.cycles;
self.cpu.step(&mut self.sysbus);
self.cpu.cycles - previous_cycles
}
pub fn step(&mut self, scheduler: &mut Scheduler) -> usize {
// I hate myself for doing this, but rust left me no choice.
let io = unsafe {
let ptr = &mut *self.sysbus as *mut SysBus;
&mut (*ptr).io as &mut IoDevices
};
let available_cycles = self.scheduler.get_cycles_to_next_event();
let mut cycles_left = available_cycles;
let mut cycles = 0;
while cycles_left > 0 {
let _cycles = if !io.dmac.is_active() {
if HaltState::Running == io.haltcnt {
self.step_cpu(io)
} else {
cycles = cycles_left;
break;
}
} else {
io.dmac.perform_work(&mut self.sysbus);
return cycles;
};
cycles += _cycles;
if cycles_left < _cycles {
break;
}
cycles_left -= _cycles;
}
io.timers.update(cycles, &mut self.sysbus);
scheduler.run(cycles, self);
cycles
}
#[cfg(feature = "debugger")] #[cfg(feature = "debugger")]
/// 'step' function that checks for breakpoints /// 'step' function that checks for breakpoints
/// TODO avoid code duplication /// TODO avoid code duplication
pub fn step_debugger(&mut self) -> Option<u32> { pub fn step_debugger(&mut self) -> Option<u32> {
// I hate myself for doing this, but rust left me no choice.
let io = unsafe {
let ptr = &mut *self.sysbus as *mut SysBus;
&mut (*ptr).io as &mut IoDevices
};
// clear any pending DMAs // clear any pending DMAs
while io.dmac.is_active() { self.dma_step();
io.dmac.perform_work(&mut self.sysbus);
} // Run the CPU
let _cycles = self.scheduler.measure_cycles(|| {
self.cpu_step();
});
let cycles = self.step_cpu(io);
let breakpoint = self.check_breakpoint(); let breakpoint = self.check_breakpoint();
io.timers.update(cycles, &mut self.sysbus); while let Some((event, cycles_late)) = self.scheduler.pop_pending_event() {
self.handle_event(event, cycles_late, &mut running);
// update gpu & sound }
let mut scheduler = self.scheduler.clone();
scheduler.run(cycles, self);
breakpoint breakpoint
} }
@ -349,27 +376,7 @@ impl GameBoyAdvance {
/// Reset the emulator /// Reset the emulator
pub fn soft_reset(&mut self) { pub fn soft_reset(&mut self) {
self.cpu.reset(&mut self.sysbus); self.cpu.reset();
}
}
impl EventHandler for GameBoyAdvance {
fn handle_event(&mut self, event: EventType, extra_cycles: usize) {
let io = unsafe {
let ptr = &mut *self.sysbus as *mut SysBus;
&mut (*ptr).io as &mut IoDevices
};
match event {
EventType::DmaActivateChannel(channel_id) => io.dmac.activate_channel(channel_id),
EventType::Gpu(event) => io.gpu.on_event(
event,
extra_cycles,
self.sysbus.as_mut(),
#[cfg(not(feature = "no_video_interface"))]
&self.video_device,
),
EventType::Apu(event) => io.sound.on_event(event, extra_cycles, &self.audio_device),
}
} }
} }

View file

@ -231,7 +231,7 @@ impl InterruptConnect for Gpu {
impl Gpu { impl Gpu {
pub fn new(mut scheduler: SharedScheduler, interrupt_flags: SharedInterruptFlags) -> Gpu { pub fn new(mut scheduler: SharedScheduler, interrupt_flags: SharedInterruptFlags) -> Gpu {
scheduler.add_gpu_event(GpuEvent::HDraw, CYCLES_HDRAW); scheduler.push_gpu_event(GpuEvent::HDraw, CYCLES_HDRAW);
Gpu { Gpu {
interrupt_flags, interrupt_flags,
scheduler, scheduler,
@ -654,7 +654,7 @@ impl Gpu {
GpuEvent::VBlankHBlank => self.handle_vblank_hblank(), GpuEvent::VBlankHBlank => self.handle_vblank_hblank(),
}; };
self.scheduler self.scheduler
.schedule(EventType::Gpu(next_event), cycles - extra_cycles); .push(EventType::Gpu(next_event), cycles - extra_cycles);
} }
} }

View file

@ -1,3 +1,7 @@
use std::cell::Cell;
use std::cmp::Ordering;
use std::collections::BinaryHeap;
use super::util::Shared; use super::util::Shared;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
@ -5,7 +9,7 @@ use serde::{Deserialize, Serialize};
const NUM_EVENTS: usize = 32; const NUM_EVENTS: usize = 32;
#[repr(u32)] #[repr(u32)]
#[derive(Serialize, Deserialize, Debug, PartialEq, Copy, Clone)] #[derive(Serialize, Deserialize, Debug, PartialOrd, PartialEq, Eq, Copy, Clone)]
pub enum GpuEvent { pub enum GpuEvent {
HDraw, HDraw,
HBlank, HBlank,
@ -14,7 +18,7 @@ pub enum GpuEvent {
} }
#[repr(u32)] #[repr(u32)]
#[derive(Serialize, Deserialize, Debug, PartialEq, Copy, Clone)] #[derive(Serialize, Deserialize, Debug, PartialOrd, PartialEq, Eq, Copy, Clone)]
pub enum ApuEvent { pub enum ApuEvent {
Psg1Generate, Psg1Generate,
Psg2Generate, Psg2Generate,
@ -24,48 +28,98 @@ pub enum ApuEvent {
} }
#[repr(u32)] #[repr(u32)]
#[derive(Serialize, Deserialize, Debug, PartialEq, Copy, Clone)] #[derive(Serialize, Deserialize, Debug, PartialOrd, PartialEq, Eq, Copy, Clone)]
pub enum EventType { pub enum EventType {
RunLimitReached,
Gpu(GpuEvent), Gpu(GpuEvent),
Apu(ApuEvent), Apu(ApuEvent),
DmaActivateChannel(usize), DmaActivateChannel(usize),
TimerOverflow(usize),
} }
#[derive(Serialize, Deserialize, Debug, Clone)] #[derive(Serialize, Deserialize, Debug, Clone, Eq)]
struct Event { pub struct Event {
typ: EventType, typ: EventType,
/// Timestamp in cycles /// Timestamp in cycles
time: usize, time: usize,
cancel: Cell<bool>,
} }
impl Event { impl Event {
fn new(typ: EventType, time: usize) -> Event { fn new(typ: EventType, time: usize) -> Event {
Event { typ, time } Event {
typ,
time,
cancel: Cell::new(false),
}
} }
#[inline]
fn get_type(&self) -> EventType { fn get_type(&self) -> EventType {
self.typ self.typ
} }
fn is_canceled(&self) -> bool {
self.cancel.get()
}
} }
impl Ord for Event {
    /// Reverse chronological ordering: the event with the *smallest*
    /// timestamp compares as the greatest, so a `BinaryHeap` (max-heap)
    /// pops the soonest-due event first.
    fn cmp(&self, other: &Self) -> Ordering {
        other.time.cmp(&self.time)
    }
}
/// Implement custom reverse ordering
///
/// Delegates to the `Ord` impl so the two orderings can never drift
/// apart (clippy: `incorrect_partial_ord_impl_on_ord_type`). `Ord`
/// already reverses the comparison on `time`, which turns the standard
/// max-heap `BinaryHeap` into a min-heap keyed on the event deadline.
/// The hand-written `lt`/`le`/`gt`/`ge` specializations were redundant:
/// the trait's default methods derive the exact same results from
/// `partial_cmp`.
impl PartialOrd for Event {
    #[inline]
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl PartialEq for Event {
    /// Two events compare equal when they fire at the same time; the
    /// payload (`typ`) is deliberately ignored, mirroring `Ord`.
    fn eq(&self, other: &Self) -> bool {
        other.time == self.time
    }
}
/// Event scheduler for cycle-aware components
/// The scheduler should be "shared" to all event generating components.
/// Each event generator software component can call Scheduler::push to generate an event later in the emulation.
/// The scheduler should be updated for each increment in CPU cycles,
///
/// The main emulation loop can then call Scheduler::pop_pending_event to handle the events.
#[derive(Serialize, Deserialize, Debug, Clone)] #[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Scheduler { pub struct Scheduler {
timestamp: usize, timestamp: usize,
events: Vec<Event>, events: BinaryHeap<Event>,
} }
pub type SharedScheduler = Shared<Scheduler>; pub type SharedScheduler = Shared<Scheduler>;
/// Callback interface for components that consume scheduler events.
pub trait EventHandler {
    /// Handle the scheduler event.
    ///
    /// `extra_cycles` is how many cycles *past* the event's deadline the
    /// emulation clock already is — events may be processed late, and the
    /// handler is expected to compensate when rescheduling.
    fn handle_event(&mut self, e: EventType, extra_cycles: usize);
}
impl Scheduler { impl Scheduler {
pub fn new_shared() -> SharedScheduler { pub fn new_shared() -> SharedScheduler {
let sched = Scheduler { let sched = Scheduler {
timestamp: 0, timestamp: 0,
events: Vec::with_capacity(NUM_EVENTS), events: BinaryHeap::with_capacity(NUM_EVENTS),
}; };
SharedScheduler::new(sched) SharedScheduler::new(sched)
} }
@ -74,46 +128,88 @@ impl Scheduler {
SharedScheduler::new(self) SharedScheduler::new(self)
} }
pub fn schedule(&mut self, typ: EventType, cycles: usize) { /// Schedule an event to be executed in `cycles` cycles from now
pub fn push(&mut self, typ: EventType, cycles: usize) {
let event = Event::new(typ, self.timestamp + cycles); let event = Event::new(typ, self.timestamp + cycles);
let idx = self self.events.push(event);
.events
.binary_search_by(|e| e.time.cmp(&event.time))
.unwrap_or_else(|x| x);
self.events.insert(idx, event);
} }
pub fn add_gpu_event(&mut self, e: GpuEvent, cycles: usize) { /// Cancel all events with type `typ`
self.schedule(EventType::Gpu(e), cycles); /// This method is rather expensive to call
pub fn cancel(&mut self, typ: EventType) {
self.events
.iter()
.filter(|e| e.typ == typ)
.for_each(|e| e.cancel.set(true));
} }
pub fn add_apu_event(&mut self, e: ApuEvent, cycles: usize) { pub fn push_gpu_event(&mut self, e: GpuEvent, cycles: usize) {
self.schedule(EventType::Apu(e), cycles); self.push(EventType::Gpu(e), cycles);
} }
pub fn run<H: EventHandler>(&mut self, cycles: usize, handler: &mut H) { pub fn push_apu_event(&mut self, e: ApuEvent, cycles: usize) {
let run_to = self.timestamp + cycles; self.push(EventType::Apu(e), cycles);
self.timestamp = run_to; }
while self.events.len() > 0 { /// Updates the scheduler timestamp
if run_to >= self.events[0].time { #[inline]
let event = self.events.remove(0); pub fn update(&mut self, cycles: usize) {
handler.handle_event(event.get_type(), run_to - event.time); self.timestamp += cycles;
}
pub fn pop_pending_event(&mut self) -> Option<(EventType, usize)> {
if let Some(event) = self.events.peek() {
if self.timestamp >= event.time {
// remove the event
let event = self.events.pop().unwrap_or_else(|| unreachable!());
if !event.is_canceled() {
Some((event.get_type(), self.timestamp - event.time))
} else {
None
}
} else { } else {
return; None
} }
} else {
None
} }
} }
/// Jump the scheduler clock straight to the next pending event's deadline.
#[inline]
pub fn fast_forward_to_next(&mut self) {
    let cycles = self.get_cycles_to_next_event();
    self.timestamp += cycles;
}
#[inline]
pub fn get_cycles_to_next_event(&self) -> usize { pub fn get_cycles_to_next_event(&self) -> usize {
assert_ne!(self.events.len(), 0); if let Some(event) = self.events.peek() {
self.events[0].time - self.timestamp event.time - self.timestamp
} else {
0
}
}
#[inline]
/// The event queue is assumed to be not empty
pub fn timestamp_of_next_event(&self) -> usize {
    match self.events.peek() {
        Some(event) => event.time,
        None => unreachable!(),
    }
}
#[inline]
pub fn timestamp(&self) -> usize {
self.timestamp
} }
#[allow(unused)] #[allow(unused)]
fn is_empty(&self) -> bool { fn is_empty(&self) -> bool {
self.events.is_empty() self.events.is_empty()
} }
/// Run the closure `f` and report how many scheduler cycles elapsed
/// while it executed (i.e. the delta of the internal timestamp).
pub fn measure_cycles<F: FnMut()>(&mut self, mut f: F) -> usize {
    let before = self.timestamp;
    f();
    self.timestamp - before
}
} }
#[cfg(test)] #[cfg(test)]
@ -158,15 +254,36 @@ mod test {
fn is_event_done(&self, e: EventType) -> bool { fn is_event_done(&self, e: EventType) -> bool {
(self.event_bitmask & get_event_bit(e)) != 0 (self.event_bitmask & get_event_bit(e)) != 0
} }
}
impl EventHandler for Holder {
fn handle_event(&mut self, e: EventType, extra_cycles: usize) { fn handle_event(&mut self, e: EventType, extra_cycles: usize) {
println!("[holder] got event {:?} extra_cycles {}", e, extra_cycles); println!("[holder] got event {:?} extra_cycles {}", e, extra_cycles);
self.event_bitmask |= get_event_bit(e); self.event_bitmask |= get_event_bit(e);
} }
} }
#[test]
fn test_scheduler_ordering() {
    // Events are pushed deliberately out of deadline order; the heap
    // must still yield the one with the earliest deadline first
    // (Psg2Generate @ 13 cycles). Note that Event equality compares
    // only the timestamp, so the assert below pins the pop *time*.
    let mut holder = Holder::new();
    let mut sched = holder.sched.clone();
    holder
        .sched
        .push(EventType::Gpu(GpuEvent::VBlankHDraw), 240);
    holder
        .sched
        .push(EventType::Apu(ApuEvent::Psg1Generate), 60);
    holder.sched.push(EventType::Apu(ApuEvent::Sample), 512);
    holder
        .sched
        .push(EventType::Apu(ApuEvent::Psg2Generate), 13);
    holder
        .sched
        .push(EventType::Apu(ApuEvent::Psg4Generate), 72);
    assert_eq!(
        sched.events.pop(),
        Some(Event::new(EventType::Apu(ApuEvent::Psg2Generate), 13))
    );
}
#[test] #[test]
fn test_scheduler() { fn test_scheduler() {
let mut holder = Holder::new(); let mut holder = Holder::new();
@ -178,17 +295,17 @@ mod test {
let mut sched = holder.sched.clone(); let mut sched = holder.sched.clone();
holder holder
.sched .sched
.schedule(EventType::Gpu(GpuEvent::VBlankHDraw), 240); .push(EventType::Gpu(GpuEvent::VBlankHDraw), 240);
holder holder
.sched .sched
.schedule(EventType::Apu(ApuEvent::Psg1Generate), 60); .push(EventType::Apu(ApuEvent::Psg1Generate), 60);
holder.sched.schedule(EventType::Apu(ApuEvent::Sample), 512); holder.sched.push(EventType::Apu(ApuEvent::Sample), 512);
holder holder
.sched .sched
.schedule(EventType::Apu(ApuEvent::Psg2Generate), 13); .push(EventType::Apu(ApuEvent::Psg2Generate), 13);
holder holder
.sched .sched
.schedule(EventType::Apu(ApuEvent::Psg4Generate), 72); .push(EventType::Apu(ApuEvent::Psg4Generate), 72);
println!("all events"); println!("all events");
for e in sched.events.iter() { for e in sched.events.iter() {
@ -199,7 +316,10 @@ mod test {
macro_rules! run_for { macro_rules! run_for {
($cycles:expr) => { ($cycles:expr) => {
println!("running the scheduler for {} cycles", $cycles); println!("running the scheduler for {} cycles", $cycles);
sched.run($cycles, &mut holder); sched.update($cycles);
while let Some((event, cycles_late)) = sched.pop_pending_event() {
holder.handle_event(event, cycles_late);
}
if (!sched.is_empty()) { if (!sched.is_empty()) {
println!( println!(
"cycles for next event: {}", "cycles for next event: {}",
@ -211,6 +331,7 @@ mod test {
run_for!(100); run_for!(100);
println!("{:?}", *sched);
assert_eq!( assert_eq!(
holder.is_event_done(EventType::Apu(ApuEvent::Psg1Generate)), holder.is_event_done(EventType::Apu(ApuEvent::Psg1Generate)),
true true

View file

@ -109,7 +109,7 @@ impl SoundController {
pub fn new(mut scheduler: SharedScheduler, audio_device_sample_rate: f32) -> SoundController { pub fn new(mut scheduler: SharedScheduler, audio_device_sample_rate: f32) -> SoundController {
let resampler = CosineResampler::new(32768_f32, audio_device_sample_rate); let resampler = CosineResampler::new(32768_f32, audio_device_sample_rate);
let cycles_per_sample = 512; let cycles_per_sample = 512;
scheduler.schedule(EventType::Apu(ApuEvent::Sample), cycles_per_sample); scheduler.push(EventType::Apu(ApuEvent::Sample), cycles_per_sample);
SoundController { SoundController {
scheduler, scheduler,
cycles_per_sample, cycles_per_sample,
@ -363,7 +363,7 @@ impl SoundController {
}); });
self.scheduler self.scheduler
.add_apu_event(ApuEvent::Sample, self.cycles_per_sample - extra_cycles); .push_apu_event(ApuEvent::Sample, self.cycles_per_sample - extra_cycles);
} }
pub fn on_event( pub fn on_event(

View file

@ -1,7 +1,6 @@
use std::fmt;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use super::arm7tdmi::memory::*;
use super::bus::*; use super::bus::*;
use super::cartridge::Cartridge; use super::cartridge::Cartridge;
use super::dma::DmaNotifer; use super::dma::DmaNotifer;
@ -45,32 +44,6 @@ pub mod consts {
use consts::*; use consts::*;
#[derive(Debug, Copy, Clone)]
pub enum MemoryAccessType {
NonSeq,
Seq,
}
impl fmt::Display for MemoryAccessType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{}",
match self {
MemoryAccessType::NonSeq => "N",
MemoryAccessType::Seq => "S",
}
)
}
}
#[derive(Debug, PartialEq, Copy, Clone)]
pub enum MemoryAccessWidth {
MemoryAccess8,
MemoryAccess16,
MemoryAccess32,
}
const CYCLE_LUT_SIZE: usize = 0x10; const CYCLE_LUT_SIZE: usize = 0x10;
#[derive(Serialize, Deserialize, Clone)] #[derive(Serialize, Deserialize, Clone)]
@ -261,33 +234,34 @@ impl SysBus {
pub fn on_waitcnt_written(&mut self, waitcnt: WaitControl) { pub fn on_waitcnt_written(&mut self, waitcnt: WaitControl) {
self.cycle_luts.update_gamepak_waitstates(waitcnt); self.cycle_luts.update_gamepak_waitstates(waitcnt);
} }
/// Account for one internal (non-bus) CPU cycle by advancing the
/// scheduler clock by 1.
pub fn idle_cycle(&mut self) {
    self.scheduler.update(1);
}
#[inline(always)] #[inline(always)]
pub fn get_cycles( pub fn add_cycles(&mut self, addr: Addr, access: MemoryAccess, width: MemoryAccessWidth) {
&self, use MemoryAccess::*;
addr: Addr,
access: MemoryAccessType,
width: MemoryAccessWidth,
) -> usize {
use MemoryAccessType::*;
use MemoryAccessWidth::*; use MemoryAccessWidth::*;
let page = (addr >> 24) as usize; let page = (addr >> 24) as usize;
// TODO optimize out by making the LUTs have 0x100 entries for each possible page ? // TODO optimize out by making the LUTs have 0x100 entries for each possible page ?
if page > 0xF { let cycles = if page > 0xF {
// open bus // open bus
return 1; 1
} } else {
match width { match width {
MemoryAccess8 | MemoryAccess16 => match access { MemoryAccess8 | MemoryAccess16 => match access {
NonSeq => self.cycle_luts.n_cycles16[page], NonSeq => self.cycle_luts.n_cycles16[page],
Seq => self.cycle_luts.s_cycles16[page], Seq => self.cycle_luts.s_cycles16[page],
}, },
MemoryAccess32 => match access { MemoryAccess32 => match access {
NonSeq => self.cycle_luts.n_cycles32[page], NonSeq => self.cycle_luts.n_cycles32[page],
Seq => self.cycle_luts.s_cycles32[page], Seq => self.cycle_luts.s_cycles32[page],
}, },
} }
};
self.scheduler.update(cycles);
} }
} }
@ -477,6 +451,49 @@ impl DebugRead for SysBus {
} }
} }
/// Bus accesses charge the scheduler with the access cost for the
/// given page/access-kind/width *before* performing the read or write.
impl MemoryInterface for SysBus {
    #[inline]
    fn load_8(&mut self, addr: u32, access: MemoryAccess) -> u8 {
        self.add_cycles(addr, access, MemoryAccessWidth::MemoryAccess8);
        self.read_8(addr)
    }

    #[inline]
    fn load_16(&mut self, addr: u32, access: MemoryAccess) -> u16 {
        self.add_cycles(addr, access, MemoryAccessWidth::MemoryAccess16);
        self.read_16(addr)
    }

    #[inline]
    fn load_32(&mut self, addr: u32, access: MemoryAccess) -> u32 {
        self.add_cycles(addr, access, MemoryAccessWidth::MemoryAccess32);
        self.read_32(addr)
    }

    #[inline]
    fn store_8(&mut self, addr: u32, value: u8, access: MemoryAccess) {
        self.add_cycles(addr, access, MemoryAccessWidth::MemoryAccess8);
        self.write_8(addr, value);
    }

    #[inline]
    fn store_16(&mut self, addr: u32, value: u16, access: MemoryAccess) {
        // BUGFIX: was MemoryAccess8 — halfword stores must be charged the
        // 16-bit access cost (same LUT as load_16), otherwise store timing
        // diverges from load timing on 16-bit-bus regions.
        self.add_cycles(addr, access, MemoryAccessWidth::MemoryAccess16);
        self.write_16(addr, value);
    }

    #[inline]
    fn store_32(&mut self, addr: u32, value: u32, access: MemoryAccess) {
        // BUGFIX: was MemoryAccess8 — word stores must be charged the
        // 32-bit access cost (n_cycles32/s_cycles32 LUTs), matching load_32.
        self.add_cycles(addr, access, MemoryAccessWidth::MemoryAccess32);
        self.write_32(addr, value);
    }

    /// Internal cycle: no bus access, always exactly 1 cycle.
    #[inline]
    fn idle_cycle(&mut self) {
        self.scheduler.update(1)
    }
}
impl DmaNotifer for SysBus { impl DmaNotifer for SysBus {
fn notify(&mut self, timing: u16) { fn notify(&mut self, timing: u16) {
self.io.dmac.notify_from_gpu(timing); self.io.dmac.notify_from_gpu(timing);

View file

@ -1,6 +1,8 @@
use super::dma::DmaController;
use super::interrupt::{self, Interrupt, InterruptConnect, SharedInterruptFlags}; use super::interrupt::{self, Interrupt, InterruptConnect, SharedInterruptFlags};
use super::iodev::consts::*; use super::iodev::consts::*;
use super::sysbus::SysBus; use super::sched::{EventType, Scheduler, SharedScheduler};
use super::sound::SoundController;
use num::FromPrimitive; use num::FromPrimitive;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
@ -14,10 +16,12 @@ pub struct Timer {
pub data: u16, pub data: u16,
pub initial_data: u16, pub initial_data: u16,
start_time: usize,
is_scheduled: bool,
irq: Interrupt, irq: Interrupt,
interrupt_flags: SharedInterruptFlags, interrupt_flags: SharedInterruptFlags,
timer_id: usize, timer_id: usize,
cycles: usize,
prescalar_shift: usize, prescalar_shift: usize,
} }
@ -33,8 +37,9 @@ impl Timer {
data: 0, data: 0,
ctl: TimerCtl(0), ctl: TimerCtl(0),
initial_data: 0, initial_data: 0,
cycles: 0,
prescalar_shift: 0, prescalar_shift: 0,
start_time: 0,
is_scheduled: false,
} }
} }
@ -43,6 +48,21 @@ impl Timer {
0x1_0000 - (self.data as u32) 0x1_0000 - (self.data as u32)
} }
/// Bring `data` up to date with the scheduler clock.
///
/// While a timer is scheduler-driven its counter is not ticked every
/// cycle; the current register value is derived on demand from the
/// elapsed time since the timer was (re)started, scaled by the
/// prescaler.
#[inline]
fn sync_timer_data(&mut self, timestamp: usize) {
    // NOTE(review): assumes `timestamp >= self.start_time` — holds as
    // long as callers pass the current scheduler timestamp.
    let ticks_passed = (timestamp - self.start_time) >> self.prescalar_shift;
    // wrapping_add: `data` models a 16-bit hardware counter. Exactly at
    // the overflow boundary `data + ticks` can exceed 0xFFFF, and a
    // plain `+=` would panic in debug builds; wrapping matches the
    // hardware register semantics and release-build behavior.
    self.data = self.data.wrapping_add(ticks_passed as u16);
}
/// Handle a single counter overflow: reload the counter from
/// `initial_data` and raise this timer's IRQ if enabled in the control
/// register.
#[inline]
fn overflow(&mut self) {
    // reload counter
    self.data = self.initial_data;
    if self.ctl.irq_enabled() {
        interrupt::signal_irq(&self.interrupt_flags, self.irq);
    }
}
/// increments the timer with an amount of ticks /// increments the timer with an amount of ticks
/// returns the number of times it overflowed /// returns the number of times it overflowed
fn update(&mut self, ticks: usize) -> usize { fn update(&mut self, ticks: usize) -> usize {
@ -73,6 +93,9 @@ impl Timer {
#[derive(Serialize, Deserialize, Clone, Debug)] #[derive(Serialize, Deserialize, Clone, Debug)]
pub struct Timers { pub struct Timers {
#[serde(skip)]
#[serde(default = "Scheduler::new_shared")]
scheduler: SharedScheduler,
timers: [Timer; 4], timers: [Timer; 4],
running_timers: u8, running_timers: u8,
pub trace: bool, pub trace: bool,
@ -100,8 +123,9 @@ impl std::ops::IndexMut<usize> for Timers {
} }
impl Timers { impl Timers {
pub fn new(interrupt_flags: SharedInterruptFlags) -> Timers { pub fn new(interrupt_flags: SharedInterruptFlags, scheduler: SharedScheduler) -> Timers {
Timers { Timers {
scheduler,
timers: [ timers: [
Timer::new(0, interrupt_flags.clone()), Timer::new(0, interrupt_flags.clone()),
Timer::new(1, interrupt_flags.clone()), Timer::new(1, interrupt_flags.clone()),
@ -113,18 +137,75 @@ impl Timers {
} }
} }
/// Mark timer `id` as scheduler-driven and queue its next overflow event.
fn add_timer_event(&mut self, id: usize) {
    let now = self.scheduler.timestamp();
    let timer = &mut self.timers[id];
    timer.is_scheduled = true;
    timer.start_time = now;
    // Cycles until overflow = remaining ticks scaled up by the prescaler.
    let cycles_until_overflow =
        (timer.ticks_to_overflow() as usize) << timer.prescalar_shift;
    self.scheduler
        .push(EventType::TimerOverflow(id), cycles_until_overflow);
}
/// Cancel any pending overflow event for timer `id` and mark the timer
/// as no longer scheduler-driven.
///
/// Note: `Scheduler::cancel` scans the whole event queue, so this call
/// is comparatively expensive.
fn cancel_timer_event(&mut self, id: usize) {
    self.scheduler.cancel(EventType::TimerOverflow(id));
    self[id].is_scheduled = false;
}
/// Propagate a timer overflow: reload/IRQ this timer, tick a cascading
/// successor (recursing if that tick overflows it in turn), and notify
/// the APU for the sound FIFO timers (0 and 1).
fn handle_timer_overflow(
    &mut self,
    id: usize,
    apu: &mut SoundController,
    dmac: &mut DmaController,
) {
    self[id].overflow();
    if id != 3 {
        // Timers 0..=2 may drive the next timer in count-up (cascade) mode.
        let next_timer_id = id + 1;
        // Restructured so the mutable borrow of `self.timers` ends before
        // the recursive call; the original's `drop(&mut ref)` was a no-op
        // (clippy: `dropping_references`).
        if self.timers[next_timer_id].ctl.cascade()
            && self.timers[next_timer_id].update(1) > 0
        {
            self.handle_timer_overflow(next_timer_id, apu, dmac);
        }
    }
    if id == 0 || id == 1 {
        // Timers 0/1 can clock the sound FIFOs.
        apu.handle_timer_overflow(dmac, id, 1);
    }
}
/// Scheduler callback for a `TimerOverflow(id)` event.
///
/// Runs the overflow side effects (reload, IRQ, cascade, APU), then
/// re-schedules the next overflow. `extra_cycles` is how late the event
/// is being processed; the new `start_time` and the scheduled delay are
/// both back-dated by that amount so timing does not drift.
pub fn handle_overflow_event(
    &mut self,
    id: usize,
    extra_cycles: usize,
    apu: &mut SoundController,
    dmac: &mut DmaController,
) {
    self.handle_timer_overflow(id, apu, dmac);

    // TODO: re-use add_timer_event function
    let timer = &mut self.timers[id];
    timer.is_scheduled = true;
    timer.start_time = self.scheduler.timestamp() - extra_cycles;
    let cycles = (timer.ticks_to_overflow() as usize) << timer.prescalar_shift;
    // NOTE(review): assumes `extra_cycles <= cycles`; a very late event on
    // a short-period timer would underflow here — TODO confirm upstream
    // guarantees.
    self.scheduler
        .push(EventType::TimerOverflow(id), cycles - extra_cycles);
}
pub fn write_timer_ctl(&mut self, id: usize, value: u16) { pub fn write_timer_ctl(&mut self, id: usize, value: u16) {
let timer = &mut self.timers[id];
let new_ctl = TimerCtl(value); let new_ctl = TimerCtl(value);
let old_enabled = self[id].ctl.enabled(); let old_enabled = timer.ctl.enabled();
let new_enabled = new_ctl.enabled(); let new_enabled = new_ctl.enabled();
let cascade = new_ctl.cascade(); let cascade = new_ctl.cascade();
self[id].cycles = 0; timer.prescalar_shift = SHIFT_LUT[new_ctl.prescalar() as usize];
self[id].prescalar_shift = SHIFT_LUT[new_ctl.prescalar() as usize]; timer.ctl = new_ctl;
self[id].ctl = new_ctl;
if new_enabled && !cascade { if new_enabled && !cascade {
self.running_timers |= 1 << id; self.running_timers |= 1 << id;
self.cancel_timer_event(id);
self.add_timer_event(id);
} else { } else {
self.running_timers &= !(1 << id); self.running_timers &= !(1 << id);
self.cancel_timer_event(id);
} }
if old_enabled != new_enabled { if old_enabled != new_enabled {
trace!( trace!(
@ -135,16 +216,28 @@ impl Timers {
} }
} }
pub fn handle_read(&self, io_addr: u32) -> u16 { #[inline]
fn read_timer_data(&mut self, id: usize) -> u16 {
let timer = &mut self.timers[id];
if timer.is_scheduled {
// this timer is controlled by the scheduler so we need to manually calculate
// the current value of the counter
timer.sync_timer_data(self.scheduler.timestamp());
}
timer.data
}
pub fn handle_read(&mut self, io_addr: u32) -> u16 {
match io_addr { match io_addr {
REG_TM0CNT_L => self.timers[0].data,
REG_TM0CNT_H => self.timers[0].ctl.0, REG_TM0CNT_H => self.timers[0].ctl.0,
REG_TM1CNT_L => self.timers[1].data,
REG_TM1CNT_H => self.timers[1].ctl.0, REG_TM1CNT_H => self.timers[1].ctl.0,
REG_TM2CNT_L => self.timers[2].data,
REG_TM2CNT_H => self.timers[2].ctl.0, REG_TM2CNT_H => self.timers[2].ctl.0,
REG_TM3CNT_L => self.timers[3].data,
REG_TM3CNT_H => self.timers[3].ctl.0, REG_TM3CNT_H => self.timers[3].ctl.0,
REG_TM0CNT_L => self.read_timer_data(0),
REG_TM1CNT_L => self.read_timer_data(1),
REG_TM2CNT_L => self.read_timer_data(2),
REG_TM3CNT_L => self.read_timer_data(3),
_ => unreachable!(), _ => unreachable!(),
} }
} }
@ -177,37 +270,6 @@ impl Timers {
_ => unreachable!(), _ => unreachable!(),
} }
} }
pub fn update(&mut self, cycles: usize, sb: &mut SysBus) {
for id in 0..4 {
if self.running_timers & (1 << id) == 0 {
continue;
}
if !self.timers[id].ctl.cascade() {
let timer = &mut self.timers[id];
let cycles = timer.cycles + cycles;
let inc = cycles >> timer.prescalar_shift;
let num_overflows = timer.update(inc);
timer.cycles = cycles & ((1 << timer.prescalar_shift) - 1);
if num_overflows > 0 {
if id != 3 {
let next_timer = &mut self.timers[id + 1];
if next_timer.ctl.cascade() {
next_timer.update(num_overflows);
}
}
if id == 0 || id == 1 {
let io = unsafe { sb.io.inner_unsafe() };
io.sound
.handle_timer_overflow(&mut io.dmac, id, num_overflows);
}
}
}
}
}
} }
bitfield! { bitfield! {