Everyday I'm ~~shuffling~~ refactoring.

Some big refactors:
* improve scheduler performance by using a BinaryHeap
* refactor the scheduler API
* arm7tdmi:
    * Change the arm7tdmi::Core struct layout so frequently accessed fields benefit from the CPU cache
    * Simplify and clean up cycle counting by implementing a MemoryInterface trait
    * Still not passing many cycle-accuracy tests, but I believe it's because I don't have the prefetch buffer yet.
* Timer overflows are now scheduled
* This fixes #111 and fixes #112

Former-commit-id: 17989e841a1ea88c2a7e14f4c99b31790a43c023
Former-commit-id: 109d98d824a464de347f6590a6ffe9af86b4b4ea
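The scheduler itself is not touched by the hunks below, but the first bullet is easy to picture with a minimal sketch: a min-heap of (target cycle, event) pairs that is popped whenever emulated time passes an event's deadline. Everything in this sketch — the Event variants, the Scheduler shape, the update callback — is illustrative and assumed for the example; it is not the crate's actual scheduler API.

// Hypothetical sketch, not code from this commit.
use std::cmp::{Ordering, Reverse};
use std::collections::BinaryHeap;

/// Illustrative event kinds; the real emulator schedules timer overflows, DMA, PPU events, etc.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Event {
    TimerOverflow(usize),
    HBlank,
}

/// An event tagged with the absolute cycle count at which it should fire.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct ScheduledEvent {
    time: usize,
    event: Event,
}

// Order by deadline so that, wrapped in `Reverse`, the heap pops the earliest event first.
impl Ord for ScheduledEvent {
    fn cmp(&self, other: &Self) -> Ordering {
        self.time.cmp(&other.time)
    }
}

impl PartialOrd for ScheduledEvent {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

/// Minimal scheduler: a min-heap of pending events keyed by target cycle.
struct Scheduler {
    timestamp: usize,
    events: BinaryHeap<Reverse<ScheduledEvent>>,
}

impl Scheduler {
    fn new() -> Self {
        Scheduler { timestamp: 0, events: BinaryHeap::new() }
    }

    /// Schedule `event` to fire `cycles_from_now` cycles in the future: O(log n).
    fn schedule(&mut self, event: Event, cycles_from_now: usize) {
        self.events.push(Reverse(ScheduledEvent {
            time: self.timestamp + cycles_from_now,
            event,
        }));
    }

    /// Advance emulated time and hand every expired event to `handler`.
    fn update(&mut self, cycles: usize, mut handler: impl FnMut(Event)) {
        self.timestamp += cycles;
        while let Some(Reverse(entry)) = self.events.peek().copied() {
            if entry.time > self.timestamp {
                break;
            }
            self.events.pop();
            handler(entry.event);
        }
    }
}

fn main() {
    let mut sched = Scheduler::new();
    sched.schedule(Event::TimerOverflow(0), 1024);
    sched.schedule(Event::HBlank, 960);
    // Run 2000 cycles worth of CPU work, then fire whatever came due.
    sched.update(2000, |ev| println!("fired: {:?}", ev));
}

Compared with rescanning a flat list of pending events on every step, the heap keeps scheduling and next-deadline lookups cheap (O(log n) push, O(1) peek), which is the performance point the commit message is making.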
This commit is contained in: parent 85db28dac6, commit b6e2d55550
@ -227,27 +227,10 @@ fn arm_format_to_handler(arm_fmt: &str) -> &'static str {
|
|||
}
|
||||
|
||||
fn generate_thumb_lut(file: &mut fs::File) -> Result<(), std::io::Error> {
|
||||
writeln!(file, "impl<I: MemoryInterface> Core<I> {{")?;
|
||||
writeln!(
|
||||
file,
|
||||
"/// This file is auto-generated from the build script
|
||||
|
||||
#[cfg(feature = \"debugger\")]
|
||||
use super::thumb::ThumbFormat;
|
||||
|
||||
pub type ThumbInstructionHandler = fn(&mut Core, &mut SysBus, insn: u16) -> CpuAction;
|
||||
|
||||
#[cfg_attr(not(feature = \"debugger\"), repr(transparent))]
|
||||
pub struct ThumbInstructionInfo {{
|
||||
pub handler_fn: ThumbInstructionHandler,
|
||||
#[cfg(feature = \"debugger\")]
|
||||
pub fmt: ThumbFormat,
|
||||
}}
|
||||
"
|
||||
)?;
|
||||
|
||||
writeln!(
|
||||
file,
|
||||
"pub const THUMB_LUT: [ThumbInstructionInfo; 1024] = ["
|
||||
" pub const THUMB_LUT: [ThumbInstructionInfo<I>; 1024] = ["
|
||||
)?;
|
||||
|
||||
for i in 0..1024 {
|
||||
|
@ -255,56 +238,44 @@ pub struct ThumbInstructionInfo {{
|
|||
let handler_name = thumb_format_to_handler(thumb_fmt);
|
||||
writeln!(
|
||||
file,
|
||||
" /* {:#x} */
|
||||
ThumbInstructionInfo {{
|
||||
handler_fn: Core::{},
|
||||
#[cfg(feature = \"debugger\")]
|
||||
fmt: ThumbFormat::{},
|
||||
}},",
|
||||
" /* {:#x} */
|
||||
ThumbInstructionInfo {{
|
||||
handler_fn: Core::{},
|
||||
#[cfg(feature = \"debugger\")]
|
||||
fmt: ThumbFormat::{},
|
||||
}},",
|
||||
i, handler_name, thumb_fmt
|
||||
)?;
|
||||
}
|
||||
|
||||
writeln!(file, "];")?;
|
||||
writeln!(file, " ];")?;
|
||||
writeln!(file, "}}")?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn generate_arm_lut(file: &mut fs::File) -> Result<(), std::io::Error> {
|
||||
writeln!(file, "impl<I: MemoryInterface> Core<I> {{")?;
|
||||
writeln!(
|
||||
file,
|
||||
"/// This file is auto-generated from the build script
|
||||
|
||||
#[cfg(feature = \"debugger\")]
|
||||
use super::arm::ArmFormat;
|
||||
|
||||
pub type ArmInstructionHandler = fn(&mut Core, &mut SysBus, insn: u32) -> CpuAction;
|
||||
|
||||
#[cfg_attr(not(feature = \"debugger\"), repr(transparent))]
|
||||
pub struct ArmInstructionInfo {{
|
||||
pub handler_fn: ArmInstructionHandler,
|
||||
#[cfg(feature = \"debugger\")]
|
||||
pub fmt: ArmFormat,
|
||||
}}
|
||||
"
|
||||
" pub const ARM_LUT: [ArmInstructionInfo<I>; 4096] = ["
|
||||
)?;
|
||||
|
||||
writeln!(file, "pub const ARM_LUT: [ArmInstructionInfo; 4096] = [")?;
|
||||
for i in 0..4096 {
|
||||
let arm_fmt = arm_decode(((i & 0xff0) << 16) | ((i & 0x00f) << 4));
|
||||
let handler_name = arm_format_to_handler(arm_fmt);
|
||||
writeln!(
|
||||
file,
|
||||
" /* {:#x} */
|
||||
ArmInstructionInfo {{
|
||||
handler_fn: Core::{},
|
||||
#[cfg(feature = \"debugger\")]
|
||||
fmt: ArmFormat::{},
|
||||
}} ,",
|
||||
" /* {:#x} */
|
||||
ArmInstructionInfo {{
|
||||
handler_fn: Core::{},
|
||||
#[cfg(feature = \"debugger\")]
|
||||
fmt: ArmFormat::{},
|
||||
}} ,",
|
||||
i, handler_name, arm_fmt
|
||||
)?;
|
||||
}
|
||||
writeln!(file, "];")?;
|
||||
writeln!(file, " ];")?;
|
||||
writeln!(file, "}}")?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
use bit::BitIndex;
|
||||
|
||||
use super::memory::MemoryInterface;
|
||||
use super::{Core, REG_PC};
|
||||
|
||||
#[derive(Debug, Primitive, PartialEq)]
|
||||
|
@ -109,7 +110,7 @@ impl BarrelShifterValue {
|
|||
}
|
||||
}
|
||||
|
||||
impl Core {
|
||||
impl<I: MemoryInterface> Core<I> {
|
||||
pub fn lsl(&mut self, val: u32, amount: u32, carry_in: bool) -> u32 {
|
||||
match amount {
|
||||
0 => {
|
||||
|
@ -215,6 +216,7 @@ impl Core {
|
|||
}
|
||||
|
||||
/// Performs a generic barrel shifter operation
|
||||
#[inline]
|
||||
pub fn barrel_shift_op(
|
||||
&mut self,
|
||||
shift: BarrelShiftOpCode,
|
||||
|
@ -253,6 +255,7 @@ impl Core {
|
|||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn shift_by_register(
|
||||
&mut self,
|
||||
bs_op: BarrelShiftOpCode,
|
||||
|
@ -261,7 +264,6 @@ impl Core {
|
|||
carry: bool,
|
||||
) -> u32 {
|
||||
let mut val = self.get_reg(reg);
|
||||
self.add_cycle(); // +1I
|
||||
if reg == REG_PC {
|
||||
val += 4; // PC prefetching
|
||||
}
|
||||
|
|
|
@ -4,37 +4,36 @@ use super::super::alu::*;
|
|||
use crate::arm7tdmi::psr::RegPSR;
|
||||
use crate::arm7tdmi::CpuAction;
|
||||
use crate::arm7tdmi::{Addr, Core, CpuMode, CpuState, REG_LR, REG_PC};
|
||||
use crate::sysbus::SysBus;
|
||||
use crate::Bus;
|
||||
|
||||
use super::super::memory::{MemoryAccess, MemoryInterface};
|
||||
use MemoryAccess::*;
|
||||
|
||||
use super::ArmDecodeHelper;
|
||||
use super::*;
|
||||
|
||||
impl Core {
|
||||
impl<I: MemoryInterface> Core<I> {
|
||||
#[cfg(not(feature = "arm7tdmi_dispatch_table"))]
|
||||
pub fn exec_arm(&mut self, bus: &mut SysBus, insn: u32, fmt: ArmFormat) -> CpuAction {
|
||||
pub fn exec_arm(&mut self, insn: u32, fmt: ArmFormat) -> CpuAction {
|
||||
match fmt {
|
||||
ArmFormat::BranchExchange => self.exec_arm_bx(bus, insn),
|
||||
ArmFormat::BranchLink => self.exec_arm_b_bl(bus, insn),
|
||||
ArmFormat::DataProcessing => self.exec_arm_data_processing(bus, insn),
|
||||
ArmFormat::SoftwareInterrupt => self.exec_arm_swi(bus, insn),
|
||||
ArmFormat::SingleDataTransfer => self.exec_arm_ldr_str(bus, insn),
|
||||
ArmFormat::HalfwordDataTransferImmediateOffset => {
|
||||
self.exec_arm_ldr_str_hs_imm(bus, insn)
|
||||
}
|
||||
ArmFormat::HalfwordDataTransferRegOffset => self.exec_arm_ldr_str_hs_reg(bus, insn),
|
||||
ArmFormat::BlockDataTransfer => self.exec_arm_ldm_stm(bus, insn),
|
||||
ArmFormat::MoveFromStatus => self.exec_arm_mrs(bus, insn),
|
||||
ArmFormat::MoveToStatus => self.exec_arm_transfer_to_status(bus, insn),
|
||||
ArmFormat::MoveToFlags => self.exec_arm_transfer_to_status(bus, insn),
|
||||
ArmFormat::Multiply => self.exec_arm_mul_mla(bus, insn),
|
||||
ArmFormat::MultiplyLong => self.exec_arm_mull_mlal(bus, insn),
|
||||
ArmFormat::SingleDataSwap => self.exec_arm_swp(bus, insn),
|
||||
ArmFormat::Undefined => self.arm_undefined(bus, insn),
|
||||
ArmFormat::BranchExchange => self.exec_arm_bx(insn),
|
||||
ArmFormat::BranchLink => self.exec_arm_b_bl(insn),
|
||||
ArmFormat::DataProcessing => self.exec_arm_data_processing(insn),
|
||||
ArmFormat::SoftwareInterrupt => self.exec_arm_swi(insn),
|
||||
ArmFormat::SingleDataTransfer => self.exec_arm_ldr_str(insn),
|
||||
ArmFormat::HalfwordDataTransferImmediateOffset => self.exec_arm_ldr_str_hs_imm(insn),
|
||||
ArmFormat::HalfwordDataTransferRegOffset => self.exec_arm_ldr_str_hs_reg(insn),
|
||||
ArmFormat::BlockDataTransfer => self.exec_arm_ldm_stm(insn),
|
||||
ArmFormat::MoveFromStatus => self.exec_arm_mrs(insn),
|
||||
ArmFormat::MoveToStatus => self.exec_arm_transfer_to_status(insn),
|
||||
ArmFormat::MoveToFlags => self.exec_arm_transfer_to_status(insn),
|
||||
ArmFormat::Multiply => self.exec_arm_mul_mla(insn),
|
||||
ArmFormat::MultiplyLong => self.exec_arm_mull_mlal(insn),
|
||||
ArmFormat::SingleDataSwap => self.exec_arm_swp(insn),
|
||||
ArmFormat::Undefined => self.arm_undefined(insn),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn arm_undefined(&mut self, _: &mut SysBus, insn: u32) -> CpuAction {
|
||||
pub fn arm_undefined(&mut self, insn: u32) -> CpuAction {
|
||||
panic!(
|
||||
"executing undefined arm instruction {:08x} at @{:08x}",
|
||||
insn,
|
||||
|
@ -42,63 +41,51 @@ impl Core {
|
|||
)
|
||||
}
|
||||
|
||||
/// Cycles 2S+1N
|
||||
pub fn exec_arm_b_bl(&mut self, sb: &mut SysBus, insn: u32) -> CpuAction {
|
||||
self.S_cycle32(sb, self.pc);
|
||||
/// Branch and Branch with Link (B, BL)
|
||||
/// Execution Time: 2S + 1N
|
||||
pub fn exec_arm_b_bl(&mut self, insn: u32) -> CpuAction {
|
||||
if insn.link_flag() {
|
||||
self.set_reg(REG_LR, (self.pc_arm() + (self.word_size() as u32)) & !0b1);
|
||||
}
|
||||
|
||||
self.pc = (self.pc as i32).wrapping_add(insn.branch_offset()) as u32 & !1;
|
||||
|
||||
self.reload_pipeline32(sb);
|
||||
CpuAction::FlushPipeline
|
||||
self.reload_pipeline32(); // Implies 2S + 1N
|
||||
CpuAction::PipelineFlushed
|
||||
}
|
||||
|
||||
pub fn branch_exchange(&mut self, sb: &mut SysBus, mut addr: Addr) -> CpuAction {
|
||||
match self.cpsr.state() {
|
||||
CpuState::ARM => self.S_cycle32(sb, self.pc),
|
||||
CpuState::THUMB => self.S_cycle16(sb, self.pc),
|
||||
}
|
||||
pub fn branch_exchange(&mut self, mut addr: Addr) -> CpuAction {
|
||||
if addr.bit(0) {
|
||||
addr = addr & !0x1;
|
||||
self.cpsr.set_state(CpuState::THUMB);
|
||||
self.pc = addr;
|
||||
self.reload_pipeline16(sb);
|
||||
self.reload_pipeline16();
|
||||
} else {
|
||||
addr = addr & !0x3;
|
||||
self.cpsr.set_state(CpuState::ARM);
|
||||
self.pc = addr;
|
||||
self.reload_pipeline32(sb);
|
||||
self.reload_pipeline32();
|
||||
}
|
||||
|
||||
CpuAction::FlushPipeline
|
||||
CpuAction::PipelineFlushed
|
||||
}
|
||||
|
||||
/// Branch and Exchange (BX)
|
||||
/// Cycles 2S+1N
|
||||
pub fn exec_arm_bx(&mut self, sb: &mut SysBus, insn: u32) -> CpuAction {
|
||||
self.branch_exchange(sb, self.get_reg(insn.bit_range(0..4) as usize))
|
||||
pub fn exec_arm_bx(&mut self, insn: u32) -> CpuAction {
|
||||
self.branch_exchange(self.get_reg(insn.bit_range(0..4) as usize))
|
||||
}
|
||||
|
||||
fn move_from_status_register(
|
||||
&mut self,
|
||||
sb: &mut SysBus,
|
||||
rd: usize,
|
||||
is_spsr: bool,
|
||||
) -> CpuAction {
|
||||
let result = if is_spsr {
|
||||
/// Move from status register
|
||||
/// 1S
|
||||
pub fn exec_arm_mrs(&mut self, insn: u32) -> CpuAction {
|
||||
let rd = insn.bit_range(12..16) as usize;
|
||||
let result = if insn.spsr_flag() {
|
||||
self.spsr.get()
|
||||
} else {
|
||||
self.cpsr.get()
|
||||
};
|
||||
self.set_reg(rd, result);
|
||||
self.S_cycle32(sb, self.pc);
|
||||
|
||||
CpuAction::AdvancePC
|
||||
}
|
||||
|
||||
pub fn exec_arm_mrs(&mut self, sb: &mut SysBus, insn: u32) -> CpuAction {
|
||||
self.move_from_status_register(sb, insn.bit_range(12..16) as usize, insn.spsr_flag())
|
||||
CpuAction::AdvancePC(Seq)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
|
@ -112,8 +99,9 @@ impl Core {
|
|||
}
|
||||
}
|
||||
|
||||
// #[cfg(feature = "arm7tdmi_dispatch_table")]
|
||||
pub fn exec_arm_transfer_to_status(&mut self, sb: &mut SysBus, insn: u32) -> CpuAction {
|
||||
/// Move to status register
|
||||
/// 1S
|
||||
pub fn exec_arm_transfer_to_status(&mut self, insn: u32) -> CpuAction {
|
||||
let value = self.decode_msr_param(insn);
|
||||
|
||||
let f = insn.bit(19);
|
||||
|
@ -158,9 +146,8 @@ impl Core {
|
|||
}
|
||||
}
|
||||
}
|
||||
self.S_cycle32(sb, self.pc);
|
||||
|
||||
CpuAction::AdvancePC
|
||||
CpuAction::AdvancePC(Seq)
|
||||
}
|
||||
|
||||
fn transfer_spsr_mode(&mut self) {
|
||||
|
@ -175,11 +162,9 @@ impl Core {
|
|||
///
|
||||
/// Cycles: 1S+x+y (from GBATEK)
|
||||
/// Add x=1I cycles if Op2 shifted-by-register. Add y=1S+1N cycles if Rd=R15.
|
||||
pub fn exec_arm_data_processing(&mut self, sb: &mut SysBus, insn: u32) -> CpuAction {
|
||||
pub fn exec_arm_data_processing(&mut self, insn: u32) -> CpuAction {
|
||||
use AluOpCode::*;
|
||||
|
||||
self.S_cycle32(sb, self.pc);
|
||||
|
||||
let rn = insn.bit_range(16..20) as usize;
|
||||
let rd = insn.bit_range(12..16) as usize;
|
||||
let mut op1 = if rn == REG_PC {
|
||||
|
@ -204,7 +189,7 @@ impl Core {
|
|||
if rn == REG_PC {
|
||||
op1 += 4;
|
||||
}
|
||||
|
||||
self.idle_cycle();
|
||||
let rs = insn.bit_range(8..12) as usize;
|
||||
ShiftRegisterBy::ByRegister(rs)
|
||||
} else {
|
||||
|
@ -270,16 +255,16 @@ impl Core {
|
|||
})
|
||||
};
|
||||
|
||||
let mut result = CpuAction::AdvancePC;
|
||||
let mut result = CpuAction::AdvancePC(Seq);
|
||||
if let Some(alu_res) = alu_res {
|
||||
self.set_reg(rd, alu_res as u32);
|
||||
if rd == REG_PC {
|
||||
// T bit might have changed
|
||||
match self.cpsr.state() {
|
||||
CpuState::ARM => self.reload_pipeline32(sb),
|
||||
CpuState::THUMB => self.reload_pipeline16(sb),
|
||||
CpuState::ARM => self.reload_pipeline32(),
|
||||
CpuState::THUMB => self.reload_pipeline16(),
|
||||
};
|
||||
result = CpuAction::FlushPipeline;
|
||||
result = CpuAction::PipelineFlushed;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -293,8 +278,8 @@ impl Core {
|
|||
/// STR{cond}{B}{T} Rd,<Address> | 2N | ---- | [Rn+/-<offset>]=Rd
|
||||
/// ------------------------------------------------------------------------------
|
||||
/// For LDR, add y=1S+1N if Rd=R15.
|
||||
pub fn exec_arm_ldr_str(&mut self, sb: &mut SysBus, insn: u32) -> CpuAction {
|
||||
let mut result = CpuAction::AdvancePC;
|
||||
pub fn exec_arm_ldr_str(&mut self, insn: u32) -> CpuAction {
|
||||
let mut result = CpuAction::AdvancePC(NonSeq);
|
||||
|
||||
let load = insn.load_flag();
|
||||
let pre_index = insn.pre_index_flag();
|
||||
|
@ -305,7 +290,7 @@ impl Core {
|
|||
if base_reg == REG_PC {
|
||||
addr = self.pc_arm() + 8; // prefetching
|
||||
}
|
||||
let offset = self.get_barrel_shifted_value(&insn.ldr_str_offset());
|
||||
let offset = self.get_barrel_shifted_value(&insn.ldr_str_offset()); // TODO: wrong to use in here
|
||||
let effective_addr = (addr as i32).wrapping_add(offset as i32) as Addr;
|
||||
|
||||
// TODO - confirm this
|
||||
|
@ -321,23 +306,20 @@ impl Core {
|
|||
};
|
||||
|
||||
if load {
|
||||
self.S_cycle32(sb, self.pc);
|
||||
let data = if insn.transfer_size() == 1 {
|
||||
self.N_cycle8(sb, addr);
|
||||
sb.read_8(addr) as u32
|
||||
self.load_8(addr, NonSeq) as u32
|
||||
} else {
|
||||
self.N_cycle32(sb, addr);
|
||||
self.ldr_word(addr, sb)
|
||||
self.ldr_word(addr, NonSeq)
|
||||
};
|
||||
|
||||
self.set_reg(dest_reg, data);
|
||||
|
||||
// +1I
|
||||
self.add_cycle();
|
||||
self.idle_cycle();
|
||||
|
||||
if dest_reg == REG_PC {
|
||||
self.reload_pipeline32(sb);
|
||||
result = CpuAction::FlushPipeline;
|
||||
self.reload_pipeline32();
|
||||
result = CpuAction::PipelineFlushed;
|
||||
}
|
||||
} else {
|
||||
let value = if dest_reg == REG_PC {
|
||||
|
@ -346,13 +328,10 @@ impl Core {
|
|||
self.get_reg(dest_reg)
|
||||
};
|
||||
if insn.transfer_size() == 1 {
|
||||
self.N_cycle8(sb, addr);
|
||||
self.write_8(addr, value as u8, sb);
|
||||
self.store_8(addr, value as u8, NonSeq);
|
||||
} else {
|
||||
self.N_cycle32(sb, addr);
|
||||
self.write_32(addr & !0x3, value, sb);
|
||||
self.store_aligned_32(addr & !0x3, value, NonSeq);
|
||||
};
|
||||
self.N_cycle32(sb, self.pc);
|
||||
}
|
||||
|
||||
if !load || base_reg != dest_reg {
|
||||
|
@ -370,9 +349,8 @@ impl Core {
|
|||
result
|
||||
}
|
||||
|
||||
pub fn exec_arm_ldr_str_hs_reg(&mut self, sb: &mut SysBus, insn: u32) -> CpuAction {
|
||||
pub fn exec_arm_ldr_str_hs_reg(&mut self, insn: u32) -> CpuAction {
|
||||
self.ldr_str_hs(
|
||||
sb,
|
||||
insn,
|
||||
BarrelShifterValue::ShiftedRegister(ShiftedRegister {
|
||||
reg: (insn & 0xf) as usize,
|
||||
|
@ -383,24 +361,19 @@ impl Core {
|
|||
)
|
||||
}
|
||||
|
||||
pub fn exec_arm_ldr_str_hs_imm(&mut self, sb: &mut SysBus, insn: u32) -> CpuAction {
|
||||
pub fn exec_arm_ldr_str_hs_imm(&mut self, insn: u32) -> CpuAction {
|
||||
let offset8 = (insn.bit_range(8..12) << 4) + insn.bit_range(0..4);
|
||||
let offset8 = if insn.add_offset_flag() {
|
||||
offset8
|
||||
} else {
|
||||
(-(offset8 as i32)) as u32
|
||||
};
|
||||
self.ldr_str_hs(sb, insn, BarrelShifterValue::ImmediateValue(offset8))
|
||||
self.ldr_str_hs(insn, BarrelShifterValue::ImmediateValue(offset8))
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn ldr_str_hs(
|
||||
&mut self,
|
||||
sb: &mut SysBus,
|
||||
insn: u32,
|
||||
offset: BarrelShifterValue,
|
||||
) -> CpuAction {
|
||||
let mut result = CpuAction::AdvancePC;
|
||||
pub fn ldr_str_hs(&mut self, insn: u32, offset: BarrelShifterValue) -> CpuAction {
|
||||
let mut result = CpuAction::AdvancePC(NonSeq);
|
||||
|
||||
let load = insn.load_flag();
|
||||
let pre_index = insn.pre_index_flag();
|
||||
|
@ -428,30 +401,20 @@ impl Core {
|
|||
};
|
||||
|
||||
if load {
|
||||
self.S_cycle32(sb, self.pc);
|
||||
let data = match insn.halfword_data_transfer_type() {
|
||||
ArmHalfwordTransferType::SignedByte => {
|
||||
self.N_cycle8(sb, addr);
|
||||
sb.read_8(addr) as u8 as i8 as u32
|
||||
}
|
||||
ArmHalfwordTransferType::SignedHalfwords => {
|
||||
self.N_cycle16(sb, addr);
|
||||
self.ldr_sign_half(addr, sb)
|
||||
}
|
||||
ArmHalfwordTransferType::UnsignedHalfwords => {
|
||||
self.N_cycle16(sb, addr);
|
||||
self.ldr_half(addr, sb)
|
||||
}
|
||||
ArmHalfwordTransferType::SignedByte => self.load_8(addr, NonSeq) as u8 as i8 as u32,
|
||||
ArmHalfwordTransferType::SignedHalfwords => self.ldr_sign_half(addr, NonSeq),
|
||||
ArmHalfwordTransferType::UnsignedHalfwords => self.ldr_half(addr, NonSeq),
|
||||
};
|
||||
|
||||
self.set_reg(dest_reg, data);
|
||||
|
||||
// +1I
|
||||
self.add_cycle();
|
||||
self.idle_cycle();
|
||||
|
||||
if dest_reg == REG_PC {
|
||||
self.reload_pipeline32(sb);
|
||||
result = CpuAction::FlushPipeline;
|
||||
self.reload_pipeline32();
|
||||
result = CpuAction::PipelineFlushed;
|
||||
}
|
||||
} else {
|
||||
let value = if dest_reg == REG_PC {
|
||||
|
@ -462,9 +425,7 @@ impl Core {
|
|||
|
||||
match insn.halfword_data_transfer_type() {
|
||||
ArmHalfwordTransferType::UnsignedHalfwords => {
|
||||
self.N_cycle32(sb, addr);
|
||||
self.write_16(addr, value as u16, sb);
|
||||
self.N_cycle32(sb, self.pc);
|
||||
self.store_aligned_16(addr, value as u16, NonSeq);
|
||||
}
|
||||
_ => panic!("invalid HS flags for L=0"),
|
||||
};
|
||||
|
@ -481,8 +442,8 @@ impl Core {
|
|||
result
|
||||
}
|
||||
|
||||
pub fn exec_arm_ldm_stm(&mut self, sb: &mut SysBus, insn: u32) -> CpuAction {
|
||||
let mut result = CpuAction::AdvancePC;
|
||||
pub fn exec_arm_ldm_stm(&mut self, insn: u32) -> CpuAction {
|
||||
let mut result = CpuAction::AdvancePC(NonSeq);
|
||||
|
||||
let mut full = insn.pre_index_flag();
|
||||
let ascending = insn.add_offset_flag();
|
||||
|
@ -537,8 +498,7 @@ impl Core {
|
|||
|
||||
if rlist != 0 {
|
||||
if is_load {
|
||||
self.add_cycle();
|
||||
self.N_cycle32(sb, self.pc);
|
||||
let mut access = NonSeq;
|
||||
for r in 0..16 {
|
||||
if rlist.bit(r) {
|
||||
if r == base_reg {
|
||||
|
@ -547,27 +507,25 @@ impl Core {
|
|||
if full {
|
||||
addr = addr.wrapping_add(4);
|
||||
}
|
||||
|
||||
let val = sb.read_32(addr);
|
||||
self.S_cycle32(sb, self.pc);
|
||||
|
||||
let val = self.load_32(addr, access);
|
||||
access = Seq;
|
||||
self.set_reg(r, val);
|
||||
|
||||
if r == REG_PC {
|
||||
if psr_transfer {
|
||||
self.transfer_spsr_mode();
|
||||
}
|
||||
self.reload_pipeline32(sb);
|
||||
result = CpuAction::FlushPipeline;
|
||||
self.reload_pipeline32();
|
||||
result = CpuAction::PipelineFlushed;
|
||||
}
|
||||
|
||||
if !full {
|
||||
addr = addr.wrapping_add(4);
|
||||
}
|
||||
}
|
||||
}
|
||||
self.idle_cycle();
|
||||
} else {
|
||||
let mut first = true;
|
||||
let mut access = NonSeq;
|
||||
for r in 0..16 {
|
||||
if rlist.bit(r) {
|
||||
let val = if r != base_reg {
|
||||
|
@ -593,27 +551,22 @@ impl Core {
|
|||
addr = addr.wrapping_add(4);
|
||||
}
|
||||
|
||||
if first {
|
||||
self.N_cycle32(sb, addr);
|
||||
first = false;
|
||||
} else {
|
||||
self.S_cycle32(sb, addr);
|
||||
}
|
||||
self.write_32(addr, val, sb);
|
||||
first = false;
|
||||
|
||||
self.store_aligned_32(addr, val, access);
|
||||
access = Seq;
|
||||
if !full {
|
||||
addr = addr.wrapping_add(4);
|
||||
}
|
||||
}
|
||||
}
|
||||
self.N_cycle32(sb, self.pc);
|
||||
}
|
||||
} else {
|
||||
if is_load {
|
||||
let val = self.ldr_word(addr, sb);
|
||||
let val = self.ldr_word(addr, NonSeq);
|
||||
self.set_reg(REG_PC, val & !3);
|
||||
self.reload_pipeline32(sb);
|
||||
result = CpuAction::FlushPipeline;
|
||||
self.reload_pipeline32();
|
||||
result = CpuAction::PipelineFlushed;
|
||||
} else {
|
||||
// block data store with empty rlist
|
||||
let addr = match (ascending, full) {
|
||||
|
@ -622,7 +575,7 @@ impl Core {
|
|||
(true, false) => addr,
|
||||
(true, true) => addr.wrapping_add(4),
|
||||
};
|
||||
self.write_32(addr, self.pc + 4, sb);
|
||||
self.store_aligned_32(addr, self.pc + 4, NonSeq);
|
||||
}
|
||||
addr = if ascending {
|
||||
addr.wrapping_add(0x40)
|
||||
|
@ -642,7 +595,9 @@ impl Core {
|
|||
result
|
||||
}
|
||||
|
||||
pub fn exec_arm_mul_mla(&mut self, sb: &mut SysBus, insn: u32) -> CpuAction {
|
||||
/// Multiply and Multiply-Accumulate (MUL, MLA)
|
||||
/// Execution Time: 1S+mI for MUL, and 1S+(m+1)I for MLA.
|
||||
pub fn exec_arm_mul_mla(&mut self, insn: u32) -> CpuAction {
|
||||
let rd = insn.bit_range(16..20) as usize;
|
||||
let rn = insn.bit_range(12..16) as usize;
|
||||
let rs = insn.rs();
|
||||
|
@ -658,14 +613,14 @@ impl Core {
|
|||
|
||||
if insn.accumulate_flag() {
|
||||
result = result.wrapping_add(self.get_reg(rn));
|
||||
self.add_cycle();
|
||||
self.idle_cycle();
|
||||
}
|
||||
|
||||
self.set_reg(rd, result);
|
||||
|
||||
let m = self.get_required_multipiler_array_cycles(op2);
|
||||
for _ in 0..m {
|
||||
self.add_cycle();
|
||||
self.idle_cycle();
|
||||
}
|
||||
|
||||
if insn.set_cond_flag() {
|
||||
|
@ -675,12 +630,12 @@ impl Core {
|
|||
self.cpsr.set_V(false);
|
||||
}
|
||||
|
||||
self.S_cycle32(sb, self.pc);
|
||||
|
||||
CpuAction::AdvancePC
|
||||
CpuAction::AdvancePC(Seq)
|
||||
}
|
||||
|
||||
pub fn exec_arm_mull_mlal(&mut self, sb: &mut SysBus, insn: u32) -> CpuAction {
|
||||
/// Multiply Long and Multiply-Accumulate Long (MULL, MLAL)
|
||||
/// Execution Time: 1S+(m+1)I for MULL, and 1S+(m+2)I for MLAL
|
||||
pub fn exec_arm_mull_mlal(&mut self, insn: u32) -> CpuAction {
|
||||
let rd_hi = insn.rd_hi();
|
||||
let rd_lo = insn.rd_lo();
|
||||
let rs = insn.rs();
|
||||
|
@ -694,21 +649,18 @@ impl Core {
|
|||
} else {
|
||||
(op1 as u64).wrapping_mul(op2 as u64)
|
||||
};
|
||||
self.add_cycle();
|
||||
|
||||
if insn.accumulate_flag() {
|
||||
let hi = self.get_reg(rd_hi) as u64;
|
||||
let lo = self.get_reg(rd_lo) as u64;
|
||||
result = result.wrapping_add(hi << 32 | lo);
|
||||
self.add_cycle();
|
||||
self.idle_cycle();
|
||||
}
|
||||
|
||||
self.set_reg(rd_hi, (result >> 32) as i32 as u32);
|
||||
self.set_reg(rd_lo, (result & 0xffffffff) as i32 as u32);
|
||||
|
||||
self.idle_cycle();
|
||||
let m = self.get_required_multipiler_array_cycles(self.get_reg(rs));
|
||||
for _ in 0..m {
|
||||
self.add_cycle();
|
||||
self.idle_cycle();
|
||||
}
|
||||
|
||||
if insn.set_cond_flag() {
|
||||
|
@ -718,35 +670,32 @@ impl Core {
|
|||
self.cpsr.set_V(false);
|
||||
}
|
||||
|
||||
self.S_cycle32(sb, self.pc);
|
||||
|
||||
CpuAction::AdvancePC
|
||||
CpuAction::AdvancePC(Seq)
|
||||
}
|
||||
|
||||
pub fn exec_arm_swp(&mut self, sb: &mut SysBus, insn: u32) -> CpuAction {
|
||||
/// ARM Opcodes: Memory: Single Data Swap (SWP)
|
||||
/// Execution Time: 1S+2N+1I. That is, 2N data cycles, 1S code cycle, plus 1I.
|
||||
pub fn exec_arm_swp(&mut self, insn: u32) -> CpuAction {
|
||||
let base_addr = self.get_reg(insn.bit_range(16..20) as usize);
|
||||
let rd = insn.bit_range(12..16) as usize;
|
||||
if insn.transfer_size() == 1 {
|
||||
let t = sb.read_8(base_addr);
|
||||
self.N_cycle8(sb, base_addr);
|
||||
sb.write_8(base_addr, self.get_reg(insn.rm()) as u8);
|
||||
self.S_cycle8(sb, base_addr);
|
||||
let t = self.load_8(base_addr, NonSeq);
|
||||
self.store_8(base_addr, self.get_reg(insn.rm()) as u8, Seq);
|
||||
self.set_reg(rd, t as u32);
|
||||
} else {
|
||||
let t = self.ldr_word(base_addr, sb);
|
||||
self.N_cycle32(sb, base_addr);
|
||||
self.write_32(base_addr, self.get_reg(insn.rm()), sb);
|
||||
self.S_cycle32(sb, base_addr);
|
||||
let t = self.ldr_word(base_addr, NonSeq);
|
||||
self.store_aligned_32(base_addr, self.get_reg(insn.rm()), Seq);
|
||||
self.set_reg(rd, t as u32);
|
||||
}
|
||||
self.add_cycle();
|
||||
self.N_cycle32(sb, self.pc);
|
||||
self.idle_cycle();
|
||||
|
||||
CpuAction::AdvancePC
|
||||
CpuAction::AdvancePC(NonSeq)
|
||||
}
|
||||
|
||||
pub fn exec_arm_swi(&mut self, sb: &mut SysBus, insn: u32) -> CpuAction {
|
||||
self.software_interrupt(sb, self.pc - 4, insn.swi_comment());
|
||||
CpuAction::FlushPipeline
|
||||
/// ARM Software Interrupt
|
||||
/// Execution Time: 2S+1N
|
||||
pub fn exec_arm_swi(&mut self, insn: u32) -> CpuAction {
|
||||
self.software_interrupt(self.pc - 4, insn.swi_comment()); // Implies 2S + 1N
|
||||
CpuAction::PipelineFlushed
|
||||
}
|
||||
}
|
||||
|
|
|
@ -2,15 +2,37 @@ use serde::{Deserialize, Serialize};
|
|||
|
||||
pub use super::exception::Exception;
|
||||
|
||||
use super::CpuAction;
|
||||
use super::{psr::RegPSR, Addr, CpuMode, CpuState, arm::ArmCond};
|
||||
use super::{arm::ArmCond, psr::RegPSR, Addr, CpuMode, CpuState};
|
||||
|
||||
use crate::util::Shared;
|
||||
|
||||
use super::memory::{MemoryAccess, MemoryInterface};
|
||||
use MemoryAccess::*;
|
||||
|
||||
use cfg_if::cfg_if;
|
||||
|
||||
cfg_if! {
|
||||
if #[cfg(feature = "arm7tdmi_dispatch_table")] {
|
||||
// Include files that are auto-generated by the build script
|
||||
// See `build.rs`
|
||||
include!(concat!(env!("OUT_DIR"), "/arm_lut.rs"));
|
||||
include!(concat!(env!("OUT_DIR"), "/thumb_lut.rs"));
|
||||
|
||||
#[cfg(feature = "debugger")]
|
||||
use super::thumb::ThumbFormat;
|
||||
|
||||
#[cfg(feature = "debugger")]
|
||||
use super::arm::ArmFormat;
|
||||
|
||||
#[cfg_attr(not(feature = "debugger"), repr(transparent))]
|
||||
pub struct ThumbInstructionInfo<I: MemoryInterface> {
|
||||
pub handler_fn: fn(&mut Core<I>, insn: u16) -> CpuAction,
|
||||
#[cfg(feature = "debugger")]
|
||||
pub fmt: ThumbFormat,
|
||||
}
|
||||
|
||||
#[cfg_attr(not(feature = "debugger"), repr(transparent))]
|
||||
pub struct ArmInstructionInfo<I: MemoryInterface> {
|
||||
pub handler_fn: fn(&mut Core<I>, insn: u32) -> CpuAction,
|
||||
#[cfg(feature = "debugger")]
|
||||
pub fmt: ArmFormat,
|
||||
}
|
||||
} else {
|
||||
use super::arm::ArmFormat;
|
||||
use super::thumb::ThumbFormat;
|
||||
|
@ -31,59 +53,160 @@ cfg_if! {
|
|||
}
|
||||
}
|
||||
|
||||
use crate::bus::Bus;
|
||||
use crate::sysbus::{MemoryAccessType::*, MemoryAccessWidth::*, SysBus};
|
||||
|
||||
use bit::BitIndex;
|
||||
use num::FromPrimitive;
|
||||
|
||||
pub enum CpuAction {
|
||||
AdvancePC(MemoryAccess),
|
||||
PipelineFlushed,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
|
||||
pub struct Core {
|
||||
pub pc: u32,
|
||||
pub gpr: [u32; 15],
|
||||
pub(super) struct BankedRegisters {
|
||||
// r13 and r14 are banked for all modes. System&User mode share them
|
||||
pub(super) gpr_banked_r13: [u32; 6],
|
||||
pub(super) gpr_banked_r14: [u32; 6],
|
||||
// r8-r12 are banked for fiq mode
|
||||
pub(super) gpr_banked_old_r8_12: [u32; 5],
|
||||
pub(super) gpr_banked_fiq_r8_12: [u32; 5],
|
||||
pub(super) spsr_bank: [RegPSR; 6],
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
pub struct SavedCpuState {
|
||||
pub pc: u32,
|
||||
pub gpr: [u32; 15],
|
||||
next_fetch_access: MemoryAccess,
|
||||
pipeline: [u32; 2],
|
||||
|
||||
pub cpsr: RegPSR,
|
||||
pub(super) spsr: RegPSR,
|
||||
pub(super) spsr_bank: [RegPSR; 6],
|
||||
|
||||
pub(super) banks: Box<BankedRegisters>,
|
||||
|
||||
pub(super) bs_carry_out: bool,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct Core<I: MemoryInterface> {
|
||||
pub(super) bus: Shared<I>,
|
||||
|
||||
next_fetch_access: MemoryAccess,
|
||||
pipeline: [u32; 2],
|
||||
pub pc: u32,
|
||||
pub gpr: [u32; 15],
|
||||
|
||||
pub cpsr: RegPSR,
|
||||
pub(super) spsr: RegPSR,
|
||||
|
||||
// Todo - do I still need this?
|
||||
pub(super) bs_carry_out: bool,
|
||||
|
||||
pub(super) banks: Box<BankedRegisters>, // Putting these in a box so the most-used Cpu fields in the same cacheline
|
||||
|
||||
#[cfg(feature = "debugger")]
|
||||
pub last_executed: Option<DecodedInstruction>,
|
||||
|
||||
pub cycles: usize,
|
||||
|
||||
// store the gpr before executing an instruction to show diff in the Display impl
|
||||
/// store the gpr before executing an instruction to show diff in the Display impl
|
||||
#[cfg(feature = "debugger")]
|
||||
gpr_previous: [u32; 15],
|
||||
|
||||
memreq: Addr,
|
||||
#[cfg(feature = "debugger")]
|
||||
pub breakpoints: Vec<u32>,
|
||||
|
||||
#[cfg(feature = "debugger")]
|
||||
pub verbose: bool,
|
||||
|
||||
#[cfg(feature = "debugger")]
|
||||
pub trace_opcodes: bool,
|
||||
|
||||
#[cfg(feature = "debugger")]
|
||||
pub trace_exceptions: bool,
|
||||
}
|
||||
|
||||
impl Core {
|
||||
pub fn new() -> Core {
|
||||
impl<I: MemoryInterface> Core<I> {
|
||||
pub fn new(bus: Shared<I>) -> Core<I> {
|
||||
let cpsr = RegPSR::new(0x0000_00D3);
|
||||
Core {
|
||||
memreq: 0xffff_0000, // set memreq to an invalid addr so the first load cycle will be non-sequential
|
||||
cpsr: cpsr,
|
||||
..Default::default()
|
||||
bus,
|
||||
pc: 0,
|
||||
gpr: [0; 15],
|
||||
pipeline: [0; 2],
|
||||
next_fetch_access: MemoryAccess::NonSeq,
|
||||
cpsr,
|
||||
spsr: Default::default(),
|
||||
banks: Box::new(BankedRegisters::default()),
|
||||
bs_carry_out: false,
|
||||
|
||||
#[cfg(feature = "debugger")]
|
||||
last_executed: None,
|
||||
#[cfg(feature = "debugger")]
|
||||
gpr_previous: [0; 15],
|
||||
#[cfg(feature = "debugger")]
|
||||
breakpoints: Vec::new(),
|
||||
#[cfg(feature = "debugger")]
|
||||
verbose: false,
|
||||
#[cfg(feature = "debugger")]
|
||||
trace_opcodes: false,
|
||||
#[cfg(feature = "debugger")]
|
||||
trace_exceptions: false,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_saved_state(bus: Shared<I>, state: SavedCpuState) -> Core<I> {
|
||||
Core {
|
||||
bus,
|
||||
|
||||
pc: state.pc,
|
||||
cpsr: state.cpsr,
|
||||
gpr: state.gpr,
|
||||
banks: state.banks,
|
||||
spsr: state.spsr,
|
||||
|
||||
bs_carry_out: state.bs_carry_out,
|
||||
pipeline: state.pipeline,
|
||||
next_fetch_access: state.next_fetch_access,
|
||||
|
||||
// savestate does not keep debugger related information, so just reinitialize to default
|
||||
#[cfg(feature = "debugger")]
|
||||
last_executed: None,
|
||||
#[cfg(feature = "debugger")]
|
||||
gpr_previous: [0; 15],
|
||||
#[cfg(feature = "debugger")]
|
||||
breakpoints: Vec::new(),
|
||||
#[cfg(feature = "debugger")]
|
||||
verbose: false,
|
||||
#[cfg(feature = "debugger")]
|
||||
trace_opcodes: false,
|
||||
#[cfg(feature = "debugger")]
|
||||
trace_exceptions: false,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn save_state(&self) -> SavedCpuState {
|
||||
SavedCpuState {
|
||||
cpsr: self.cpsr,
|
||||
pc: self.pc,
|
||||
gpr: self.gpr.clone(),
|
||||
spsr: self.spsr,
|
||||
banks: self.banks.clone(),
|
||||
bs_carry_out: self.bs_carry_out,
|
||||
pipeline: self.pipeline.clone(),
|
||||
next_fetch_access: self.next_fetch_access,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn restore_state(&mut self, state: SavedCpuState) {
|
||||
self.pc = state.pc;
|
||||
self.cpsr = state.cpsr;
|
||||
self.gpr = state.gpr;
|
||||
self.spsr = state.spsr;
|
||||
self.banks = state.banks;
|
||||
self.bs_carry_out = state.bs_carry_out;
|
||||
self.pipeline = state.pipeline;
|
||||
self.next_fetch_access = state.next_fetch_access;
|
||||
}
|
||||
|
||||
pub fn set_memory_interface(&mut self, i: Shared<I>) {
|
||||
self.bus = i;
|
||||
}
|
||||
|
||||
#[cfg(feature = "debugger")]
|
||||
pub fn set_verbose(&mut self, v: bool) {
|
||||
self.verbose = v;
|
||||
}
|
||||
|
@ -115,11 +238,11 @@ impl Core {
|
|||
if self.cpsr.mode() == CpuMode::Fiq {
|
||||
self.gpr[r]
|
||||
} else {
|
||||
self.gpr_banked_old_r8_12[r - 8]
|
||||
self.banks.gpr_banked_old_r8_12[r - 8]
|
||||
}
|
||||
}
|
||||
13 => self.gpr_banked_r13[0],
|
||||
14 => self.gpr_banked_r14[0],
|
||||
13 => self.banks.gpr_banked_r13[0],
|
||||
14 => self.banks.gpr_banked_r14[0],
|
||||
_ => panic!("invalid register"),
|
||||
}
|
||||
}
|
||||
|
@ -146,62 +269,19 @@ impl Core {
|
|||
if self.cpsr.mode() == CpuMode::Fiq {
|
||||
self.gpr[r] = val;
|
||||
} else {
|
||||
self.gpr_banked_old_r8_12[r - 8] = val;
|
||||
self.banks.gpr_banked_old_r8_12[r - 8] = val;
|
||||
}
|
||||
}
|
||||
13 => {
|
||||
self.gpr_banked_r13[0] = val;
|
||||
self.banks.gpr_banked_r13[0] = val;
|
||||
}
|
||||
14 => {
|
||||
self.gpr_banked_r14[0] = val;
|
||||
self.banks.gpr_banked_r14[0] = val;
|
||||
}
|
||||
_ => panic!("invalid register"),
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) fn write_32(&mut self, addr: Addr, value: u32, bus: &mut SysBus) {
|
||||
bus.write_32(addr & !0x3, value);
|
||||
}
|
||||
|
||||
pub(super) fn write_16(&mut self, addr: Addr, value: u16, bus: &mut SysBus) {
|
||||
bus.write_16(addr & !0x1, value);
|
||||
}
|
||||
|
||||
pub(super) fn write_8(&mut self, addr: Addr, value: u8, bus: &mut SysBus) {
|
||||
bus.write_8(addr, value);
|
||||
}
|
||||
|
||||
/// Helper function for "ldr" instruction that handles misaligned addresses
|
||||
pub(super) fn ldr_word(&mut self, addr: Addr, bus: &SysBus) -> u32 {
|
||||
if addr & 0x3 != 0 {
|
||||
let rotation = (addr & 0x3) << 3;
|
||||
let value = bus.read_32(addr & !0x3);
|
||||
self.ror(value, rotation, self.cpsr.C(), false, false)
|
||||
} else {
|
||||
bus.read_32(addr)
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper function for "ldrh" instruction that handles misaligned addresses
|
||||
pub(super) fn ldr_half(&mut self, addr: Addr, bus: &SysBus) -> u32 {
|
||||
if addr & 0x1 != 0 {
|
||||
let rotation = (addr & 0x1) << 3;
|
||||
let value = bus.read_16(addr & !0x1);
|
||||
self.ror(value as u32, rotation, self.cpsr.C(), false, false)
|
||||
} else {
|
||||
bus.read_16(addr) as u32
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper function for "ldrsh" instruction that handles misaligned addresses
|
||||
pub(super) fn ldr_sign_half(&mut self, addr: Addr, bus: &SysBus) -> u32 {
|
||||
if addr & 0x1 != 0 {
|
||||
bus.read_8(addr) as i8 as i32 as u32
|
||||
} else {
|
||||
bus.read_16(addr) as i16 as i32 as u32
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_registers(&self) -> [u32; 15] {
|
||||
self.gpr.clone()
|
||||
}
|
||||
|
@ -214,31 +294,33 @@ impl Core {
|
|||
return;
|
||||
}
|
||||
|
||||
self.spsr_bank[old_index] = self.spsr;
|
||||
self.gpr_banked_r13[old_index] = self.gpr[13];
|
||||
self.gpr_banked_r14[old_index] = self.gpr[14];
|
||||
let banks = &mut self.banks;
|
||||
|
||||
self.spsr = self.spsr_bank[new_index];
|
||||
self.gpr[13] = self.gpr_banked_r13[new_index];
|
||||
self.gpr[14] = self.gpr_banked_r14[new_index];
|
||||
banks.spsr_bank[old_index] = self.spsr;
|
||||
banks.gpr_banked_r13[old_index] = self.gpr[13];
|
||||
banks.gpr_banked_r14[old_index] = self.gpr[14];
|
||||
|
||||
self.spsr = banks.spsr_bank[new_index];
|
||||
self.gpr[13] = banks.gpr_banked_r13[new_index];
|
||||
self.gpr[14] = banks.gpr_banked_r14[new_index];
|
||||
|
||||
if new_mode == CpuMode::Fiq {
|
||||
for r in 0..5 {
|
||||
self.gpr_banked_old_r8_12[r] = self.gpr[r + 8];
|
||||
self.gpr[r + 8] = self.gpr_banked_fiq_r8_12[r];
|
||||
banks.gpr_banked_old_r8_12[r] = self.gpr[r + 8];
|
||||
self.gpr[r + 8] = banks.gpr_banked_fiq_r8_12[r];
|
||||
}
|
||||
} else if old_mode == CpuMode::Fiq {
|
||||
for r in 0..5 {
|
||||
self.gpr_banked_fiq_r8_12[r] = self.gpr[r + 8];
|
||||
self.gpr[r + 8] = self.gpr_banked_old_r8_12[r];
|
||||
banks.gpr_banked_fiq_r8_12[r] = self.gpr[r + 8];
|
||||
self.gpr[r + 8] = banks.gpr_banked_old_r8_12[r];
|
||||
}
|
||||
}
|
||||
self.cpsr.set_mode(new_mode);
|
||||
}
|
||||
|
||||
/// Resets the cpu
|
||||
pub fn reset(&mut self, sb: &mut SysBus) {
|
||||
self.exception(sb, Exception::Reset, 0);
|
||||
pub fn reset(&mut self) {
|
||||
self.exception(Exception::Reset, 0);
|
||||
}
|
||||
|
||||
pub fn word_size(&self) -> usize {
|
||||
|
@ -248,15 +330,6 @@ impl Core {
|
|||
}
|
||||
}
|
||||
|
||||
pub fn cycles(&self) -> usize {
|
||||
self.cycles
|
||||
}
|
||||
|
||||
pub(super) fn add_cycle(&mut self) {
|
||||
// println!("<cycle I-Cyclel> total: {}", self.cycles);
|
||||
self.cycles += 1;
|
||||
}
|
||||
|
||||
pub(super) fn get_required_multipiler_array_cycles(&self, rs: u32) -> usize {
|
||||
if rs & 0xff == rs {
|
||||
1
|
||||
|
@ -269,42 +342,6 @@ impl Core {
|
|||
}
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
#[inline(always)]
|
||||
pub(super) fn S_cycle32(&mut self, sb: &SysBus, addr: u32) {
|
||||
self.cycles += sb.get_cycles(addr, Seq, MemoryAccess32);
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
#[inline(always)]
|
||||
pub(super) fn S_cycle16(&mut self, sb: &SysBus, addr: u32) {
|
||||
self.cycles += sb.get_cycles(addr, Seq, MemoryAccess16);
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
#[inline(always)]
|
||||
pub(super) fn S_cycle8(&mut self, sb: &SysBus, addr: u32) {
|
||||
self.cycles += sb.get_cycles(addr, Seq, MemoryAccess8);
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
#[inline(always)]
|
||||
pub(super) fn N_cycle32(&mut self, sb: &SysBus, addr: u32) {
|
||||
self.cycles += sb.get_cycles(addr, NonSeq, MemoryAccess32);
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
#[inline(always)]
|
||||
pub(super) fn N_cycle16(&mut self, sb: &SysBus, addr: u32) {
|
||||
self.cycles += sb.get_cycles(addr, NonSeq, MemoryAccess16);
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
#[inline(always)]
|
||||
pub(super) fn N_cycle8(&mut self, sb: &SysBus, addr: u32) {
|
||||
self.cycles += sb.get_cycles(addr, NonSeq, MemoryAccess8);
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub(super) fn check_arm_cond(&self, cond: ArmCond) -> bool {
|
||||
use ArmCond::*;
|
||||
|
@ -337,80 +374,73 @@ impl Core {
|
|||
self.last_executed = Some(d);
|
||||
}
|
||||
|
||||
#[cfg(feature = "arm7tdmi_dispatch_table")]
|
||||
fn step_arm_exec(&mut self, insn: u32, sb: &mut SysBus) -> CpuAction {
|
||||
let hash = (((insn >> 16) & 0xff0) | ((insn >> 4) & 0x00f)) as usize;
|
||||
let arm_info = &ARM_LUT[hash];
|
||||
cfg_if! {
|
||||
if #[cfg(feature = "arm7tdmi_dispatch_table")] {
|
||||
fn step_arm_exec(&mut self, insn: u32) -> CpuAction {
|
||||
let hash = (((insn >> 16) & 0xff0) | ((insn >> 4) & 0x00f)) as usize;
|
||||
let arm_info = &Self::ARM_LUT[hash];
|
||||
#[cfg(feature = "debugger")]
|
||||
self.debugger_record_step(DecodedInstruction::Arm(ArmInstruction::new(
|
||||
insn,
|
||||
self.pc.wrapping_sub(8),
|
||||
arm_info.fmt,
|
||||
)));
|
||||
(arm_info.handler_fn)(self, insn)
|
||||
}
|
||||
|
||||
#[cfg(feature = "debugger")]
|
||||
self.debugger_record_step(DecodedInstruction::Arm(ArmInstruction::new(
|
||||
insn,
|
||||
self.pc.wrapping_sub(8),
|
||||
arm_info.fmt,
|
||||
)));
|
||||
fn step_thumb_exec(&mut self, insn: u16) -> CpuAction {
|
||||
let thumb_info = &Self::THUMB_LUT[(insn >> 6) as usize];
|
||||
#[cfg(feature = "debugger")]
|
||||
self.debugger_record_step(DecodedInstruction::Thumb(ThumbInstruction::new(
|
||||
insn,
|
||||
self.pc.wrapping_sub(4),
|
||||
thumb_info.fmt,
|
||||
)));
|
||||
(thumb_info.handler_fn)(self, insn)
|
||||
}
|
||||
} else {
|
||||
|
||||
(arm_info.handler_fn)(self, sb, insn)
|
||||
}
|
||||
|
||||
#[cfg(feature = "arm7tdmi_dispatch_table")]
|
||||
fn step_thumb_exec(&mut self, insn: u16, sb: &mut SysBus) -> CpuAction {
|
||||
let thumb_info = &THUMB_LUT[(insn >> 6) as usize];
|
||||
|
||||
#[cfg(feature = "debugger")]
|
||||
self.debugger_record_step(DecodedInstruction::Thumb(ThumbInstruction::new(
|
||||
insn,
|
||||
self.pc.wrapping_sub(4),
|
||||
thumb_info.fmt,
|
||||
)));
|
||||
|
||||
(thumb_info.handler_fn)(self, sb, insn)
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "arm7tdmi_dispatch_table"))]
|
||||
fn step_arm_exec(&mut self, insn: u32, sb: &mut SysBus) -> CpuAction {
|
||||
let arm_fmt = ArmFormat::from(insn);
|
||||
#[cfg(feature = "debugger")]
|
||||
self.debugger_record_step(DecodedInstruction::Arm(ArmInstruction::new(
|
||||
insn,
|
||||
self.pc.wrapping_sub(8),
|
||||
arm_fmt,
|
||||
)));
|
||||
|
||||
self.exec_arm(sb, insn, arm_fmt)
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "arm7tdmi_dispatch_table"))]
|
||||
fn step_thumb_exec(&mut self, insn: u16, sb: &mut SysBus) -> CpuAction {
|
||||
let thumb_fmt = ThumbFormat::from(insn);
|
||||
|
||||
#[cfg(feature = "debugger")]
|
||||
self.debugger_record_step(DecodedInstruction::Thumb(ThumbInstruction::new(
|
||||
insn,
|
||||
self.pc.wrapping_sub(4),
|
||||
thumb_fmt,
|
||||
)));
|
||||
|
||||
self.exec_thumb(sb, insn, thumb_fmt)
|
||||
fn step_arm_exec(&mut self, insn: u32) -> CpuAction {
|
||||
let arm_fmt = ArmFormat::from(insn);
|
||||
#[cfg(feature = "debugger")]
|
||||
self.debugger_record_step(DecodedInstruction::Arm(ArmInstruction::new(
|
||||
insn,
|
||||
self.pc.wrapping_sub(8),
|
||||
arm_fmt,
|
||||
)));
|
||||
self.exec_arm(insn, arm_fmt)
|
||||
}
|
||||
fn step_thumb_exec(&mut self, insn: u16) -> CpuAction {
|
||||
let thumb_fmt = ThumbFormat::from(insn);
|
||||
#[cfg(feature = "debugger")]
|
||||
self.debugger_record_step(DecodedInstruction::Thumb(ThumbInstruction::new(
|
||||
insn,
|
||||
self.pc.wrapping_sub(4),
|
||||
thumb_fmt,
|
||||
)));
|
||||
self.exec_thumb(insn, thumb_fmt)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// 2S + 1N
|
||||
#[inline(always)]
|
||||
pub fn reload_pipeline16(&mut self, sb: &mut SysBus) {
|
||||
self.pipeline[0] = sb.read_16(self.pc) as u32;
|
||||
self.N_cycle16(sb, self.pc);
|
||||
pub fn reload_pipeline16(&mut self) {
|
||||
self.pipeline[0] = self.load_16(self.pc, NonSeq) as u32;
|
||||
self.advance_thumb();
|
||||
self.pipeline[1] = sb.read_16(self.pc) as u32;
|
||||
self.S_cycle16(sb, self.pc);
|
||||
self.pipeline[1] = self.load_16(self.pc, Seq) as u32;
|
||||
self.advance_thumb();
|
||||
self.next_fetch_access = Seq;
|
||||
}
|
||||
|
||||
/// 2S + 1N
|
||||
#[inline(always)]
|
||||
pub fn reload_pipeline32(&mut self, sb: &mut SysBus) {
|
||||
self.pipeline[0] = sb.read_32(self.pc);
|
||||
self.N_cycle16(sb, self.pc);
|
||||
pub fn reload_pipeline32(&mut self) {
|
||||
self.pipeline[0] = self.load_32(self.pc, NonSeq);
|
||||
self.advance_arm();
|
||||
self.pipeline[1] = sb.read_32(self.pc);
|
||||
self.S_cycle16(sb, self.pc);
|
||||
self.pipeline[1] = self.load_32(self.pc, Seq);
|
||||
self.advance_arm();
|
||||
self.next_fetch_access = Seq;
|
||||
}
|
||||
|
||||
#[inline]
|
||||
|
@ -425,12 +455,12 @@ impl Core {
|
|||
|
||||
/// Perform a pipeline step
|
||||
/// If an instruction was executed in this step, return it.
|
||||
pub fn step(&mut self, bus: &mut SysBus) {
|
||||
let pc = self.pc;
|
||||
|
||||
pub fn step(&mut self) {
|
||||
match self.cpsr.state() {
|
||||
CpuState::ARM => {
|
||||
let fetched_now = bus.read_32(pc);
|
||||
let pc = self.pc & !3;
|
||||
|
||||
let fetched_now = self.load_32(pc, self.next_fetch_access);
|
||||
let insn = self.pipeline[0];
|
||||
self.pipeline[0] = self.pipeline[1];
|
||||
self.pipeline[1] = fetched_now;
|
||||
|
@ -438,24 +468,32 @@ impl Core {
|
|||
.unwrap_or_else(|| unsafe { std::hint::unreachable_unchecked() });
|
||||
if cond != ArmCond::AL {
|
||||
if !self.check_arm_cond(cond) {
|
||||
self.S_cycle32(bus, self.pc);
|
||||
self.advance_arm();
|
||||
self.next_fetch_access = MemoryAccess::NonSeq;
|
||||
return;
|
||||
}
|
||||
}
|
||||
match self.step_arm_exec(insn, bus) {
|
||||
CpuAction::AdvancePC => self.advance_arm(),
|
||||
CpuAction::FlushPipeline => {}
|
||||
match self.step_arm_exec(insn) {
|
||||
CpuAction::AdvancePC(access) => {
|
||||
self.next_fetch_access = access;
|
||||
self.advance_arm();
|
||||
}
|
||||
CpuAction::PipelineFlushed => {}
|
||||
}
|
||||
}
|
||||
CpuState::THUMB => {
|
||||
let fetched_now = bus.read_16(pc);
|
||||
let pc = self.pc & !1;
|
||||
|
||||
let fetched_now = self.load_16(pc, self.next_fetch_access);
|
||||
let insn = self.pipeline[0];
|
||||
self.pipeline[0] = self.pipeline[1];
|
||||
self.pipeline[1] = fetched_now as u32;
|
||||
match self.step_thumb_exec(insn as u16, bus) {
|
||||
CpuAction::AdvancePC => self.advance_thumb(),
|
||||
CpuAction::FlushPipeline => {}
|
||||
match self.step_thumb_exec(insn as u16) {
|
||||
CpuAction::AdvancePC(access) => {
|
||||
self.advance_thumb();
|
||||
self.next_fetch_access = access;
|
||||
}
|
||||
CpuAction::PipelineFlushed => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -472,12 +510,12 @@ impl Core {
|
|||
}
|
||||
|
||||
pub fn skip_bios(&mut self) {
|
||||
self.gpr_banked_r13[0] = 0x0300_7f00; // USR/SYS
|
||||
self.gpr_banked_r13[1] = 0x0300_7f00; // FIQ
|
||||
self.gpr_banked_r13[2] = 0x0300_7fa0; // IRQ
|
||||
self.gpr_banked_r13[3] = 0x0300_7fe0; // SVC
|
||||
self.gpr_banked_r13[4] = 0x0300_7f00; // ABT
|
||||
self.gpr_banked_r13[5] = 0x0300_7f00; // UND
|
||||
self.banks.gpr_banked_r13[0] = 0x0300_7f00; // USR/SYS
|
||||
self.banks.gpr_banked_r13[1] = 0x0300_7f00; // FIQ
|
||||
self.banks.gpr_banked_r13[2] = 0x0300_7fa0; // IRQ
|
||||
self.banks.gpr_banked_r13[3] = 0x0300_7fe0; // SVC
|
||||
self.banks.gpr_banked_r13[4] = 0x0300_7f00; // ABT
|
||||
self.banks.gpr_banked_r13[5] = 0x0300_7f00; // UND
|
||||
|
||||
self.gpr[13] = 0x0300_7f00;
|
||||
self.pc = 0x0800_0000;
|
||||
|
@ -487,10 +525,9 @@ impl Core {
|
|||
}
|
||||
|
||||
#[cfg(feature = "debugger")]
|
||||
impl fmt::Display for Core {
|
||||
impl<I: MemoryInterface> fmt::Display for Core<I> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
writeln!(f, "ARM7TDMI Core Status:")?;
|
||||
writeln!(f, "\tCycles: {}", self.cycles)?;
|
||||
writeln!(f, "\tCPSR: {}", self.cpsr)?;
|
||||
writeln!(f, "\tGeneral Purpose Registers:")?;
|
||||
let reg_normal_style = Style::new().bold();
|
||||
|
@ -519,3 +556,8 @@ impl fmt::Display for Core {
|
|||
writeln!(f, "{}", reg_normal_style.paint(pc))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "arm7tdmi_dispatch_table")]
|
||||
include!(concat!(env!("OUT_DIR"), "/arm_lut.rs"));
|
||||
#[cfg(feature = "arm7tdmi_dispatch_table")]
|
||||
include!(concat!(env!("OUT_DIR"), "/thumb_lut.rs"));
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
use super::super::sysbus::SysBus;
|
||||
use super::cpu::Core;
|
||||
use super::memory::MemoryInterface;
|
||||
use super::{CpuMode, CpuState};
|
||||
use colored::*;
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq)]
|
||||
#[allow(dead_code)]
|
||||
|
@ -17,8 +16,8 @@ pub enum Exception {
|
|||
Fiq = 0x1c,
|
||||
}
|
||||
|
||||
impl Core {
|
||||
pub fn exception(&mut self, sb: &mut SysBus, e: Exception, lr: u32) {
|
||||
impl<I: MemoryInterface> Core<I> {
|
||||
pub fn exception(&mut self, e: Exception, lr: u32) {
|
||||
use Exception::*;
|
||||
let (new_mode, irq_disable, fiq_disable) = match e {
|
||||
Reset => (CpuMode::Supervisor, true, true),
|
||||
|
@ -30,18 +29,9 @@ impl Core {
|
|||
Irq => (CpuMode::Irq, true, false),
|
||||
Fiq => (CpuMode::Fiq, true, true),
|
||||
};
|
||||
trace!(
|
||||
"{}: {:?}, pc: {:#x}, new_mode: {:?} old_mode: {:?}",
|
||||
"Exception".cyan(),
|
||||
e,
|
||||
self.pc,
|
||||
new_mode,
|
||||
self.cpsr.mode(),
|
||||
);
|
||||
|
||||
let new_bank = new_mode.bank_index();
|
||||
self.spsr_bank[new_bank] = self.cpsr;
|
||||
self.gpr_banked_r14[new_bank] = lr;
|
||||
self.banks.spsr_bank[new_bank] = self.cpsr;
|
||||
self.banks.gpr_banked_r14[new_bank] = lr;
|
||||
self.change_mode(self.cpsr.mode(), new_mode);
|
||||
|
||||
// Set appropriate CPSR bits
|
||||
|
@ -56,21 +46,19 @@ impl Core {
|
|||
|
||||
// Set PC to vector address
|
||||
self.pc = e as u32;
|
||||
self.reload_pipeline32(sb);
|
||||
self.reload_pipeline32();
|
||||
}
|
||||
|
||||
pub fn irq(&mut self, sb: &mut SysBus) {
|
||||
#[inline]
|
||||
pub fn irq(&mut self) {
|
||||
if !self.cpsr.irq_disabled() {
|
||||
let lr = self.get_next_pc() + 4;
|
||||
self.exception(sb, Exception::Irq, lr);
|
||||
self.exception(Exception::Irq, lr);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn software_interrupt(&mut self, sb: &mut SysBus, lr: u32, _cmt: u32) {
|
||||
match self.cpsr.state() {
|
||||
CpuState::ARM => self.N_cycle32(sb, self.pc),
|
||||
CpuState::THUMB => self.N_cycle16(sb, self.pc),
|
||||
};
|
||||
self.exception(sb, Exception::SoftwareInterrupt, lr);
|
||||
#[inline]
|
||||
pub fn software_interrupt(&mut self, lr: u32, _cmt: u32) {
|
||||
self.exception(Exception::SoftwareInterrupt, lr);
|
||||
}
|
||||
}
|
||||
|
|
core/src/arm7tdmi/memory.rs (new file, 164 lines)
@ -0,0 +1,164 @@
use super::cpu::Core;
use super::Addr;
use std::fmt;

#[derive(Serialize, Deserialize, Debug, Copy, Clone)]
pub enum MemoryAccess {
    NonSeq = 0,
    Seq,
}

impl Default for MemoryAccess {
    fn default() -> MemoryAccess {
        MemoryAccess::NonSeq
    }
}

impl fmt::Display for MemoryAccess {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "{}",
            match self {
                MemoryAccess::NonSeq => "N",
                MemoryAccess::Seq => "S",
            }
        )
    }
}

#[derive(Debug, PartialEq, Copy, Clone)]
#[repr(u8)]
pub enum MemoryAccessWidth {
    MemoryAccess8 = 0,
    MemoryAccess16,
    MemoryAccess32,
}

/// A trait meant to abstract memory accesses and report the access type back to the user of the arm7tdmi::Core
///
/// struct Memory {
///     data: [u8; 0x4000]
/// }
///
/// impl MemoryInterface for Memory {
///     fn load_8(&mut self, addr: u32, access: MemoryAccess) {
///         debug!("CPU read {:?} cycle", access);
///         self.data[addr & 0x3fff]
///     }
///
///     fn store_8(&mut self, addr: u32, value: u8, access: MemoryAccess) {
///         debug!("CPU write {:?} cycle", access);
///         self.data[addr & 0x3fff] = value;
///     }
///
///     fn idle_cycle(&mut self) {
///         debug!("CPU idle cycle");
///     }
///
///     // implement rest of trait methods
/// }
///
/// let mem = Shared::new(Memory { ... });
/// let cpu = arm7tdmi::Core::new(mem.clone())
///
pub trait MemoryInterface {
    /// Read a byte
    fn load_8(&mut self, addr: u32, access: MemoryAccess) -> u8;
    /// Read a halfword
    fn load_16(&mut self, addr: u32, access: MemoryAccess) -> u16;
    /// Read a word
    fn load_32(&mut self, addr: u32, access: MemoryAccess) -> u32;

    /// Write a byte
    fn store_8(&mut self, addr: u32, value: u8, access: MemoryAccess);
    /// Write a halfword
    fn store_16(&mut self, addr: u32, value: u16, access: MemoryAccess);
    /// Write a word
    fn store_32(&mut self, addr: u32, value: u32, access: MemoryAccess);

    fn idle_cycle(&mut self);
}

impl<I: MemoryInterface> MemoryInterface for Core<I> {
    #[inline]
    fn load_8(&mut self, addr: u32, access: MemoryAccess) -> u8 {
        self.bus.load_8(addr, access)
    }

    #[inline]
    fn load_16(&mut self, addr: u32, access: MemoryAccess) -> u16 {
        self.bus.load_16(addr, access)
    }

    #[inline]
    fn load_32(&mut self, addr: u32, access: MemoryAccess) -> u32 {
        self.bus.load_32(addr, access)
    }

    #[inline]
    fn store_8(&mut self, addr: u32, value: u8, access: MemoryAccess) {
        self.bus.store_8(addr, value, access);
    }

    #[inline]
    fn store_16(&mut self, addr: u32, value: u16, access: MemoryAccess) {
        self.bus.store_16(addr, value, access);
    }

    #[inline]
    fn store_32(&mut self, addr: u32, value: u32, access: MemoryAccess) {
        self.bus.store_32(addr, value, access);
    }

    #[inline]
    fn idle_cycle(&mut self) {
        self.bus.idle_cycle();
    }
}

/// Implementation of memory access helpers
impl<I: MemoryInterface> Core<I> {
    #[inline]
    pub(super) fn store_aligned_32(&mut self, addr: Addr, value: u32, access: MemoryAccess) {
        self.store_32(addr & !0x3, value, access);
    }

    #[inline]
    pub(super) fn store_aligned_16(&mut self, addr: Addr, value: u16, access: MemoryAccess) {
        self.store_16(addr & !0x1, value, access);
    }

    /// Helper function for "ldr" instruction that handles misaligned addresses
    #[inline]
    pub(super) fn ldr_word(&mut self, addr: Addr, access: MemoryAccess) -> u32 {
        if addr & 0x3 != 0 {
            let rotation = (addr & 0x3) << 3;
            let value = self.load_32(addr & !0x3, access);
            self.ror(value, rotation, self.cpsr.C(), false, false)
        } else {
            self.load_32(addr, access)
        }
    }

    /// Helper function for "ldrh" instruction that handles misaligned addresses
    #[inline]
    pub(super) fn ldr_half(&mut self, addr: Addr, access: MemoryAccess) -> u32 {
        if addr & 0x1 != 0 {
            let rotation = (addr & 0x1) << 3;
            let value = self.load_16(addr & !0x1, access);
            self.ror(value as u32, rotation, self.cpsr.C(), false, false)
        } else {
            self.load_16(addr, access) as u32
        }
    }

    /// Helper function for "ldrsh" instruction that handles misaligned addresses
    #[inline]
    pub(super) fn ldr_sign_half(&mut self, addr: Addr, access: MemoryAccess) -> u32 {
        if addr & 0x1 != 0 {
            self.load_8(addr, access) as i8 as i32 as u32
        } else {
            self.load_16(addr, access) as i16 as i32 as u32
        }
    }
}
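The doc-comment above already hints at the intended usage. As a slightly fuller, still hypothetical illustration, here is the trait implemented for a flat 16 KiB test RAM that simply counts cycles; the FlatRam type, its fields, and the one-cycle-per-access model are assumptions made for this example, not how the emulator's real SysBus weighs accesses by region and width.

// Hypothetical sketch: assumes the MemoryInterface/MemoryAccess definitions above are in scope.
pub struct FlatRam {
    data: [u8; 0x4000],
    cycles: usize,
}

impl MemoryInterface for FlatRam {
    fn load_8(&mut self, addr: u32, _access: MemoryAccess) -> u8 {
        self.cycles += 1; // toy model: every access costs one cycle regardless of N/S
        self.data[(addr as usize) & 0x3fff]
    }
    fn load_16(&mut self, addr: u32, access: MemoryAccess) -> u16 {
        u16::from_le_bytes([self.load_8(addr, access), self.load_8(addr + 1, access)])
    }
    fn load_32(&mut self, addr: u32, access: MemoryAccess) -> u32 {
        u32::from(self.load_16(addr, access)) | (u32::from(self.load_16(addr + 2, access)) << 16)
    }
    fn store_8(&mut self, addr: u32, value: u8, _access: MemoryAccess) {
        self.cycles += 1;
        self.data[(addr as usize) & 0x3fff] = value;
    }
    fn store_16(&mut self, addr: u32, value: u16, access: MemoryAccess) {
        let [lo, hi] = value.to_le_bytes();
        self.store_8(addr, lo, access);
        self.store_8(addr + 1, hi, access);
    }
    fn store_32(&mut self, addr: u32, value: u32, access: MemoryAccess) {
        self.store_16(addr, value as u16, access);
        self.store_16(addr + 2, (value >> 16) as u16, access);
    }
    fn idle_cycle(&mut self) {
        self.cycles += 1; // internal cycle: no bus activity, time still advances
    }
}

Building the wider accesses out of byte loads keeps the sketch short; a real bus implements each width natively and charges wait-states according to the MemoryAccess it receives.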
@ -12,6 +12,7 @@ use thumb::ThumbInstruction;
pub mod cpu;
pub use cpu::*;
pub mod alu;
pub mod memory;
pub use alu::*;
pub mod exception;
pub mod psr;
@ -23,11 +24,6 @@ pub const REG_SP: usize = 13;

pub(self) use crate::Addr;

pub enum CpuAction {
    AdvancePC,
    FlushPipeline,
}

#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)]
pub enum DecodedInstruction {
    Arm(ArmInstruction),
@ -1,30 +1,16 @@
|
|||
use crate::arm7tdmi::*;
|
||||
use crate::sysbus::SysBus;
|
||||
use crate::Bus;
|
||||
|
||||
use crate::bit::BitIndex;
|
||||
|
||||
use super::super::memory::{MemoryAccess, MemoryInterface};
|
||||
use super::ThumbDecodeHelper;
|
||||
use super::*;
|
||||
use MemoryAccess::*;
|
||||
|
||||
fn push(cpu: &mut Core, bus: &mut SysBus, r: usize) {
|
||||
cpu.gpr[REG_SP] -= 4;
|
||||
let stack_addr = cpu.gpr[REG_SP] & !3;
|
||||
bus.write_32(stack_addr, cpu.get_reg(r))
|
||||
}
|
||||
fn pop(cpu: &mut Core, bus: &mut SysBus, r: usize) {
|
||||
let val = bus.read_32(cpu.gpr[REG_SP] & !3);
|
||||
cpu.set_reg(r, val);
|
||||
cpu.gpr[REG_SP] += 4;
|
||||
}
|
||||
|
||||
impl Core {
|
||||
impl<I: MemoryInterface> Core<I> {
|
||||
/// Format 1
|
||||
pub(in super::super) fn exec_thumb_move_shifted_reg(
|
||||
&mut self,
|
||||
sb: &mut SysBus,
|
||||
insn: u16,
|
||||
) -> CpuAction {
|
||||
/// Execution Time: 1S
|
||||
pub(in super::super) fn exec_thumb_move_shifted_reg(&mut self, insn: u16) -> CpuAction {
|
||||
let rd = (insn & 0b111) as usize;
|
||||
let rs = insn.bit_range(3..6) as usize;
|
||||
|
||||
|
@ -39,13 +25,12 @@ impl Core {
|
|||
self.gpr[rd] = op2;
|
||||
self.alu_update_flags(op2, false, self.bs_carry_out, self.cpsr.V());
|
||||
|
||||
self.S_cycle16(sb, self.pc + 2);
|
||||
|
||||
CpuAction::AdvancePC
|
||||
CpuAction::AdvancePC(Seq)
|
||||
}
|
||||
|
||||
/// Format 2
|
||||
pub(in super::super) fn exec_thumb_add_sub(&mut self, sb: &mut SysBus, insn: u16) -> CpuAction {
|
||||
/// Execution Time: 1S
|
||||
pub(in super::super) fn exec_thumb_add_sub(&mut self, insn: u16) -> CpuAction {
|
||||
let rd = (insn & 0b111) as usize;
|
||||
let op1 = self.get_reg(insn.rs());
|
||||
let op2 = if insn.is_immediate_operand() {
|
||||
|
@ -64,17 +49,12 @@ impl Core {
|
|||
self.alu_update_flags(result, true, carry, overflow);
|
||||
self.set_reg(rd, result as u32);
|
||||
|
||||
self.S_cycle16(sb, self.pc + 2);
|
||||
|
||||
CpuAction::AdvancePC
|
||||
CpuAction::AdvancePC(Seq)
|
||||
}
|
||||
|
||||
/// Format 3
|
||||
pub(in super::super) fn exec_thumb_data_process_imm(
|
||||
&mut self,
|
||||
sb: &mut SysBus,
|
||||
insn: u16,
|
||||
) -> CpuAction {
|
||||
/// Execution Time: 1S
|
||||
pub(in super::super) fn exec_thumb_data_process_imm(&mut self, insn: u16) -> CpuAction {
|
||||
use OpFormat3::*;
|
||||
let op = insn.format3_op();
|
||||
let rd = insn.bit_range(8..11) as usize;
|
||||
|
@ -92,13 +72,16 @@ impl Core {
|
|||
if op != CMP {
|
||||
self.gpr[rd] = result as u32;
|
||||
}
|
||||
self.S_cycle16(sb, self.pc + 2);
|
||||
|
||||
CpuAction::AdvancePC
|
||||
CpuAction::AdvancePC(Seq)
|
||||
}
|
||||
|
||||
/// Format 4
|
||||
pub(in super::super) fn exec_thumb_alu_ops(&mut self, sb: &mut SysBus, insn: u16) -> CpuAction {
|
||||
/// Execution Time:
|
||||
/// 1S for AND,EOR,ADC,SBC,TST,NEG,CMP,CMN,ORR,BIC,MVN
|
||||
/// 1S+1I for LSL,LSR,ASR,ROR
|
||||
/// 1S+mI for MUL on ARMv4 (m=1..4; depending on MSBs of incoming Rd value)
|
||||
pub(in super::super) fn exec_thumb_alu_ops(&mut self, insn: u16) -> CpuAction {
|
||||
let rd = (insn & 0b111) as usize;
|
||||
let rs = insn.rs();
|
||||
let dst = self.get_reg(rd);
|
||||
|
@ -109,22 +92,23 @@ impl Core {
|
|||
|
||||
use ThumbAluOps::*;
|
||||
let op = insn.format4_alu_op();
|
||||
|
||||
macro_rules! shifter_op {
|
||||
($bs_op:expr) => {{
|
||||
let result = self.shift_by_register($bs_op, rd, rs, carry);
|
||||
self.idle_cycle();
|
||||
carry = self.bs_carry_out;
|
||||
result
|
||||
}};
|
||||
}
|
||||
|
||||
let result = match op {
|
||||
AND | TST => dst & src,
|
||||
EOR => dst ^ src,
|
||||
LSL | LSR | ASR | ROR => {
|
||||
// TODO optimize this second match, keeping it here for code clarity
|
||||
let bs_op = match op {
|
||||
LSL => BarrelShiftOpCode::LSL,
|
||||
LSR => BarrelShiftOpCode::LSR,
|
||||
ASR => BarrelShiftOpCode::ASR,
|
||||
ROR => BarrelShiftOpCode::ROR,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
let result = self.shift_by_register(bs_op, rd, rs, carry);
|
||||
carry = self.bs_carry_out;
|
||||
result
|
||||
}
|
||||
LSL => shifter_op!(BarrelShiftOpCode::LSL),
|
||||
LSR => shifter_op!(BarrelShiftOpCode::LSR),
|
||||
ASR => shifter_op!(BarrelShiftOpCode::ASR),
|
||||
ROR => shifter_op!(BarrelShiftOpCode::ROR),
|
||||
ADC => self.alu_adc_flags(dst, src, &mut carry, &mut overflow),
|
||||
SBC => self.alu_sbc_flags(dst, src, &mut carry, &mut overflow),
|
||||
NEG => self.alu_sub_flags(0, src, &mut carry, &mut overflow),
|
||||
|
@ -134,7 +118,7 @@ impl Core {
|
|||
MUL => {
|
||||
let m = self.get_required_multipiler_array_cycles(src);
|
||||
for _ in 0..m {
|
||||
self.add_cycle();
|
||||
self.idle_cycle();
|
||||
}
|
||||
// TODO - meaningless values?
|
||||
carry = false;
|
||||
|
@ -149,17 +133,15 @@ impl Core {
|
|||
if !op.is_setting_flags() {
|
||||
self.set_reg(rd, result as u32);
|
||||
}
|
||||
self.S_cycle16(sb, self.pc + 2);
|
||||
|
||||
CpuAction::AdvancePC
|
||||
CpuAction::AdvancePC(Seq)
|
||||
}
|
||||
|
||||
/// Format 5
|
||||
pub(in super::super) fn exec_thumb_hi_reg_op_or_bx(
|
||||
&mut self,
|
||||
sb: &mut SysBus,
|
||||
insn: u16,
|
||||
) -> CpuAction {
|
||||
/// Execution Time:
|
||||
/// 1S for ADD/MOV/CMP
|
||||
/// 2S+1N for ADD/MOV with Rd=R15, and for BX
|
||||
pub(in super::super) fn exec_thumb_hi_reg_op_or_bx(&mut self, insn: u16) -> CpuAction {
|
||||
let op = insn.format5_op();
|
||||
let rd = (insn & 0b111) as usize;
|
||||
let dst_reg = if insn.bit(consts::flags::FLAG_H1) {
|
||||
|
@ -175,16 +157,16 @@ impl Core {
|
|||
let op1 = self.get_reg(dst_reg);
|
||||
let op2 = self.get_reg(src_reg);
|
||||
|
||||
let mut result = CpuAction::AdvancePC;
|
||||
let mut result = CpuAction::AdvancePC(Seq);
|
||||
match op {
|
||||
OpFormat5::BX => {
|
||||
return self.branch_exchange(sb, self.get_reg(src_reg));
|
||||
return self.branch_exchange(self.get_reg(src_reg));
|
||||
}
|
||||
OpFormat5::ADD => {
|
||||
self.set_reg(dst_reg, op1.wrapping_add(op2));
|
||||
if dst_reg == REG_PC {
|
||||
result = CpuAction::FlushPipeline;
|
||||
self.reload_pipeline16(sb);
|
||||
self.reload_pipeline16();
|
||||
result = CpuAction::PipelineFlushed;
|
||||
}
|
||||
}
|
||||
OpFormat5::CMP => {
|
||||
|
@ -196,38 +178,35 @@ impl Core {
|
|||
OpFormat5::MOV => {
|
||||
self.set_reg(dst_reg, op2 as u32);
|
||||
if dst_reg == REG_PC {
|
||||
result = CpuAction::FlushPipeline;
|
||||
self.reload_pipeline16(sb);
|
||||
self.reload_pipeline16();
|
||||
result = CpuAction::PipelineFlushed;
|
||||
}
|
||||
}
|
||||
}
|
||||
self.S_cycle16(sb, self.pc + 2);
|
||||
|
||||
result
|
||||
}
|
||||
|
||||
/// Format 6
|
||||
pub(in super::super) fn exec_thumb_ldr_pc(&mut self, sb: &mut SysBus, insn: u16) -> CpuAction {
|
||||
/// Format 6 load PC-relative (for loading immediates from literal pool)
|
||||
/// Execution Time: 1S+1N+1I
|
||||
pub(in super::super) fn exec_thumb_ldr_pc(&mut self, insn: u16) -> CpuAction {
|
||||
let rd = insn.bit_range(8..11) as usize;
|
||||
|
||||
let ofs = insn.word8() as Addr;
|
||||
let addr = (self.pc & !3) + ofs;
|
||||
|
||||
self.S_cycle16(sb, self.pc + 2);
|
||||
let data = self.ldr_word(addr, sb);
|
||||
self.N_cycle16(sb, addr);
|
||||
|
||||
self.gpr[rd] = data;
|
||||
self.gpr[rd] = self.load_32(addr, NonSeq);
|
||||
|
||||
// +1I
|
||||
self.add_cycle();
|
||||
self.idle_cycle();
|
||||
|
||||
CpuAction::AdvancePC
|
||||
CpuAction::AdvancePC(NonSeq)
|
||||
}
|
||||
|
||||
/// Helper function for various ldr/str handlers
|
||||
/// Execution Time: 1S+1N+1I for LDR, or 2N for STR
|
||||
fn do_exec_thumb_ldr_str(
|
||||
&mut self,
|
||||
sb: &mut SysBus,
|
||||
insn: u16,
|
||||
|
||||
addr: Addr,
|
||||
|
@ -236,50 +215,38 @@ impl Core {
|
|||
let rd = (insn & 0b111) as usize;
|
||||
if insn.is_load() {
|
||||
let data = if is_transferring_bytes {
|
||||
self.S_cycle8(sb, addr);
|
||||
sb.read_8(addr) as u32
|
||||
self.load_8(addr, NonSeq) as u32
|
||||
} else {
|
||||
self.S_cycle32(sb, addr);
|
||||
self.ldr_word(addr, sb)
|
||||
self.ldr_word(addr, NonSeq)
|
||||
};
|
||||
|
||||
self.gpr[rd] = data;
|
||||
|
||||
// +1I
|
||||
self.add_cycle();
|
||||
self.idle_cycle();
|
||||
CpuAction::AdvancePC(Seq)
|
||||
} else {
|
||||
let value = self.get_reg(rd);
|
||||
if is_transferring_bytes {
|
||||
self.N_cycle8(sb, addr);
|
||||
self.write_8(addr, value as u8, sb);
|
||||
self.store_8(addr, value as u8, NonSeq);
|
||||
} else {
|
||||
self.N_cycle32(sb, addr);
|
||||
self.write_32(addr, value, sb);
|
||||
self.store_aligned_32(addr, value, NonSeq);
|
||||
};
|
||||
CpuAction::AdvancePC(NonSeq)
|
||||
}
|
||||
|
||||
self.N_cycle16(sb, self.pc + 2);
|
||||
|
||||
CpuAction::AdvancePC
|
||||
}
|
||||
|
||||
/// Format 7
|
||||
pub(in super::super) fn exec_thumb_ldr_str_reg_offset(
|
||||
&mut self,
|
||||
bus: &mut SysBus,
|
||||
insn: u16,
|
||||
) -> CpuAction {
|
||||
/// Format 7 load/store with register offset
|
||||
/// Execution Time: 1S+1N+1I for LDR, or 2N for STR
|
||||
pub(in super::super) fn exec_thumb_ldr_str_reg_offset(&mut self, insn: u16) -> CpuAction {
|
||||
let rb = insn.bit_range(3..6) as usize;
|
||||
let addr = self.gpr[rb].wrapping_add(self.gpr[insn.ro()]);
|
||||
self.do_exec_thumb_ldr_str(bus, insn, addr, insn.bit(10))
|
||||
self.do_exec_thumb_ldr_str(insn, addr, insn.bit(10))
|
||||
}
|
||||
|
||||
/// Format 8
|
||||
pub(in super::super) fn exec_thumb_ldr_str_shb(
|
||||
&mut self,
|
||||
sb: &mut SysBus,
|
||||
insn: u16,
|
||||
) -> CpuAction {
|
||||
/// Format 8 load/store sign-extended byte/halfword
|
||||
/// Execution Time: 1S+1N+1I for LDR, or 2N for STR
|
||||
pub(in super::super) fn exec_thumb_ldr_str_shb(&mut self, insn: u16) -> CpuAction {
|
||||
let rb = insn.bit_range(3..6) as usize;
|
||||
let rd = (insn & 0b111) as usize;
|
||||
|
||||
|
@ -291,45 +258,36 @@ impl Core {
|
|||
(false, false) =>
|
||||
/* strh */
|
||||
{
|
||||
self.write_16(addr, self.gpr[rd] as u16, sb);
|
||||
self.N_cycle16(sb, addr);
|
||||
self.store_aligned_16(addr, self.gpr[rd] as u16, NonSeq);
|
||||
}
|
||||
(false, true) =>
|
||||
/* ldrh */
|
||||
{
|
||||
self.gpr[rd] = self.ldr_half(addr, sb);
|
||||
self.S_cycle16(sb, addr);
|
||||
self.add_cycle();
|
||||
self.gpr[rd] = self.ldr_half(addr, NonSeq);
|
||||
self.idle_cycle();
|
||||
}
|
||||
(true, false) =>
|
||||
/* ldsb */
|
||||
/* ldsb */
|
||||
{
|
||||
let val = sb.read_8(addr) as i8 as i32 as u32;
|
||||
let val = self.load_8(addr, NonSeq) as i8 as i32 as u32;
|
||||
self.gpr[rd] = val;
|
||||
self.S_cycle8(sb, addr);
|
||||
self.add_cycle();
|
||||
self.idle_cycle();
|
||||
}
|
||||
(true, true) =>
|
||||
/* ldsh */
|
||||
{
|
||||
let val = self.ldr_sign_half(addr, sb);
|
||||
let val = self.ldr_sign_half(addr, NonSeq);
|
||||
self.gpr[rd] = val;
|
||||
self.S_cycle16(sb, addr);
|
||||
self.add_cycle();
|
||||
self.idle_cycle();
|
||||
}
|
||||
}
|
||||
|
||||
self.N_cycle16(sb, self.pc + 2);
|
||||
|
||||
CpuAction::AdvancePC
|
||||
CpuAction::AdvancePC(NonSeq)
|
||||
}
|
||||
|
||||
/// Format 9
|
||||
pub(in super::super) fn exec_thumb_ldr_str_imm_offset(
|
||||
&mut self,
|
||||
sb: &mut SysBus,
|
||||
insn: u16,
|
||||
) -> CpuAction {
|
||||
/// Execution Time: 1S+1N+1I for LDR, or 2N for STR
|
||||
pub(in super::super) fn exec_thumb_ldr_str_imm_offset(&mut self, insn: u16) -> CpuAction {
|
||||
let rb = insn.bit_range(3..6) as usize;
|
||||
|
||||
let offset = if insn.bit(12) {
|
||||
|
@ -338,129 +296,117 @@ impl Core {
|
|||
(insn.offset5() << 3) >> 1
|
||||
};
|
||||
let addr = self.gpr[rb].wrapping_add(offset as u32);
|
||||
self.do_exec_thumb_ldr_str(sb, insn, addr, insn.bit(12))
|
||||
self.do_exec_thumb_ldr_str(insn, addr, insn.bit(12))
|
||||
}
|
||||
|
||||
/// Format 10
|
||||
pub(in super::super) fn exec_thumb_ldr_str_halfword(
|
||||
&mut self,
|
||||
sb: &mut SysBus,
|
||||
insn: u16,
|
||||
) -> CpuAction {
|
||||
/// Execution Time: 1S+1N+1I for LDR, or 2N for STR
|
||||
pub(in super::super) fn exec_thumb_ldr_str_halfword(&mut self, insn: u16) -> CpuAction {
|
||||
let rb = insn.bit_range(3..6) as usize;
|
||||
let rd = (insn & 0b111) as usize;
|
||||
let base = self.gpr[rb] as i32;
|
||||
let addr = base.wrapping_add((insn.offset5() << 1) as i32) as Addr;
|
||||
if insn.is_load() {
|
||||
let data = self.ldr_half(addr, sb);
|
||||
self.S_cycle16(sb, addr);
|
||||
self.add_cycle();
|
||||
let data = self.ldr_half(addr, NonSeq);
|
||||
self.idle_cycle();
|
||||
self.gpr[rd] = data as u32;
|
||||
CpuAction::AdvancePC(Seq)
|
||||
} else {
|
||||
self.write_16(addr, self.gpr[rd] as u16, sb);
|
||||
self.N_cycle16(sb, addr);
|
||||
self.store_aligned_16(addr, self.gpr[rd] as u16, NonSeq);
|
||||
CpuAction::AdvancePC(NonSeq)
|
||||
}
|
||||
self.N_cycle16(sb, self.pc + 2);
|
||||
|
||||
CpuAction::AdvancePC
|
||||
}
|
||||
|
||||
/// Format 11
|
||||
pub(in super::super) fn exec_thumb_ldr_str_sp(
|
||||
&mut self,
|
||||
sb: &mut SysBus,
|
||||
insn: u16,
|
||||
) -> CpuAction {
|
||||
/// Format 11 load/store SP-relative
|
||||
/// Execution Time: 1S+1N+1I for LDR, or 2N for STR
|
||||
pub(in super::super) fn exec_thumb_ldr_str_sp(&mut self, insn: u16) -> CpuAction {
|
||||
let addr = self.gpr[REG_SP] + (insn.word8() as Addr);
|
||||
let rd = insn.bit_range(8..11) as usize;
|
||||
if insn.is_load() {
|
||||
let data = self.ldr_word(addr, sb);
|
||||
self.S_cycle16(sb, addr);
|
||||
self.add_cycle();
|
||||
let data = self.ldr_word(addr, NonSeq);
|
||||
self.idle_cycle();
|
||||
self.gpr[rd] = data;
|
||||
CpuAction::AdvancePC(Seq)
|
||||
} else {
|
||||
self.write_32(addr, self.gpr[rd], sb);
|
||||
self.N_cycle16(sb, addr);
|
||||
self.store_aligned_32(addr, self.gpr[rd], NonSeq);
|
||||
CpuAction::AdvancePC(NonSeq)
|
||||
}
|
||||
self.N_cycle16(sb, self.pc + 2);
|
||||
|
||||
CpuAction::AdvancePC
|
||||
}
|
||||
|
||||
/// Format 12
|
||||
pub(in super::super) fn exec_thumb_load_address(
|
||||
&mut self,
|
||||
sb: &mut SysBus,
|
||||
insn: u16,
|
||||
) -> CpuAction {
|
||||
/// Execution Time: 1S
|
||||
pub(in super::super) fn exec_thumb_load_address(&mut self, insn: u16) -> CpuAction {
|
||||
let rd = insn.bit_range(8..11) as usize;
|
||||
let result = if insn.bit(consts::flags::FLAG_SP) {
|
||||
|
||||
self.gpr[rd] = if insn.bit(consts::flags::FLAG_SP) {
|
||||
self.gpr[REG_SP] + (insn.word8() as Addr)
|
||||
} else {
|
||||
(self.pc_thumb() & !0b10) + 4 + (insn.word8() as Addr)
|
||||
};
|
||||
self.gpr[rd] = result;
|
||||
self.S_cycle16(sb, self.pc + 2);
|
||||
|
||||
CpuAction::AdvancePC
|
||||
CpuAction::AdvancePC(Seq)
|
||||
}
|
||||
|
||||
/// Format 13
|
||||
pub(in super::super) fn exec_thumb_add_sp(&mut self, sb: &mut SysBus, insn: u16) -> CpuAction {
|
||||
/// Execution Time: 1S
|
||||
pub(in super::super) fn exec_thumb_add_sp(&mut self, insn: u16) -> CpuAction {
|
||||
let op1 = self.gpr[REG_SP] as i32;
|
||||
let op2 = insn.sword7();
|
||||
|
||||
self.gpr[REG_SP] = op1.wrapping_add(op2) as u32;
|
||||
self.S_cycle16(sb, self.pc + 2);
|
||||
|
||||
CpuAction::AdvancePC
|
||||
CpuAction::AdvancePC(Seq)
|
||||
}
|
||||
|
||||
/// Format 14
|
||||
pub(in super::super) fn exec_thumb_push_pop(
|
||||
&mut self,
|
||||
sb: &mut SysBus,
|
||||
insn: u16,
|
||||
) -> CpuAction {
|
||||
let mut result = CpuAction::AdvancePC;
|
||||
|
||||
// (From GBATEK) Execution Time: nS+1N+1I (POP), (n+1)S+2N+1I (POP PC), or (n-1)S+2N (PUSH).
|
||||
/// Execution Time: nS+1N+1I (POP), (n+1)S+2N+1I (POP PC), or (n-1)S+2N (PUSH).
|
||||
pub(in super::super) fn exec_thumb_push_pop(&mut self, insn: u16) -> CpuAction {
|
||||
macro_rules! push {
|
||||
($r:expr, $access:ident) => {
|
||||
self.gpr[REG_SP] -= 4;
|
||||
let stack_addr = self.gpr[REG_SP] & !3;
|
||||
self.store_32(stack_addr, self.get_reg($r), $access);
|
||||
$access = Seq;
|
||||
};
|
||||
}
|
||||
macro_rules! pop {
|
||||
($r:expr) => {
|
||||
let val = self.load_32(self.gpr[REG_SP] & !3, Seq);
|
||||
self.set_reg($r, val);
|
||||
self.gpr[REG_SP] += 4;
|
||||
};
|
||||
($r:expr, $access:ident) => {
|
||||
let val = self.load_32(self.gpr[REG_SP] & !3, $access);
|
||||
$access = Seq;
|
||||
self.set_reg($r, val);
|
||||
self.gpr[REG_SP] += 4;
|
||||
};
|
||||
}
|
||||
let mut result = CpuAction::AdvancePC(NonSeq);
|
||||
let is_pop = insn.is_load();
|
||||
let pc_lr_flag = insn.bit(consts::flags::FLAG_R);
|
||||
let rlist = insn.register_list();
|
||||
self.N_cycle16(sb, self.pc);
|
||||
let mut first = true;
|
||||
let mut access = MemoryAccess::NonSeq;
|
||||
if is_pop {
|
||||
for r in 0..8 {
|
||||
if rlist.bit(r) {
|
||||
pop(self, sb, r);
|
||||
if first {
|
||||
self.add_cycle();
|
||||
first = false;
|
||||
} else {
|
||||
self.S_cycle16(sb, self.gpr[REG_SP]);
|
||||
}
|
||||
pop!(r, access);
|
||||
}
|
||||
}
|
||||
if pc_lr_flag {
|
||||
pop(self, sb, REG_PC);
|
||||
pop!(REG_PC);
|
||||
self.pc = self.pc & !1;
|
||||
result = CpuAction::FlushPipeline;
|
||||
self.reload_pipeline16(sb);
|
||||
result = CpuAction::PipelineFlushed;
|
||||
self.reload_pipeline16();
|
||||
}
|
||||
self.S_cycle16(sb, self.pc + 2);
|
||||
// Idle 1 cycle
|
||||
self.idle_cycle();
|
||||
} else {
|
||||
if pc_lr_flag {
|
||||
push(self, sb, REG_LR);
|
||||
push!(REG_LR, access);
|
||||
}
|
||||
for r in (0..8).rev() {
|
||||
if rlist.bit(r) {
|
||||
push(self, sb, r);
|
||||
if first {
|
||||
first = false;
|
||||
} else {
|
||||
self.S_cycle16(sb, self.gpr[REG_SP]);
|
||||
}
|
||||
push!(r, access);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -469,10 +415,9 @@ impl Core {
|
|||
}
|
||||
|
||||
/// Format 15
|
||||
pub(in super::super) fn exec_thumb_ldm_stm(&mut self, sb: &mut SysBus, insn: u16) -> CpuAction {
|
||||
let mut result = CpuAction::AdvancePC;
|
||||
|
||||
// (From GBATEK) Execution Time: nS+1N+1I (POP), (n+1)S+2N+1I (POP PC), or (n-1)S+2N (PUSH).
|
||||
/// Execution Time: nS+1N+1I for LDM, or (n-1)S+2N for STM.
|
||||
pub(in super::super) fn exec_thumb_ldm_stm(&mut self, insn: u16) -> CpuAction {
|
||||
let mut result = CpuAction::AdvancePC(NonSeq);
|
||||
|
||||
let rb = insn.bit_range(8..11) as usize;
|
||||
let base_reg = rb;
|
||||
|
@ -481,31 +426,25 @@ impl Core {
|
|||
let align_preserve = self.gpr[base_reg] & 3;
|
||||
let mut addr = self.gpr[base_reg] & !3;
|
||||
let rlist = insn.register_list();
|
||||
self.N_cycle16(sb, self.pc);
|
||||
let mut first = true;
|
||||
|
||||
// let mut first = true;
|
||||
if rlist != 0 {
|
||||
if is_load {
|
||||
let writeback = !rlist.bit(base_reg);
|
||||
let mut access = NonSeq;
|
||||
for r in 0..8 {
|
||||
if rlist.bit(r) {
|
||||
let val = sb.read_32(addr);
|
||||
if first {
|
||||
first = false;
|
||||
self.add_cycle();
|
||||
} else {
|
||||
self.S_cycle16(sb, addr);
|
||||
}
|
||||
let val = self.load_32(addr, access);
|
||||
access = Seq;
|
||||
addr += 4;
|
||||
self.add_cycle();
|
||||
self.set_reg(r, val);
|
||||
}
|
||||
}
|
||||
self.S_cycle16(sb, self.pc + 2);
|
||||
if writeback {
|
||||
self.idle_cycle();
|
||||
if !rlist.bit(base_reg) {
|
||||
self.gpr[base_reg] = addr + align_preserve;
|
||||
}
|
||||
} else {
|
||||
let mut first = true;
|
||||
let mut access = NonSeq;
|
||||
for r in 0..8 {
|
||||
if rlist.bit(r) {
|
||||
let v = if r != base_reg {
|
||||
|
@ -519,10 +458,9 @@ impl Core {
|
|||
};
|
||||
if first {
|
||||
first = false;
|
||||
} else {
|
||||
self.S_cycle16(sb, addr);
|
||||
}
|
||||
sb.write_32(addr, v);
|
||||
self.store_32(addr, v, access);
|
||||
access = Seq;
|
||||
addr += 4;
|
||||
}
|
||||
self.gpr[base_reg] = addr + align_preserve;
|
||||
|
@ -531,12 +469,12 @@ impl Core {
|
|||
} else {
|
||||
// From gbatek.htm: Empty Rlist: R15 loaded/stored (ARMv4 only), and Rb=Rb+40h (ARMv4-v5).
|
||||
if is_load {
|
||||
let val = sb.read_32(addr);
|
||||
self.set_reg(REG_PC, val & !1);
|
||||
result = CpuAction::FlushPipeline;
|
||||
self.reload_pipeline16(sb);
|
||||
let val = self.load_32(addr, NonSeq);
|
||||
self.pc = val & !1;
|
||||
result = CpuAction::PipelineFlushed;
|
||||
self.reload_pipeline16();
|
||||
} else {
|
||||
sb.write_32(addr, self.pc + 2);
|
||||
self.store_32(addr, self.pc + 2, NonSeq);
|
||||
}
|
||||
addr += 0x40;
|
||||
self.gpr[base_reg] = addr + align_preserve;
|
||||
|
@ -546,64 +484,55 @@ impl Core {
|
|||
}
|
||||
|
||||
/// Format 16
|
||||
pub(in super::super) fn exec_thumb_branch_with_cond(
|
||||
&mut self,
|
||||
sb: &mut SysBus,
|
||||
insn: u16,
|
||||
) -> CpuAction {
|
||||
/// Execution Time:
|
||||
/// 2S+1N if condition true (jump executed)
|
||||
/// 1S if condition false
|
||||
pub(in super::super) fn exec_thumb_branch_with_cond(&mut self, insn: u16) -> CpuAction {
|
||||
if !self.check_arm_cond(insn.cond()) {
|
||||
self.S_cycle16(sb, self.pc + 2);
|
||||
CpuAction::AdvancePC
|
||||
CpuAction::AdvancePC(Seq)
|
||||
} else {
|
||||
let offset = insn.bcond_offset();
|
||||
self.S_cycle16(sb, self.pc);
|
||||
self.pc = (self.pc as i32).wrapping_add(offset) as u32;
|
||||
self.reload_pipeline16(sb);
|
||||
CpuAction::FlushPipeline
|
||||
self.reload_pipeline16();
|
||||
CpuAction::PipelineFlushed
|
||||
}
|
||||
}
|
||||
|
||||
/// Format 17
|
||||
pub(in super::super) fn exec_thumb_swi(&mut self, sb: &mut SysBus, _insn: u16) -> CpuAction {
|
||||
self.N_cycle16(sb, self.pc);
|
||||
self.exception(sb, Exception::SoftwareInterrupt, self.pc - 2);
|
||||
CpuAction::FlushPipeline
|
||||
/// Execution Time: 2S+1N
|
||||
pub(in super::super) fn exec_thumb_swi(&mut self, _insn: u16) -> CpuAction {
|
||||
self.exception(Exception::SoftwareInterrupt, self.pc - 2); // implies pipeline reload
|
||||
CpuAction::PipelineFlushed
|
||||
}
|
||||
|
||||
/// Format 18
|
||||
pub(in super::super) fn exec_thumb_branch(&mut self, sb: &mut SysBus, insn: u16) -> CpuAction {
|
||||
/// Execution Time: 2S+1N
|
||||
pub(in super::super) fn exec_thumb_branch(&mut self, insn: u16) -> CpuAction {
|
||||
let offset = ((insn.offset11() << 21) >> 20) as i32;
|
||||
self.pc = (self.pc as i32).wrapping_add(offset) as u32;
|
||||
self.S_cycle16(sb, self.pc);
|
||||
self.reload_pipeline16(sb);
|
||||
CpuAction::FlushPipeline
|
||||
self.reload_pipeline16(); // 2S + 1N
|
||||
CpuAction::PipelineFlushed
|
||||
}
|
||||
|
||||
/// Format 19
|
||||
pub(in super::super) fn exec_thumb_branch_long_with_link(
|
||||
&mut self,
|
||||
sb: &mut SysBus,
|
||||
insn: u16,
|
||||
) -> CpuAction {
|
||||
/// Execution Time: 3S+1N (first opcode 1S, second opcode 2S+1N).
|
||||
pub(in super::super) fn exec_thumb_branch_long_with_link(&mut self, insn: u16) -> CpuAction {
|
||||
let mut off = insn.offset11();
|
||||
if insn.bit(consts::flags::FLAG_LOW_OFFSET) {
|
||||
self.S_cycle16(sb, self.pc);
|
||||
off = off << 1;
|
||||
let next_pc = (self.pc - 2) | 1;
|
||||
self.pc = ((self.gpr[REG_LR] & !1) as i32).wrapping_add(off) as u32;
|
||||
self.gpr[REG_LR] = next_pc;
|
||||
self.reload_pipeline16(sb);
|
||||
CpuAction::FlushPipeline
|
||||
self.reload_pipeline16(); // implies 2S + 1N
|
||||
CpuAction::PipelineFlushed
|
||||
} else {
|
||||
off = (off << 21) >> 9;
|
||||
self.gpr[REG_LR] = (self.pc as i32).wrapping_add(off) as u32;
|
||||
self.S_cycle16(sb, self.pc);
|
||||
|
||||
CpuAction::AdvancePC
|
||||
CpuAction::AdvancePC(Seq) // 1S
|
||||
}
|
||||
}
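The two BL half-opcodes above reassemble one signed offset: the first stage sign-extends the high 11 bits via `(off << 21) >> 9` and stages the result in LR, the second adds the low 11 bits shifted left by one. A standalone sketch of that reconstruction (the helper and the sample numbers are made up for illustration):

// Combines the two 11-bit offset fields the same way the handler above does.
fn bl_target(pc_at_first_opcode: i32, off_hi: i32, off_lo: i32) -> i32 {
    let lr = pc_at_first_opcode.wrapping_add((off_hi << 21) >> 9); // sign-extended high part
    lr.wrapping_add(off_lo << 1)                                   // low part, halfword-aligned
}

fn main() {
    // Forward branch of +0x40 bytes when the first opcode's pc (already +4) is 0x0800_0104.
    assert_eq!(bl_target(0x0800_0104, 0, 0x20), 0x0800_0144);
}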
|
||||
|
||||
pub fn thumb_undefined(&mut self, _: &mut SysBus, insn: u16) -> CpuAction {
|
||||
pub fn thumb_undefined(&mut self, insn: u16) -> CpuAction {
|
||||
panic!(
|
||||
"executing undefind thumb instruction {:04x} at @{:08x}",
|
||||
insn,
|
||||
|
@ -612,28 +541,28 @@ impl Core {
|
|||
}
|
||||
|
||||
#[cfg(not(feature = "arm7tdmi_dispatch_table"))]
|
||||
pub fn exec_thumb(&mut self, bus: &mut SysBus, insn: u16, fmt: ThumbFormat) -> CpuAction {
|
||||
pub fn exec_thumb(&mut self, insn: u16, fmt: ThumbFormat) -> CpuAction {
|
||||
match fmt {
|
||||
ThumbFormat::MoveShiftedReg => self.exec_thumb_move_shifted_reg(bus, insn),
|
||||
ThumbFormat::AddSub => self.exec_thumb_add_sub(bus, insn),
|
||||
ThumbFormat::DataProcessImm => self.exec_thumb_data_process_imm(bus, insn),
|
||||
ThumbFormat::AluOps => self.exec_thumb_alu_ops(bus, insn),
|
||||
ThumbFormat::HiRegOpOrBranchExchange => self.exec_thumb_hi_reg_op_or_bx(bus, insn),
|
||||
ThumbFormat::LdrPc => self.exec_thumb_ldr_pc(bus, insn),
|
||||
ThumbFormat::LdrStrRegOffset => self.exec_thumb_ldr_str_reg_offset(bus, insn),
|
||||
ThumbFormat::LdrStrSHB => self.exec_thumb_ldr_str_shb(bus, insn),
|
||||
ThumbFormat::LdrStrImmOffset => self.exec_thumb_ldr_str_imm_offset(bus, insn),
|
||||
ThumbFormat::LdrStrHalfWord => self.exec_thumb_ldr_str_halfword(bus, insn),
|
||||
ThumbFormat::LdrStrSp => self.exec_thumb_ldr_str_sp(bus, insn),
|
||||
ThumbFormat::LoadAddress => self.exec_thumb_load_address(bus, insn),
|
||||
ThumbFormat::AddSp => self.exec_thumb_add_sp(bus, insn),
|
||||
ThumbFormat::PushPop => self.exec_thumb_push_pop(bus, insn),
|
||||
ThumbFormat::LdmStm => self.exec_thumb_ldm_stm(bus, insn),
|
||||
ThumbFormat::BranchConditional => self.exec_thumb_branch_with_cond(bus, insn),
|
||||
ThumbFormat::Swi => self.exec_thumb_swi(bus, insn),
|
||||
ThumbFormat::Branch => self.exec_thumb_branch(bus, insn),
|
||||
ThumbFormat::BranchLongWithLink => self.exec_thumb_branch_long_with_link(bus, insn),
|
||||
ThumbFormat::Undefined => self.thumb_undefined(bus, insn),
|
||||
ThumbFormat::MoveShiftedReg => self.exec_thumb_move_shifted_reg(insn),
|
||||
ThumbFormat::AddSub => self.exec_thumb_add_sub(insn),
|
||||
ThumbFormat::DataProcessImm => self.exec_thumb_data_process_imm(insn),
|
||||
ThumbFormat::AluOps => self.exec_thumb_alu_ops(insn),
|
||||
ThumbFormat::HiRegOpOrBranchExchange => self.exec_thumb_hi_reg_op_or_bx(insn),
|
||||
ThumbFormat::LdrPc => self.exec_thumb_ldr_pc(insn),
|
||||
ThumbFormat::LdrStrRegOffset => self.exec_thumb_ldr_str_reg_offset(insn),
|
||||
ThumbFormat::LdrStrSHB => self.exec_thumb_ldr_str_shb(insn),
|
||||
ThumbFormat::LdrStrImmOffset => self.exec_thumb_ldr_str_imm_offset(insn),
|
||||
ThumbFormat::LdrStrHalfWord => self.exec_thumb_ldr_str_halfword(insn),
|
||||
ThumbFormat::LdrStrSp => self.exec_thumb_ldr_str_sp(insn),
|
||||
ThumbFormat::LoadAddress => self.exec_thumb_load_address(insn),
|
||||
ThumbFormat::AddSp => self.exec_thumb_add_sp(insn),
|
||||
ThumbFormat::PushPop => self.exec_thumb_push_pop(insn),
|
||||
ThumbFormat::LdmStm => self.exec_thumb_ldm_stm(insn),
|
||||
ThumbFormat::BranchConditional => self.exec_thumb_branch_with_cond(insn),
|
||||
ThumbFormat::Swi => self.exec_thumb_swi(insn),
|
||||
ThumbFormat::Branch => self.exec_thumb_branch(insn),
|
||||
ThumbFormat::BranchLongWithLink => self.exec_thumb_branch_long_with_link(insn),
|
||||
ThumbFormat::Undefined => self.thumb_undefined(insn),
|
||||
}
|
||||
}
|
||||
}
@ -105,17 +105,17 @@ impl Debugger {
|
|||
}
|
||||
|
||||
println!("{}", self.gba.cpu);
|
||||
println!("IME={}", self.gba.sysbus.io.intc.interrupt_master_enable);
|
||||
println!("IE={:#?}", self.gba.sysbus.io.intc.interrupt_enable);
|
||||
println!("IF={:#?}", self.gba.sysbus.io.intc.interrupt_flags);
|
||||
println!("IME={}", self.gba.io_devs.intc.interrupt_master_enable);
|
||||
println!("IE={:#?}", self.gba.io_devs.intc.interrupt_enable);
|
||||
println!("IF={:#?}", self.gba.io_devs.intc.interrupt_flags);
|
||||
}
|
||||
GpuInfo => println!("GPU: {:#?}", self.gba.sysbus.io.gpu),
|
||||
GpuInfo => println!("GPU: {:#?}", self.gba.io_devs.gpu),
|
||||
GpioInfo => println!("GPIO: {:#?}", self.gba.sysbus.cartridge.get_gpio()),
|
||||
Step(count) => {
|
||||
for _ in 0..count {
|
||||
self.gba.cpu.step(&mut self.gba.sysbus);
|
||||
self.gba.cpu.step();
|
||||
while self.gba.cpu.last_executed.is_none() {
|
||||
self.gba.cpu.step(&mut self.gba.sysbus);
|
||||
self.gba.cpu.step();
|
||||
}
|
||||
if let Some(last_executed) = &self.gba.cpu.last_executed {
|
||||
let pc = last_executed.get_pc();
|
||||
|
@ -143,6 +143,7 @@ impl Debugger {
|
|||
);
|
||||
}
|
||||
}
|
||||
println!("cycles: {}", self.gba.scheduler.timestamp());
|
||||
println!("{}\n", self.gba.cpu);
|
||||
}
|
||||
Continue => 'running: loop {
|
||||
|
@ -218,7 +219,7 @@ impl Debugger {
|
|||
// TileView(bg) => create_tile_view(bg, &self.gba),
|
||||
Reset => {
|
||||
println!("resetting cpu...");
|
||||
self.gba.cpu.reset(&mut self.gba.sysbus);
|
||||
self.gba.cpu.reset();
|
||||
println!("cpu is restarted!")
|
||||
}
|
||||
TraceToggle(flags) => {
|
261 core/src/gba.rs
@ -11,7 +11,7 @@ use super::dma::DmaController;
|
|||
use super::gpu::*;
|
||||
use super::interrupt::*;
|
||||
use super::iodev::*;
|
||||
use super::sched::{EventHandler, EventType, Scheduler, SharedScheduler};
|
||||
use super::sched::{EventType, Scheduler, SharedScheduler};
|
||||
use super::sound::SoundController;
|
||||
use super::sysbus::SysBus;
|
||||
use super::timer::Timers;
|
||||
|
@ -22,21 +22,15 @@ use super::VideoInterface;
|
|||
use super::{AudioInterface, InputInterface};
|
||||
|
||||
pub struct GameBoyAdvance {
|
||||
pub cpu: arm7tdmi::Core,
|
||||
pub sysbus: Box<SysBus>,
|
||||
io_devs: Shared<IoDevices>,
|
||||
|
||||
pub cpu: Box<arm7tdmi::Core<SysBus>>,
|
||||
pub sysbus: Shared<SysBus>,
|
||||
pub io_devs: Shared<IoDevices>,
|
||||
pub scheduler: SharedScheduler,
|
||||
interrupt_flags: SharedInterruptFlags,
|
||||
#[cfg(not(feature = "no_video_interface"))]
|
||||
pub video_device: Rc<RefCell<dyn VideoInterface>>,
|
||||
pub audio_device: Rc<RefCell<dyn AudioInterface>>,
|
||||
pub input_device: Rc<RefCell<dyn InputInterface>>,
|
||||
|
||||
pub cycles_to_next_event: usize,
|
||||
|
||||
scheduler: SharedScheduler,
|
||||
|
||||
overshoot_cycles: usize,
|
||||
interrupt_flags: SharedInterruptFlags,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
|
@ -47,7 +41,13 @@ struct SaveState {
|
|||
ewram: Box<[u8]>,
|
||||
iwram: Box<[u8]>,
|
||||
interrupt_flags: u16,
|
||||
cpu: arm7tdmi::Core,
|
||||
cpu_state: arm7tdmi::SavedCpuState,
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
enum BusMaster {
|
||||
Dma,
|
||||
Cpu,
|
||||
}
|
||||
|
||||
/// Checks if the bios provided is the real one
|
||||
|
@ -83,20 +83,20 @@ impl GameBoyAdvance {
|
|||
let intc = InterruptController::new(interrupt_flags.clone());
|
||||
let gpu = Box::new(Gpu::new(scheduler.clone(), interrupt_flags.clone()));
|
||||
let dmac = DmaController::new(interrupt_flags.clone(), scheduler.clone());
|
||||
let timers = Timers::new(interrupt_flags.clone());
|
||||
let timers = Timers::new(interrupt_flags.clone(), scheduler.clone());
|
||||
let sound_controller = Box::new(SoundController::new(
|
||||
scheduler.clone(),
|
||||
audio_device.borrow().get_sample_rate() as f32,
|
||||
));
|
||||
let io_devs = Shared::new(IoDevices::new(intc, gpu, dmac, timers, sound_controller));
|
||||
let sysbus = Box::new(SysBus::new(
|
||||
let sysbus = Shared::new(SysBus::new(
|
||||
scheduler.clone(),
|
||||
io_devs.clone(),
|
||||
bios_rom,
|
||||
gamepak,
|
||||
));
|
||||
|
||||
let cpu = arm7tdmi::Core::new();
|
||||
let cpu = Box::new(arm7tdmi::Core::new(sysbus.clone()));
|
||||
|
||||
let mut gba = GameBoyAdvance {
|
||||
cpu,
|
||||
|
@ -110,8 +110,6 @@ impl GameBoyAdvance {
|
|||
|
||||
scheduler: scheduler,
|
||||
|
||||
cycles_to_next_event: 1,
|
||||
overshoot_cycles: 0,
|
||||
interrupt_flags: interrupt_flags,
|
||||
};
|
||||
|
||||
|
@ -130,7 +128,6 @@ impl GameBoyAdvance {
|
|||
) -> bincode::Result<GameBoyAdvance> {
|
||||
let decoded: Box<SaveState> = bincode::deserialize_from(savestate)?;
|
||||
|
||||
let arm7tdmi = decoded.cpu;
|
||||
let interrupts = Rc::new(Cell::new(IrqBitmask(decoded.interrupt_flags)));
|
||||
let scheduler = decoded.scheduler.make_shared();
|
||||
let mut io_devs = Shared::new(decoded.io_devs);
|
||||
|
@ -139,8 +136,7 @@ impl GameBoyAdvance {
|
|||
io_devs.connect_irq(interrupts.clone());
|
||||
io_devs.gpu.set_scheduler(scheduler.clone());
|
||||
io_devs.sound.set_scheduler(scheduler.clone());
|
||||
|
||||
let sysbus = Box::new(SysBus::new_with_memories(
|
||||
let sysbus = Shared::new(SysBus::new_with_memories(
|
||||
scheduler.clone(),
|
||||
io_devs.clone(),
|
||||
cartridge,
|
||||
|
@ -148,6 +144,10 @@ impl GameBoyAdvance {
|
|||
decoded.ewram,
|
||||
decoded.iwram,
|
||||
));
|
||||
let arm7tdmi = Box::new(arm7tdmi::Core::from_saved_state(
|
||||
sysbus.clone(),
|
||||
decoded.cpu_state,
|
||||
));
|
||||
|
||||
Ok(GameBoyAdvance {
|
||||
cpu: arm7tdmi,
|
||||
|
@ -161,17 +161,13 @@ impl GameBoyAdvance {
|
|||
audio_device: audio_device,
|
||||
input_device: input_device,
|
||||
|
||||
cycles_to_next_event: 1,
|
||||
|
||||
overshoot_cycles: 0,
|
||||
|
||||
scheduler,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn save_state(&self) -> bincode::Result<Vec<u8>> {
|
||||
let s = SaveState {
|
||||
cpu: self.cpu.clone(),
|
||||
cpu_state: self.cpu.save_state(),
|
||||
io_devs: self.io_devs.clone_inner(),
|
||||
cartridge: self.sysbus.cartridge.thin_copy(),
|
||||
iwram: Box::from(self.sysbus.get_iwram()),
|
||||
|
@ -186,11 +182,12 @@ impl GameBoyAdvance {
|
|||
pub fn restore_state(&mut self, bytes: &[u8], bios: Box<[u8]>) -> bincode::Result<()> {
|
||||
let decoded: Box<SaveState> = bincode::deserialize_from(bytes)?;
|
||||
|
||||
self.cpu = decoded.cpu;
|
||||
self.cpu.restore_state(decoded.cpu_state);
|
||||
self.scheduler = Scheduler::make_shared(decoded.scheduler);
|
||||
self.interrupt_flags = Rc::new(Cell::new(IrqBitmask(decoded.interrupt_flags)));
|
||||
self.io_devs = Shared::new(decoded.io_devs);
|
||||
// Restore memory state
|
||||
self.cpu.set_memory_interface(self.sysbus.clone());
|
||||
self.sysbus.set_bios(bios);
|
||||
self.sysbus.set_iwram(decoded.iwram);
|
||||
self.sysbus.set_ewram(decoded.ewram);
|
||||
|
@ -202,7 +199,6 @@ impl GameBoyAdvance {
|
|||
self.sysbus.set_io_devices(self.io_devs.clone());
|
||||
self.sysbus.cartridge.update_from(decoded.cartridge);
|
||||
self.sysbus.created();
|
||||
self.cycles_to_next_event = 1;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
@ -222,24 +218,113 @@ impl GameBoyAdvance {
|
|||
|
||||
pub fn frame(&mut self) {
|
||||
self.key_poll();
|
||||
static mut OVERSHOOT: usize = 0;
|
||||
unsafe {
|
||||
OVERSHOOT = self.run(CYCLES_FULL_REFRESH - OVERSHOOT);
|
||||
}
|
||||
}
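frame() keeps the long-run average at one full refresh per call: run() returns how far the last event overshot the requested budget, and that overshoot is deducted from the next frame's budget. A numeric sketch of that bookkeeping with made-up stray-cycle counts (280,896 is the GBA's cycles-per-frame figure):

fn main() {
    const CYCLES_FULL_REFRESH: usize = 280_896;
    let mut overshoot = 0usize;
    for stray in [7usize, 3, 0] {
        let budget = CYCLES_FULL_REFRESH - overshoot; // what frame() asks run() to execute
        let ran = budget + stray;                     // run() finishes the event that crosses the limit
        overshoot = ran - budget;                     // run()'s return value, carried to the next frame
        assert_eq!(overshoot, stray);
    }
}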
|
||||
|
||||
let mut scheduler = self.scheduler.clone();
|
||||
#[inline]
|
||||
fn dma_step(&mut self) {
|
||||
self.io_devs.dmac.perform_work(&mut self.sysbus);
|
||||
}
|
||||
|
||||
let mut remaining_cycles = CYCLES_FULL_REFRESH - self.overshoot_cycles;
|
||||
#[inline]
|
||||
pub fn cpu_step(&mut self) {
|
||||
if self.io_devs.intc.irq_pending() {
|
||||
self.cpu.irq();
|
||||
self.io_devs.haltcnt = HaltState::Running;
|
||||
}
|
||||
self.cpu.step();
|
||||
}
|
||||
|
||||
while remaining_cycles > 0 {
|
||||
let cycles = self.step(&mut scheduler);
|
||||
if remaining_cycles >= cycles {
|
||||
remaining_cycles -= cycles;
|
||||
} else {
|
||||
self.overshoot_cycles = cycles - remaining_cycles;
|
||||
return;
|
||||
#[inline]
|
||||
fn get_bus_master(&mut self) -> Option<BusMaster> {
|
||||
match (self.io_devs.dmac.is_active(), self.io_devs.haltcnt) {
|
||||
(true, _) => Some(BusMaster::Dma),
|
||||
(false, HaltState::Running) => Some(BusMaster::Cpu),
|
||||
(false, _) => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Runs the emulation for a given amount of cycles
|
||||
/// @return number of extra cycles run in this iteration
|
||||
#[inline]
|
||||
fn run(&mut self, cycles_to_run: usize) -> usize {
|
||||
let run_start_time = self.scheduler.timestamp();
|
||||
|
||||
// Register an event to mark the end of this run
|
||||
self.scheduler
|
||||
.push(EventType::RunLimitReached, cycles_to_run);
|
||||
|
||||
let mut running = true;
|
||||
while running {
|
||||
// The tricky part is to avoid unnecessary calls to Scheduler::pop_pending_event;
// performance-wise it is best to run as many cycles as possible while we know there are no pending events.
|
||||
// Fast forward emulation until an event occurs
|
||||
while self.scheduler.timestamp() <= self.scheduler.timestamp_of_next_event() {
|
||||
// 3 Options:
|
||||
// 1. DMA is active - thus CPU is blocked
|
||||
// 2. DMA inactive and halt state is RUN - CPU can run
|
||||
// 3. DMA inactive and halt state is HALT - CPU is blocked
|
||||
match self.get_bus_master() {
|
||||
Some(BusMaster::Dma) => self.dma_step(),
|
||||
Some(BusMaster::Cpu) => self.cpu_step(),
|
||||
None => {
|
||||
if self.io_devs.intc.irq_pending() {
|
||||
self.io_devs.haltcnt = HaltState::Running;
|
||||
} else {
|
||||
self.scheduler.fast_forward_to_next();
|
||||
let (event, cycles_late) = self
|
||||
.scheduler
|
||||
.pop_pending_event()
|
||||
.unwrap_or_else(|| unreachable!());
|
||||
self.handle_event(event, cycles_late, &mut running);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
while let Some((event, cycles_late)) = self.scheduler.pop_pending_event() {
|
||||
self.handle_event(event, cycles_late, &mut running);
|
||||
}
|
||||
}
|
||||
|
||||
self.overshoot_cycles = 0;
|
||||
let total_cycles_ran = self.scheduler.timestamp() - run_start_time;
|
||||
total_cycles_ran - cycles_to_run
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn handle_event(&mut self, event: EventType, cycles_late: usize, running: &mut bool) {
|
||||
let io = &mut (*self.io_devs);
|
||||
match event {
|
||||
EventType::RunLimitReached => {
|
||||
*running = false;
|
||||
}
|
||||
EventType::DmaActivateChannel(channel_id) => io.dmac.activate_channel(channel_id),
|
||||
EventType::TimerOverflow(channel_id) => {
|
||||
let timers = &mut io.timers;
|
||||
let dmac = &mut io.dmac;
|
||||
let apu = &mut io.sound;
|
||||
timers.handle_overflow_event(channel_id, cycles_late, apu, dmac);
|
||||
}
|
||||
EventType::Gpu(event) => io.gpu.on_event(
|
||||
event,
|
||||
cycles_late,
|
||||
&mut *self.sysbus,
|
||||
#[cfg(not(feature = "no_video_interface"))]
|
||||
&self.video_device,
|
||||
),
|
||||
EventType::Apu(event) => io.sound.on_event(event, cycles_late, &self.audio_device),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn skip_bios(&mut self) {
|
||||
self.cpu.skip_bios();
|
||||
self.sysbus.io.gpu.skip_bios();
|
||||
}
|
||||
|
||||
#[cfg(feature = "debugger")]
|
||||
pub fn add_breakpoint(&mut self, addr: u32) -> Option<usize> {
|
||||
if !self.cpu.breakpoints.contains(&addr) {
|
||||
let new_index = self.cpu.breakpoints.len();
|
||||
|
@ -250,6 +335,7 @@ impl GameBoyAdvance {
|
|||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "debugger")]
|
||||
pub fn check_breakpoint(&self) -> Option<u32> {
|
||||
let next_pc = self.cpu.get_next_pc();
|
||||
for bp in &self.cpu.breakpoints {
|
||||
|
@ -261,82 +347,23 @@ impl GameBoyAdvance {
|
|||
None
|
||||
}
|
||||
|
||||
pub fn skip_bios(&mut self) {
|
||||
self.cpu.skip_bios();
|
||||
self.sysbus.io.gpu.skip_bios();
|
||||
}
|
||||
|
||||
pub fn step_cpu(&mut self, io: &mut IoDevices) -> usize {
|
||||
if io.intc.irq_pending() {
|
||||
self.cpu.irq(&mut self.sysbus);
|
||||
io.haltcnt = HaltState::Running;
|
||||
}
|
||||
let previous_cycles = self.cpu.cycles;
|
||||
self.cpu.step(&mut self.sysbus);
|
||||
self.cpu.cycles - previous_cycles
|
||||
}
|
||||
|
||||
pub fn step(&mut self, scheduler: &mut Scheduler) -> usize {
|
||||
// I hate myself for doing this, but rust left me no choice.
|
||||
let io = unsafe {
|
||||
let ptr = &mut *self.sysbus as *mut SysBus;
|
||||
&mut (*ptr).io as &mut IoDevices
|
||||
};
|
||||
|
||||
let available_cycles = self.scheduler.get_cycles_to_next_event();
|
||||
let mut cycles_left = available_cycles;
|
||||
let mut cycles = 0;
|
||||
|
||||
while cycles_left > 0 {
|
||||
let _cycles = if !io.dmac.is_active() {
|
||||
if HaltState::Running == io.haltcnt {
|
||||
self.step_cpu(io)
|
||||
} else {
|
||||
cycles = cycles_left;
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
io.dmac.perform_work(&mut self.sysbus);
|
||||
return cycles;
|
||||
};
|
||||
|
||||
cycles += _cycles;
|
||||
if cycles_left < _cycles {
|
||||
break;
|
||||
}
|
||||
cycles_left -= _cycles;
|
||||
}
|
||||
|
||||
io.timers.update(cycles, &mut self.sysbus);
|
||||
|
||||
scheduler.run(cycles, self);
|
||||
|
||||
cycles
|
||||
}
|
||||
|
||||
#[cfg(feature = "debugger")]
|
||||
/// 'step' function that checks for breakpoints
|
||||
/// TODO avoid code duplication
|
||||
pub fn step_debugger(&mut self) -> Option<u32> {
|
||||
// I hate myself for doing this, but rust left me no choice.
|
||||
let io = unsafe {
|
||||
let ptr = &mut *self.sysbus as *mut SysBus;
|
||||
&mut (*ptr).io as &mut IoDevices
|
||||
};
|
||||
|
||||
// clear any pending DMAs
|
||||
while io.dmac.is_active() {
|
||||
io.dmac.perform_work(&mut self.sysbus);
|
||||
}
|
||||
self.dma_step();
|
||||
|
||||
// Run the CPU
|
||||
let _cycles = self.scheduler.measure_cycles(|| {
|
||||
self.cpu_step();
|
||||
});
|
||||
|
||||
let cycles = self.step_cpu(io);
|
||||
let breakpoint = self.check_breakpoint();
|
||||
|
||||
io.timers.update(cycles, &mut self.sysbus);
|
||||
|
||||
// update gpu & sound
|
||||
let mut scheduler = self.scheduler.clone();
|
||||
scheduler.run(cycles, self);
|
||||
while let Some((event, cycles_late)) = self.scheduler.pop_pending_event() {
|
||||
self.handle_event(event, cycles_late, &mut running);
|
||||
}
|
||||
|
||||
breakpoint
|
||||
}
|
||||
|
@ -349,27 +376,7 @@ impl GameBoyAdvance {
|
|||
|
||||
/// Reset the emulator
|
||||
pub fn soft_reset(&mut self) {
|
||||
self.cpu.reset(&mut self.sysbus);
|
||||
}
|
||||
}
|
||||
|
||||
impl EventHandler for GameBoyAdvance {
|
||||
fn handle_event(&mut self, event: EventType, extra_cycles: usize) {
|
||||
let io = unsafe {
|
||||
let ptr = &mut *self.sysbus as *mut SysBus;
|
||||
&mut (*ptr).io as &mut IoDevices
|
||||
};
|
||||
match event {
|
||||
EventType::DmaActivateChannel(channel_id) => io.dmac.activate_channel(channel_id),
|
||||
EventType::Gpu(event) => io.gpu.on_event(
|
||||
event,
|
||||
extra_cycles,
|
||||
self.sysbus.as_mut(),
|
||||
#[cfg(not(feature = "no_video_interface"))]
|
||||
&self.video_device,
|
||||
),
|
||||
EventType::Apu(event) => io.sound.on_event(event, extra_cycles, &self.audio_device),
|
||||
}
|
||||
self.cpu.reset();
|
||||
}
|
||||
}
@ -231,7 +231,7 @@ impl InterruptConnect for Gpu {
|
|||
|
||||
impl Gpu {
|
||||
pub fn new(mut scheduler: SharedScheduler, interrupt_flags: SharedInterruptFlags) -> Gpu {
|
||||
scheduler.add_gpu_event(GpuEvent::HDraw, CYCLES_HDRAW);
|
||||
scheduler.push_gpu_event(GpuEvent::HDraw, CYCLES_HDRAW);
|
||||
Gpu {
|
||||
interrupt_flags,
|
||||
scheduler,
|
||||
|
@ -654,7 +654,7 @@ impl Gpu {
|
|||
GpuEvent::VBlankHBlank => self.handle_vblank_hblank(),
|
||||
};
|
||||
self.scheduler
|
||||
.schedule(EventType::Gpu(next_event), cycles - extra_cycles);
|
||||
.push(EventType::Gpu(next_event), cycles - extra_cycles);
|
||||
}
|
||||
}
@ -1,3 +1,7 @@
|
|||
use std::cell::Cell;
|
||||
use std::cmp::Ordering;
|
||||
use std::collections::BinaryHeap;
|
||||
|
||||
use super::util::Shared;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
@ -5,7 +9,7 @@ use serde::{Deserialize, Serialize};
|
|||
const NUM_EVENTS: usize = 32;
|
||||
|
||||
#[repr(u32)]
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Copy, Clone)]
|
||||
#[derive(Serialize, Deserialize, Debug, PartialOrd, PartialEq, Eq, Copy, Clone)]
|
||||
pub enum GpuEvent {
|
||||
HDraw,
|
||||
HBlank,
|
||||
|
@ -14,7 +18,7 @@ pub enum GpuEvent {
|
|||
}
|
||||
|
||||
#[repr(u32)]
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Copy, Clone)]
|
||||
#[derive(Serialize, Deserialize, Debug, PartialOrd, PartialEq, Eq, Copy, Clone)]
|
||||
pub enum ApuEvent {
|
||||
Psg1Generate,
|
||||
Psg2Generate,
|
||||
|
@ -24,48 +28,98 @@ pub enum ApuEvent {
|
|||
}
|
||||
|
||||
#[repr(u32)]
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Copy, Clone)]
|
||||
#[derive(Serialize, Deserialize, Debug, PartialOrd, PartialEq, Eq, Copy, Clone)]
|
||||
pub enum EventType {
|
||||
RunLimitReached,
|
||||
Gpu(GpuEvent),
|
||||
Apu(ApuEvent),
|
||||
DmaActivateChannel(usize),
|
||||
TimerOverflow(usize),
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone)]
|
||||
struct Event {
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, Eq)]
|
||||
pub struct Event {
|
||||
typ: EventType,
|
||||
/// Timestamp in cycles
|
||||
time: usize,
|
||||
cancel: Cell<bool>,
|
||||
}
|
||||
|
||||
impl Event {
|
||||
fn new(typ: EventType, time: usize) -> Event {
|
||||
Event { typ, time }
|
||||
Event {
|
||||
typ,
|
||||
time,
|
||||
cancel: Cell::new(false),
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn get_type(&self) -> EventType {
|
||||
self.typ
|
||||
}
|
||||
|
||||
fn is_canceled(&self) -> bool {
|
||||
self.cancel.get()
|
||||
}
|
||||
}
|
||||
|
||||
impl Ord for Event {
|
||||
fn cmp(&self, other: &Self) -> Ordering {
|
||||
self.time.cmp(&other.time).reverse()
|
||||
}
|
||||
}
|
||||
|
||||
/// Implement custom reverse ordering
|
||||
impl PartialOrd for Event {
|
||||
#[inline]
|
||||
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
|
||||
other.time.partial_cmp(&self.time)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn lt(&self, other: &Self) -> bool {
|
||||
other.time < self.time
|
||||
}
|
||||
#[inline]
|
||||
fn le(&self, other: &Self) -> bool {
|
||||
other.time <= self.time
|
||||
}
|
||||
#[inline]
|
||||
fn gt(&self, other: &Self) -> bool {
|
||||
other.time > self.time
|
||||
}
|
||||
#[inline]
|
||||
fn ge(&self, other: &Self) -> bool {
|
||||
other.time >= self.time
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialEq for Event {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.time == other.time
|
||||
}
|
||||
}
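The Ord/PartialOrd impls above compare in reverse so that std's max-heap BinaryHeap behaves as a min-heap on the event timestamp, keeping the earliest event at the top. The standard-library way to get the same effect, shown here only as a standalone illustration, is to wrap the key in core::cmp::Reverse:

use std::cmp::Reverse;
use std::collections::BinaryHeap;

fn main() {
    let mut heap = BinaryHeap::new();
    for t in [240usize, 60, 512, 13, 72] {
        heap.push(Reverse(t)); // Reverse flips the ordering, so the smallest pops first
    }
    assert_eq!(heap.pop(), Some(Reverse(13)));
    assert_eq!(heap.pop(), Some(Reverse(60)));
}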
|
||||
|
||||
/// Event scheduler for cycle-aware components.
/// The scheduler should be "shared" to all event-generating components.
/// Each event-generating component can call Scheduler::push to schedule an event later in the emulation.
/// The scheduler's timestamp should be updated for each increment in CPU cycles.
///
/// The main emulation loop can then call Scheduler::pop_pending_event to handle the events that became due.
|
||||
#[derive(Serialize, Deserialize, Debug, Clone)]
|
||||
pub struct Scheduler {
|
||||
timestamp: usize,
|
||||
events: Vec<Event>,
|
||||
events: BinaryHeap<Event>,
|
||||
}
|
||||
|
||||
pub type SharedScheduler = Shared<Scheduler>;
|
||||
|
||||
pub trait EventHandler {
|
||||
/// Handle the scheduler event
|
||||
fn handle_event(&mut self, e: EventType, extra_cycles: usize);
|
||||
}
|
||||
|
||||
impl Scheduler {
|
||||
pub fn new_shared() -> SharedScheduler {
|
||||
let sched = Scheduler {
|
||||
timestamp: 0,
|
||||
events: Vec::with_capacity(NUM_EVENTS),
|
||||
events: BinaryHeap::with_capacity(NUM_EVENTS),
|
||||
};
|
||||
SharedScheduler::new(sched)
|
||||
}
|
||||
|
@ -74,46 +128,88 @@ impl Scheduler {
|
|||
SharedScheduler::new(self)
|
||||
}
|
||||
|
||||
pub fn schedule(&mut self, typ: EventType, cycles: usize) {
|
||||
/// Schedule an event to be executed in `cycles` cycles from now
|
||||
pub fn push(&mut self, typ: EventType, cycles: usize) {
|
||||
let event = Event::new(typ, self.timestamp + cycles);
|
||||
let idx = self
|
||||
.events
|
||||
.binary_search_by(|e| e.time.cmp(&event.time))
|
||||
.unwrap_or_else(|x| x);
|
||||
self.events.insert(idx, event);
|
||||
self.events.push(event);
|
||||
}
|
||||
|
||||
pub fn add_gpu_event(&mut self, e: GpuEvent, cycles: usize) {
|
||||
self.schedule(EventType::Gpu(e), cycles);
|
||||
/// Cancel all events with type `typ`
|
||||
/// This method is rather expensive to call
|
||||
pub fn cancel(&mut self, typ: EventType) {
|
||||
self.events
|
||||
.iter()
|
||||
.filter(|e| e.typ == typ)
|
||||
.for_each(|e| e.cancel.set(true));
|
||||
}
|
||||
|
||||
pub fn add_apu_event(&mut self, e: ApuEvent, cycles: usize) {
|
||||
self.schedule(EventType::Apu(e), cycles);
|
||||
pub fn push_gpu_event(&mut self, e: GpuEvent, cycles: usize) {
|
||||
self.push(EventType::Gpu(e), cycles);
|
||||
}
|
||||
|
||||
pub fn run<H: EventHandler>(&mut self, cycles: usize, handler: &mut H) {
|
||||
let run_to = self.timestamp + cycles;
|
||||
self.timestamp = run_to;
|
||||
pub fn push_apu_event(&mut self, e: ApuEvent, cycles: usize) {
|
||||
self.push(EventType::Apu(e), cycles);
|
||||
}
|
||||
|
||||
while self.events.len() > 0 {
|
||||
if run_to >= self.events[0].time {
|
||||
let event = self.events.remove(0);
|
||||
handler.handle_event(event.get_type(), run_to - event.time);
|
||||
/// Updates the scheduler timestamp
|
||||
#[inline]
|
||||
pub fn update(&mut self, cycles: usize) {
|
||||
self.timestamp += cycles;
|
||||
}
|
||||
|
||||
pub fn pop_pending_event(&mut self) -> Option<(EventType, usize)> {
|
||||
if let Some(event) = self.events.peek() {
|
||||
if self.timestamp >= event.time {
|
||||
// remove the event
|
||||
let event = self.events.pop().unwrap_or_else(|| unreachable!());
|
||||
if !event.is_canceled() {
|
||||
Some((event.get_type(), self.timestamp - event.time))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
return;
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn fast_forward_to_next(&mut self) {
|
||||
self.timestamp += self.get_cycles_to_next_event();
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn get_cycles_to_next_event(&self) -> usize {
|
||||
assert_ne!(self.events.len(), 0);
|
||||
self.events[0].time - self.timestamp
|
||||
if let Some(event) = self.events.peek() {
|
||||
event.time - self.timestamp
|
||||
} else {
|
||||
0
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
/// The event queue is assumed to be non-empty
|
||||
pub fn timestamp_of_next_event(&self) -> usize {
|
||||
self.events.peek().unwrap_or_else(|| unreachable!()).time
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn timestamp(&self) -> usize {
|
||||
self.timestamp
|
||||
}
|
||||
|
||||
#[allow(unused)]
|
||||
fn is_empty(&self) -> bool {
|
||||
self.events.is_empty()
|
||||
}
|
||||
|
||||
pub fn measure_cycles<F: FnMut()>(&mut self, mut f: F) -> usize {
|
||||
let start = self.timestamp;
|
||||
f();
|
||||
self.timestamp - start
|
||||
}
|
||||
}
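Put together, the new scheduler API is push to schedule relative to now, update to advance the timestamp, and pop_pending_event to drain whatever became due. A sketch of that driving loop against the types above (assuming a Scheduler and an EventHandler implementation are already in scope, as in the test module below):

// Sketch only: schedule, burn some cycles, then dispatch everything that is due.
fn drive_slice<H: EventHandler>(sched: &mut Scheduler, handler: &mut H, cycles: usize) {
    sched.push(EventType::Apu(ApuEvent::Sample), 512); // due 512 cycles from "now"
    sched.update(cycles);                              // account for emulated work
    while let Some((event, cycles_late)) = sched.pop_pending_event() {
        handler.handle_event(event, cycles_late);      // cycles_late = how far past due
    }
}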
|
||||
|
||||
#[cfg(test)]
|
||||
|
@ -158,15 +254,36 @@ mod test {
|
|||
fn is_event_done(&self, e: EventType) -> bool {
|
||||
(self.event_bitmask & get_event_bit(e)) != 0
|
||||
}
|
||||
}
|
||||
|
||||
impl EventHandler for Holder {
|
||||
fn handle_event(&mut self, e: EventType, extra_cycles: usize) {
|
||||
println!("[holder] got event {:?} extra_cycles {}", e, extra_cycles);
|
||||
self.event_bitmask |= get_event_bit(e);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_scheduler_ordering() {
|
||||
let mut holder = Holder::new();
|
||||
let mut sched = holder.sched.clone();
|
||||
holder
|
||||
.sched
|
||||
.push(EventType::Gpu(GpuEvent::VBlankHDraw), 240);
|
||||
holder
|
||||
.sched
|
||||
.push(EventType::Apu(ApuEvent::Psg1Generate), 60);
|
||||
holder.sched.push(EventType::Apu(ApuEvent::Sample), 512);
|
||||
holder
|
||||
.sched
|
||||
.push(EventType::Apu(ApuEvent::Psg2Generate), 13);
|
||||
holder
|
||||
.sched
|
||||
.push(EventType::Apu(ApuEvent::Psg4Generate), 72);
|
||||
|
||||
assert_eq!(
|
||||
sched.events.pop(),
|
||||
Some(Event::new(EventType::Apu(ApuEvent::Psg2Generate), 13))
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_scheduler() {
|
||||
let mut holder = Holder::new();
|
||||
|
@ -178,17 +295,17 @@ mod test {
|
|||
let mut sched = holder.sched.clone();
|
||||
holder
|
||||
.sched
|
||||
.schedule(EventType::Gpu(GpuEvent::VBlankHDraw), 240);
|
||||
.push(EventType::Gpu(GpuEvent::VBlankHDraw), 240);
|
||||
holder
|
||||
.sched
|
||||
.schedule(EventType::Apu(ApuEvent::Psg1Generate), 60);
|
||||
holder.sched.schedule(EventType::Apu(ApuEvent::Sample), 512);
|
||||
.push(EventType::Apu(ApuEvent::Psg1Generate), 60);
|
||||
holder.sched.push(EventType::Apu(ApuEvent::Sample), 512);
|
||||
holder
|
||||
.sched
|
||||
.schedule(EventType::Apu(ApuEvent::Psg2Generate), 13);
|
||||
.push(EventType::Apu(ApuEvent::Psg2Generate), 13);
|
||||
holder
|
||||
.sched
|
||||
.schedule(EventType::Apu(ApuEvent::Psg4Generate), 72);
|
||||
.push(EventType::Apu(ApuEvent::Psg4Generate), 72);
|
||||
|
||||
println!("all events");
|
||||
for e in sched.events.iter() {
|
||||
|
@ -199,7 +316,10 @@ mod test {
|
|||
macro_rules! run_for {
|
||||
($cycles:expr) => {
|
||||
println!("running the scheduler for {} cycles", $cycles);
|
||||
sched.run($cycles, &mut holder);
|
||||
sched.update($cycles);
|
||||
while let Some((event, cycles_late)) = sched.pop_pending_event() {
|
||||
holder.handle_event(event, cycles_late);
|
||||
}
|
||||
if (!sched.is_empty()) {
|
||||
println!(
|
||||
"cycles for next event: {}",
|
||||
|
@ -211,6 +331,7 @@ mod test {
|
|||
|
||||
run_for!(100);
|
||||
|
||||
println!("{:?}", *sched);
|
||||
assert_eq!(
|
||||
holder.is_event_done(EventType::Apu(ApuEvent::Psg1Generate)),
|
||||
true
|
||||
|
|
|
@ -109,7 +109,7 @@ impl SoundController {
|
|||
pub fn new(mut scheduler: SharedScheduler, audio_device_sample_rate: f32) -> SoundController {
|
||||
let resampler = CosineResampler::new(32768_f32, audio_device_sample_rate);
|
||||
let cycles_per_sample = 512;
|
||||
scheduler.schedule(EventType::Apu(ApuEvent::Sample), cycles_per_sample);
|
||||
scheduler.push(EventType::Apu(ApuEvent::Sample), cycles_per_sample);
|
||||
SoundController {
|
||||
scheduler,
|
||||
cycles_per_sample,
|
||||
|
@ -363,7 +363,7 @@ impl SoundController {
|
|||
});
|
||||
|
||||
self.scheduler
|
||||
.add_apu_event(ApuEvent::Sample, self.cycles_per_sample - extra_cycles);
|
||||
.push_apu_event(ApuEvent::Sample, self.cycles_per_sample - extra_cycles);
|
||||
}
|
||||
|
||||
pub fn on_event(
@ -1,7 +1,6 @@
|
|||
use std::fmt;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::arm7tdmi::memory::*;
|
||||
use super::bus::*;
|
||||
use super::cartridge::Cartridge;
|
||||
use super::dma::DmaNotifer;
|
||||
|
@ -45,32 +44,6 @@ pub mod consts {
|
|||
|
||||
use consts::*;
|
||||
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
pub enum MemoryAccessType {
|
||||
NonSeq,
|
||||
Seq,
|
||||
}
|
||||
|
||||
impl fmt::Display for MemoryAccessType {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"{}",
|
||||
match self {
|
||||
MemoryAccessType::NonSeq => "N",
|
||||
MemoryAccessType::Seq => "S",
|
||||
}
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Copy, Clone)]
|
||||
pub enum MemoryAccessWidth {
|
||||
MemoryAccess8,
|
||||
MemoryAccess16,
|
||||
MemoryAccess32,
|
||||
}
|
||||
|
||||
const CYCLE_LUT_SIZE: usize = 0x10;
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone)]
|
||||
|
@ -261,33 +234,34 @@ impl SysBus {
|
|||
pub fn on_waitcnt_written(&mut self, waitcnt: WaitControl) {
|
||||
self.cycle_luts.update_gamepak_waitstates(waitcnt);
|
||||
}
|
||||
pub fn idle_cycle(&mut self) {
|
||||
self.scheduler.update(1);
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn get_cycles(
|
||||
&self,
|
||||
addr: Addr,
|
||||
access: MemoryAccessType,
|
||||
width: MemoryAccessWidth,
|
||||
) -> usize {
|
||||
use MemoryAccessType::*;
|
||||
pub fn add_cycles(&mut self, addr: Addr, access: MemoryAccess, width: MemoryAccessWidth) {
|
||||
use MemoryAccess::*;
|
||||
use MemoryAccessWidth::*;
|
||||
let page = (addr >> 24) as usize;
|
||||
|
||||
// TODO optimize out by making the LUTs have 0x100 entries for each possible page ?
|
||||
if page > 0xF {
|
||||
let cycles = if page > 0xF {
|
||||
// open bus
|
||||
return 1;
|
||||
}
|
||||
match width {
|
||||
MemoryAccess8 | MemoryAccess16 => match access {
|
||||
NonSeq => self.cycle_luts.n_cycles16[page],
|
||||
Seq => self.cycle_luts.s_cycles16[page],
|
||||
},
|
||||
MemoryAccess32 => match access {
|
||||
NonSeq => self.cycle_luts.n_cycles32[page],
|
||||
Seq => self.cycle_luts.s_cycles32[page],
|
||||
},
|
||||
}
|
||||
1
|
||||
} else {
|
||||
match width {
|
||||
MemoryAccess8 | MemoryAccess16 => match access {
|
||||
NonSeq => self.cycle_luts.n_cycles16[page],
|
||||
Seq => self.cycle_luts.s_cycles16[page],
|
||||
},
|
||||
MemoryAccess32 => match access {
|
||||
NonSeq => self.cycle_luts.n_cycles32[page],
|
||||
Seq => self.cycle_luts.s_cycles32[page],
|
||||
},
|
||||
}
|
||||
};
|
||||
|
||||
self.scheduler.update(cycles);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -477,6 +451,49 @@ impl DebugRead for SysBus {
|
|||
}
|
||||
}
|
||||
|
||||
impl MemoryInterface for SysBus {
|
||||
#[inline]
|
||||
fn load_8(&mut self, addr: u32, access: MemoryAccess) -> u8 {
|
||||
self.add_cycles(addr, access, MemoryAccessWidth::MemoryAccess8);
|
||||
self.read_8(addr)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn load_16(&mut self, addr: u32, access: MemoryAccess) -> u16 {
|
||||
self.add_cycles(addr, access, MemoryAccessWidth::MemoryAccess16);
|
||||
self.read_16(addr)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn load_32(&mut self, addr: u32, access: MemoryAccess) -> u32 {
|
||||
self.add_cycles(addr, access, MemoryAccessWidth::MemoryAccess32);
|
||||
self.read_32(addr)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn store_8(&mut self, addr: u32, value: u8, access: MemoryAccess) {
|
||||
self.add_cycles(addr, access, MemoryAccessWidth::MemoryAccess8);
|
||||
self.write_8(addr, value);
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn store_16(&mut self, addr: u32, value: u16, access: MemoryAccess) {
|
||||
self.add_cycles(addr, access, MemoryAccessWidth::MemoryAccess16);
|
||||
self.write_16(addr, value);
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn store_32(&mut self, addr: u32, value: u32, access: MemoryAccess) {
|
||||
self.add_cycles(addr, access, MemoryAccessWidth::MemoryAccess32);
|
||||
self.write_32(addr, value);
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn idle_cycle(&mut self) {
|
||||
self.scheduler.update(1)
|
||||
}
|
||||
}
|
||||
|
||||
impl DmaNotifer for SysBus {
|
||||
fn notify(&mut self, timing: u16) {
|
||||
self.io.dmac.notify_from_gpu(timing);
|
|
|||
use super::dma::DmaController;
|
||||
use super::interrupt::{self, Interrupt, InterruptConnect, SharedInterruptFlags};
|
||||
use super::iodev::consts::*;
|
||||
use super::sysbus::SysBus;
|
||||
use super::sched::{EventType, Scheduler, SharedScheduler};
|
||||
use super::sound::SoundController;
|
||||
|
||||
use num::FromPrimitive;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
@ -14,10 +16,12 @@ pub struct Timer {
|
|||
pub data: u16,
|
||||
pub initial_data: u16,
|
||||
|
||||
start_time: usize,
|
||||
is_scheduled: bool,
|
||||
|
||||
irq: Interrupt,
|
||||
interrupt_flags: SharedInterruptFlags,
|
||||
timer_id: usize,
|
||||
cycles: usize,
|
||||
prescalar_shift: usize,
|
||||
}
|
||||
|
||||
|
@ -33,8 +37,9 @@ impl Timer {
|
|||
data: 0,
|
||||
ctl: TimerCtl(0),
|
||||
initial_data: 0,
|
||||
cycles: 0,
|
||||
prescalar_shift: 0,
|
||||
start_time: 0,
|
||||
is_scheduled: false,
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -43,6 +48,21 @@ impl Timer {
        0x1_0000 - (self.data as u32)
    }

    #[inline]
    fn sync_timer_data(&mut self, timestamp: usize) {
        let ticks_passed = (timestamp - self.start_time) >> self.prescalar_shift;
        self.data += ticks_passed as u16;
    }

    #[inline]
    fn overflow(&mut self) {
        // reload counter
        self.data = self.initial_data;
        if self.ctl.irq_enabled() {
            interrupt::signal_irq(&self.interrupt_flags, self.irq);
        }
    }

    /// Increments the timer by a given number of ticks.
    /// Returns the number of times it overflowed.
    fn update(&mut self, ticks: usize) -> usize {
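Since a scheduled timer's counter is no longer ticked every CPU step, sync_timer_data derives it on demand: the elapsed scheduler cycles shifted right by the prescaler give the ticks that passed. A small worked example with assumed values, using an F/64 prescaler (shift of 6):

// Worked example of the sync_timer_data arithmetic; all numbers are illustrative.
fn main() {
    let start_time: usize = 1_000;      // scheduler timestamp when the timer started
    let timestamp: usize = 1_000 + 640; // current scheduler timestamp
    let prescalar_shift = 6;            // assumed F/64 prescaler => shift by 6

    let ticks_passed = (timestamp - start_time) >> prescalar_shift;
    assert_eq!(ticks_passed, 10); // 640 cycles / 64 cycles-per-tick = 10 ticks
}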
@@ -73,6 +93,9 @@ impl Timer {

#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct Timers {
    #[serde(skip)]
    #[serde(default = "Scheduler::new_shared")]
    scheduler: SharedScheduler,
    timers: [Timer; 4],
    running_timers: u8,
    pub trace: bool,
@@ -100,8 +123,9 @@ impl std::ops::IndexMut<usize> for Timers {
}

impl Timers {
    pub fn new(interrupt_flags: SharedInterruptFlags) -> Timers {
    pub fn new(interrupt_flags: SharedInterruptFlags, scheduler: SharedScheduler) -> Timers {
        Timers {
            scheduler,
            timers: [
                Timer::new(0, interrupt_flags.clone()),
                Timer::new(1, interrupt_flags.clone()),
@@ -113,18 +137,75 @@ impl Timers {
        }
    }

    fn add_timer_event(&mut self, id: usize) {
        let timer = &mut self.timers[id];
        timer.is_scheduled = true;
        timer.start_time = self.scheduler.timestamp();
        let cycles = (timer.ticks_to_overflow() as usize) << timer.prescalar_shift;
        self.scheduler
            .push(EventType::TimerOverflow(id), cycles);
    }

    fn cancel_timer_event(&mut self, id: usize) {
        self.scheduler.cancel(EventType::TimerOverflow(id));
        self[id].is_scheduled = false;
    }

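The delay pushed to the scheduler is the inverse of the read-side arithmetic: the remaining ticks until overflow, shifted left by the prescaler. A worked example with assumed values (counter at 0xFF00, F/64 prescaler):

// Worked example of add_timer_event's delay computation; values are assumed.
fn main() {
    let data: u16 = 0xFF00;  // current counter value
    let prescalar_shift = 6; // assumed F/64 prescaler

    let ticks_to_overflow = 0x1_0000u32 - data as u32; // 0x100 ticks remaining
    let cycles = (ticks_to_overflow as usize) << prescalar_shift;
    assert_eq!(cycles, 256 * 64); // the overflow event fires 16384 cycles from now
}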
    fn handle_timer_overflow(
        &mut self,
        id: usize,
        apu: &mut SoundController,
        dmac: &mut DmaController,
    ) {
        self[id].overflow();
        if id != 3 {
            let next_timer_id = id + 1;
            let next_timer = &mut self.timers[next_timer_id];
            if next_timer.ctl.cascade() {
                if next_timer.update(1) > 0 {
                    drop(next_timer);
                    self.handle_timer_overflow(next_timer_id, apu, dmac);
                }
            }
        }
        if id == 0 || id == 1 {
            apu.handle_timer_overflow(dmac, id, 1);
        }
    }

    pub fn handle_overflow_event(
        &mut self,
        id: usize,
        extra_cycles: usize,
        apu: &mut SoundController,
        dmac: &mut DmaController,
    ) {
        self.handle_timer_overflow(id, apu, dmac);

        // TODO: re-use add_timer_event function
        let timer = &mut self.timers[id];
        timer.is_scheduled = true;
        timer.start_time = self.scheduler.timestamp() - extra_cycles;
        let cycles = (timer.ticks_to_overflow() as usize) << timer.prescalar_shift;
        self.scheduler
            .push(EventType::TimerOverflow(id), cycles - extra_cycles);
    }

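handle_overflow_event backdates start_time and shortens the next delay by extra_cycles, the lateness with which the scheduler delivered the event, so the overflow period does not drift. A sketch of that bookkeeping with assumed numbers:

// Illustrative check of the extra_cycles compensation; all values are assumed.
fn main() {
    let now: usize = 10_012; // scheduler timestamp when the event is handled
    let extra_cycles = 12;   // the event was actually due 12 cycles ago
    let prescalar_shift = 0; // F/1 prescaler for simplicity

    // As in handle_overflow_event: pretend the timer restarted when the
    // overflow was due, not when we got around to handling it.
    let start_time = now - extra_cycles;

    // A register read right now already sees those 12 elapsed cycles as ticks.
    assert_eq!((now - start_time) >> prescalar_shift, 12);
}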
    pub fn write_timer_ctl(&mut self, id: usize, value: u16) {
        let timer = &mut self.timers[id];
        let new_ctl = TimerCtl(value);
        let old_enabled = self[id].ctl.enabled();
        let old_enabled = timer.ctl.enabled();
        let new_enabled = new_ctl.enabled();
        let cascade = new_ctl.cascade();
        self[id].cycles = 0;
        self[id].prescalar_shift = SHIFT_LUT[new_ctl.prescalar() as usize];
        self[id].ctl = new_ctl;
        timer.prescalar_shift = SHIFT_LUT[new_ctl.prescalar() as usize];
        timer.ctl = new_ctl;
        if new_enabled && !cascade {
            self.running_timers |= 1 << id;
            self.cancel_timer_event(id);
            self.add_timer_event(id);
        } else {
            self.running_timers &= !(1 << id);
            self.cancel_timer_event(id);
        }
        if old_enabled != new_enabled {
            trace!(
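SHIFT_LUT turns the two prescaler bits of TMxCNT_H into a shift amount. The GBA's prescaler options are F/1, F/64, F/256 and F/1024, so a plausible table (assumed here; the actual constant is defined elsewhere in timer.rs) is:

// Assumed contents of SHIFT_LUT: prescaler field 0..=3 -> /1, /64, /256, /1024.
const SHIFT_LUT: [usize; 4] = [0, 6, 8, 10];

fn main() {
    // A prescaler value of 2 selects F/256: one timer tick every 256 CPU cycles.
    assert_eq!(1usize << SHIFT_LUT[2], 256);
}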
@@ -135,16 +216,28 @@ impl Timers {
        }
    }

    pub fn handle_read(&self, io_addr: u32) -> u16 {
    #[inline]
    fn read_timer_data(&mut self, id: usize) -> u16 {
        let timer = &mut self.timers[id];
        if timer.is_scheduled {
            // this timer is controlled by the scheduler so we need to manually calculate
            // the current value of the counter
            timer.sync_timer_data(self.scheduler.timestamp());
        }

        timer.data
    }

    pub fn handle_read(&mut self, io_addr: u32) -> u16 {
        match io_addr {
            REG_TM0CNT_L => self.timers[0].data,
            REG_TM0CNT_H => self.timers[0].ctl.0,
            REG_TM1CNT_L => self.timers[1].data,
            REG_TM1CNT_H => self.timers[1].ctl.0,
            REG_TM2CNT_L => self.timers[2].data,
            REG_TM2CNT_H => self.timers[2].ctl.0,
            REG_TM3CNT_L => self.timers[3].data,
            REG_TM3CNT_H => self.timers[3].ctl.0,
            REG_TM0CNT_L => self.read_timer_data(0),
            REG_TM1CNT_L => self.read_timer_data(1),
            REG_TM2CNT_L => self.read_timer_data(2),
            REG_TM3CNT_L => self.read_timer_data(3),
            _ => unreachable!(),
        }
    }

@@ -177,37 +270,6 @@ impl Timers {
            _ => unreachable!(),
        }
    }

    pub fn update(&mut self, cycles: usize, sb: &mut SysBus) {
        for id in 0..4 {
            if self.running_timers & (1 << id) == 0 {
                continue;
            }

            if !self.timers[id].ctl.cascade() {
                let timer = &mut self.timers[id];

                let cycles = timer.cycles + cycles;
                let inc = cycles >> timer.prescalar_shift;
                let num_overflows = timer.update(inc);
                timer.cycles = cycles & ((1 << timer.prescalar_shift) - 1);

                if num_overflows > 0 {
                    if id != 3 {
                        let next_timer = &mut self.timers[id + 1];
                        if next_timer.ctl.cascade() {
                            next_timer.update(num_overflows);
                        }
                    }
                    if id == 0 || id == 1 {
                        let io = unsafe { sb.io.inner_unsafe() };
                        io.sound
                            .handle_timer_overflow(&mut io.dmac, id, num_overflows);
                    }
                }
            }
        }
    }
}

bitfield! {