Move cycle counting to CPU Core

This isn't cycle-accurate yet and I'm probably missing something, but at least it
makes the instruction implementations cleaner for now.
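
For context: the core now remembers the last address it requested on the bus (memreq) and derives sequential vs. non-sequential access from it, so instruction handlers just call the new load_*/store_* helpers instead of pairing every bus read/write with a manual add_cycles. Below is a minimal standalone sketch of that idea; the Core/memreq/cycle_type names mirror the diff, but the flat memory and the 1-cycle S / 2-cycle N costs are made-up stand-ins for the real per-region wait states.

// Sketch only: stubbed memory and cycle costs, not the emulator's real bus.
#[derive(Debug, PartialEq)]
enum AccessType {
    NonSeq,
    Seq,
}

struct Core {
    cycles: usize,
    memreq: u32, // last address the core requested on the bus
}

impl Core {
    fn new() -> Core {
        // start with an invalid address so the first access is non-sequential
        Core { cycles: 0, memreq: 0xffff_0000 }
    }

    fn cycle_type(&self, addr: u32) -> AccessType {
        // sequential if the access repeats or directly follows the last request
        if addr == self.memreq || addr == self.memreq + 4 {
            AccessType::Seq
        } else {
            AccessType::NonSeq
        }
    }

    fn load_32(&mut self, mem: &[u8], addr: u32) -> u32 {
        // stand-in costs: 1 cycle for S, 2 for N (real values depend on the region)
        self.cycles += match self.cycle_type(addr) {
            AccessType::Seq => 1,
            AccessType::NonSeq => 2,
        };
        self.memreq = addr;
        let a = addr as usize;
        u32::from_le_bytes([mem[a], mem[a + 1], mem[a + 2], mem[a + 3]])
    }
}

fn main() {
    let mem = vec![0u8; 64];
    let mut core = Core::new();
    core.load_32(&mem, 0x00); // non-sequential (first access)
    core.load_32(&mem, 0x04); // sequential (directly follows the last request)
    core.load_32(&mem, 0x20); // non-sequential (jump elsewhere)
    println!("total cycles: {}", core.cycles);
}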


Former-commit-id: de24b15e1a51e1998207e5ea96fc8543f2553a26
Michel Heily 2019-07-05 13:07:07 +03:00
parent f8ebe26e5e
commit be9499c76d
7 changed files with 152 additions and 244 deletions


@ -6,30 +6,22 @@ use crate::arm7tdmi::exception::Exception;
use crate::arm7tdmi::psr::RegPSR;
use crate::arm7tdmi::{Addr, CpuError, CpuResult, CpuState, DecodedInstruction, REG_PC};
use crate::sysbus::SysBus;
use super::{
ArmFormat, ArmInstruction, ArmOpCode, ArmRegisterShift, ArmShiftType, ArmShiftedValue,
};
impl Core {
pub fn exec_arm(&mut self, sysbus: &mut Bus, insn: ArmInstruction) -> CpuExecResult {
pub fn exec_arm(&mut self, bus: &mut Bus, insn: ArmInstruction) -> CpuExecResult {
if !self.check_arm_cond(insn.cond) {
self.add_cycles(
insn.pc + (self.word_size() as u32),
sysbus,
Seq + MemoryAccess32,
);
self.add_cycle();
return Ok(CpuPipelineAction::IncPC);
}
match insn.fmt {
ArmFormat::BX => self.exec_bx(sysbus, insn),
ArmFormat::B_BL => self.exec_b_bl(sysbus, insn),
ArmFormat::DP => self.exec_data_processing(sysbus, insn),
ArmFormat::SWI => self.exec_swi(sysbus, insn),
ArmFormat::LDR_STR => self.exec_ldr_str(sysbus, insn),
ArmFormat::MSR_REG => self.exec_msr_reg(sysbus, insn),
ArmFormat::BX => self.exec_bx(bus, insn),
ArmFormat::B_BL => self.exec_b_bl(bus, insn),
ArmFormat::DP => self.exec_data_processing(bus, insn),
ArmFormat::SWI => self.exec_swi(bus, insn),
ArmFormat::LDR_STR => self.exec_ldr_str(bus, insn),
ArmFormat::MSR_REG => self.exec_msr_reg(bus, insn),
_ => Err(CpuError::UnimplementedCpuInstruction(
insn.pc,
insn.raw,
@ -39,33 +31,18 @@ impl Core {
}
/// Cycles 2S+1N
fn exec_b_bl(
&mut self,
sysbus: &mut Bus,
insn: ArmInstruction,
) -> CpuResult<CpuPipelineAction> {
fn exec_b_bl(&mut self, bus: &mut Bus, insn: ArmInstruction) -> CpuResult<CpuPipelineAction> {
if insn.link_flag() {
self.set_reg(14, (insn.pc + (self.word_size() as u32)) & !0b1);
}
// +1N
self.add_cycles(self.pc, sysbus, NonSeq + MemoryAccess32);
self.pc = (self.pc as i32).wrapping_add(insn.branch_offset()) as u32 & !1;
// +2S
self.add_cycles(self.pc, sysbus, Seq + MemoryAccess32);
self.add_cycles(
self.pc + (self.word_size() as u32),
sysbus,
Seq + MemoryAccess32,
);
Ok(CpuPipelineAction::Flush)
}
/// Cycles 2S+1N
fn exec_bx(&mut self, sysbus: &mut Bus, insn: ArmInstruction) -> CpuResult<CpuPipelineAction> {
fn exec_bx(&mut self, bus: &mut Bus, insn: ArmInstruction) -> CpuResult<CpuPipelineAction> {
let rn = self.get_reg(insn.rn());
if rn.bit(0) {
self.cpsr.set_state(CpuState::THUMB);
@ -73,34 +50,19 @@ impl Core {
self.cpsr.set_state(CpuState::ARM);
}
// +1N
self.add_cycles(self.pc, sysbus, NonSeq + MemoryAccess32);
self.pc = rn & !1;
// +2S
self.add_cycles(self.pc, sysbus, Seq + MemoryAccess32);
self.add_cycles(
self.pc + (self.word_size() as u32),
sysbus,
Seq + MemoryAccess32,
);
Ok(CpuPipelineAction::Flush)
}
fn exec_swi(
&mut self,
_sysbus: &mut Bus,
_insn: ArmInstruction,
) -> CpuResult<CpuPipelineAction> {
fn exec_swi(&mut self, _bus: &mut Bus, _insn: ArmInstruction) -> CpuResult<CpuPipelineAction> {
self.exception(Exception::SoftwareInterrupt);
Ok(CpuPipelineAction::Flush)
}
fn exec_msr_reg(
&mut self,
sysbus: &mut Bus,
bus: &mut Bus,
insn: ArmInstruction,
) -> CpuResult<CpuPipelineAction> {
let new_psr = RegPSR::new(self.get_reg(insn.rm()));
@ -117,7 +79,6 @@ impl Core {
}
self.cpsr = new_psr;
}
self.add_cycles(insn.pc, sysbus, Seq + MemoryAccess32);
Ok(CpuPipelineAction::IncPC)
}
@ -204,7 +165,7 @@ impl Core {
/// Add x=1I cycles if Op2 shifted-by-register. Add y=1S+1N cycles if Rd=R15.
fn exec_data_processing(
&mut self,
sysbus: &mut Bus,
bus: &mut Bus,
insn: ArmInstruction,
) -> CpuResult<CpuPipelineAction> {
// TODO handle carry flag
@ -219,10 +180,6 @@ impl Core {
let op2 = insn.operand2()?;
let rd = insn.rd();
if rd == REG_PC {
// +1N
self.add_cycles(self.pc, sysbus, NonSeq + MemoryAccess32);
}
let op2: i32 = match op2 {
ArmShiftedValue::RotatedImmediate(immediate, rotate) => {
@ -246,17 +203,9 @@ impl Core {
self.set_reg(rd, result as u32);
if rd == REG_PC {
pipeline_action = CpuPipelineAction::Flush;
// +1S
self.add_cycles(self.pc, sysbus, Seq + MemoryAccess32);
}
}
// +1S
self.add_cycles(
self.pc + (self.word_size() as u32),
sysbus,
Seq + MemoryAccess32,
);
Ok(pipeline_action)
}
@ -289,7 +238,7 @@ impl Core {
/// For LDR, add y=1S+1N if Rd=R15.
fn exec_ldr_str(
&mut self,
sysbus: &mut Bus,
bus: &mut Bus,
insn: ArmInstruction,
) -> CpuResult<CpuPipelineAction> {
if insn.write_back_flag() && insn.rd() == insn.rn() {
@ -316,19 +265,11 @@ impl Core {
if insn.load_flag() {
let data = if insn.transfer_size() == 1 {
// +1N
self.add_cycles(addr, sysbus, NonSeq + MemoryAccess8);
sysbus.read_8(addr) as u32
self.load_8(addr, bus) as u32
} else {
// +1N
self.add_cycles(addr, sysbus, NonSeq + MemoryAccess32);
sysbus.read_32(addr)
self.load_32(addr, bus)
};
// +1S
self.add_cycles(
self.pc + (self.word_size() as u32),
sysbus,
Seq + MemoryAccess32,
);
self.set_reg(insn.rd(), data);
@ -336,27 +277,15 @@ impl Core {
self.add_cycle();
// +y
if insn.rd() == REG_PC {
// +1S
self.add_cycles(self.pc, sysbus, Seq + MemoryAccess32);
// +1N
self.add_cycles(
self.pc + (self.word_size() as u32),
sysbus,
NonSeq + MemoryAccess32,
);
pipeline_action = CpuPipelineAction::Flush;
}
} else {
self.add_cycles(addr, sysbus, NonSeq + MemoryAccess32);
let value = self.get_reg(insn.rd());
if insn.transfer_size() == 1 {
// +1N
self.add_cycles(dest, sysbus, NonSeq + MemoryAccess8);
sysbus.write_8(addr, value as u8).expect("bus error");
self.store_8(addr, value as u8, bus);
} else {
// +1N
self.add_cycles(dest, sysbus, NonSeq + MemoryAccess32);
sysbus.write_32(addr, value).expect("bus error");
self.store_32(addr, value, bus);
};
}


@ -538,17 +538,17 @@ mod tests {
core.set_reg(REG_SP, 0);
let bytes = vec![
/* 0: */ 0xaa, 0xbb, 0xcc, 0xdd,
/* 4: */ 0xaa, 0xbb, 0xcc, 0xdd,
/* 8: */ 0xaa, 0xbb, 0xcc, 0xdd,
/* c: */ 0xaa, 0xbb, 0xcc, 0xdd,
/* 0: */ 0xaa, 0xbb, 0xcc, 0xdd, /* 4: */ 0xaa, 0xbb, 0xcc, 0xdd,
/* 8: */ 0xaa, 0xbb, 0xcc, 0xdd, /* c: */ 0xaa, 0xbb, 0xcc, 0xdd,
/* 10: */ 0xaa, 0xbb, 0xcc, 0xdd,
];
let mut mem = BoxedMemory::new(bytes.into_boxed_slice());
assert_ne!(mem.read_32(core.get_reg(REG_SP) + 0x10), 0x12345678);
assert_eq!(core.exec_arm(&mut mem, decoded), Ok(CpuPipelineAction::IncPC));
assert_eq!(
core.exec_arm(&mut mem, decoded),
Ok(CpuPipelineAction::IncPC)
);
assert_eq!(mem.read_32(core.get_reg(REG_SP) + 0x10), 0x12345678);
}
}


@ -1,14 +1,31 @@
use super::Addr;
use std::fmt;
use std::io;
use std::ops::Add;
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use super::Addr;
#[derive(Debug)]
pub enum MemoryAccessType {
NonSeq,
Seq,
}
impl fmt::Display for MemoryAccessType {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"{}",
match self {
MemoryAccessType::NonSeq => "N",
MemoryAccessType::Seq => "S",
}
)
}
}
#[derive(Debug)]
pub enum MemoryAccessWidth {
MemoryAccess8,
MemoryAccess16,
@ -23,8 +40,15 @@ impl Add<MemoryAccessWidth> for MemoryAccessType {
}
}
#[derive(Debug)]
pub struct MemoryAccess(pub MemoryAccessType, pub MemoryAccessWidth);
impl fmt::Display for MemoryAccess {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}-Cycle ({:?})", self.0, self.1)
}
}
pub trait Bus {
fn read_32(&self, addr: Addr) -> u32 {
self.get_bytes(addr).read_u32::<LittleEndian>().unwrap()


@ -1,16 +1,12 @@
use std::convert::TryFrom;
use std::fmt;
use std::ops::Add;
use ansi_term::{Colour, Style};
use num_traits::Num;
use crate::sysbus::SysBus;
pub use super::exception::Exception;
use super::{
arm::*,
bus::{Bus, MemoryAccess, MemoryAccessType::*, MemoryAccessWidth::*},
bus::{Bus, MemoryAccess, MemoryAccessType, MemoryAccessType::*, MemoryAccessWidth::*},
psr::RegPSR,
reg_string,
thumb::ThumbInstruction,
@ -84,6 +80,8 @@ pub struct Core {
// store the gpr before executing an instruction to show diff in the Display impl
gpr_previous: [u32; 15],
memreq: Addr,
pub verbose: bool,
}
@ -98,6 +96,7 @@ pub type CpuExecResult = CpuResult<CpuPipelineAction>;
impl Core {
pub fn new() -> Core {
Core {
memreq: 0xffff_0000, // set memreq to an invalid addr so the first load cycle will be non-sequential
..Default::default()
}
}
@ -157,8 +156,10 @@ impl Core {
}
self.map_banked_registers(curr_mode, new_mode);
let next_index = new_mode.bank_index();
self.gpr_banked_r14[next_index] = self.pc.wrapping_sub(self.word_size() as u32).wrapping_add(4);
self.gpr_banked_r14[next_index] = self
.pc
.wrapping_sub(self.word_size() as u32)
.wrapping_add(4);
}
/// Resets the cpu
@ -182,11 +183,62 @@ impl Core {
}
pub fn add_cycle(&mut self) {
println!("<cycle I-Cyclel> total: {}", self.cycles);
self.cycles += 1;
}
pub fn add_cycles(&mut self, addr: Addr, sysbus: &Bus, access: MemoryAccess) {
self.cycles += sysbus.get_cycles(addr, access);
pub fn add_cycles(&mut self, addr: Addr, bus: &Bus, access: MemoryAccess) {
println!("<cycle {:#x} {}> total: {}", addr, access, self.cycles);
self.cycles += bus.get_cycles(addr, access);
}
pub fn cycle_type(&self, addr: Addr) -> MemoryAccessType {
if addr == self.memreq || addr == self.memreq + (self.word_size() as Addr) {
Seq
} else {
NonSeq
}
}
pub fn load_32(&mut self, addr: Addr, bus: &mut Bus) -> u32 {
self.add_cycles(addr, bus, self.cycle_type(addr) + MemoryAccess32);
self.memreq = addr;
bus.read_32(addr)
}
pub fn load_16(&mut self, addr: Addr, bus: &mut Bus) -> u16 {
let cycle_type = self.cycle_type(addr);
self.add_cycles(addr, bus, cycle_type + MemoryAccess16);
self.memreq = addr;
bus.read_16(addr)
}
pub fn load_8(&mut self, addr: Addr, bus: &mut Bus) -> u8 {
let cycle_type = self.cycle_type(addr);
self.add_cycles(addr, bus, cycle_type + MemoryAccess8);
self.memreq = addr;
bus.read_8(addr)
}
pub fn store_32(&mut self, addr: Addr, value: u32, bus: &mut Bus) {
let cycle_type = self.cycle_type(addr);
self.add_cycles(addr, bus, cycle_type + MemoryAccess32);
self.memreq = addr;
bus.write_32(addr, value).expect("store_32 error");
}
pub fn store_16(&mut self, addr: Addr, value: u16, bus: &mut Bus) {
let cycle_type = self.cycle_type(addr);
self.add_cycles(addr, bus, cycle_type + MemoryAccess16);
self.memreq = addr;
bus.write_16(addr, value).expect("store_16 error");
}
pub fn store_8(&mut self, addr: Addr, value: u8, bus: &mut Bus) {
let cycle_type = self.cycle_type(addr);
self.add_cycles(addr, bus, cycle_type + MemoryAccess8);
self.memreq = addr;
bus.write_8(addr, value).expect("store_8 error");
}
pub fn check_arm_cond(&self, cond: ArmCond) -> bool {
@ -212,10 +264,11 @@ impl Core {
fn step_thumb(
&mut self,
sysbus: &mut Bus,
bus: &mut Bus,
) -> CpuResult<(Option<DecodedInstruction>, CpuPipelineAction)> {
// fetch
let new_fetched = sysbus.read_16(self.pc);
// let new_fetched = bus.read_16(self.pc);
let new_fetched = self.load_16(self.pc, bus);
// decode
let new_decoded = match self.pipeline_thumb.fetched {
@ -230,7 +283,7 @@ impl Core {
let result = match self.pipeline_thumb.decoded {
Some(d) => {
self.gpr_previous = self.get_registers();
let action = self.exec_thumb(sysbus, d)?;
let action = self.exec_thumb(bus, d)?;
Ok((Some(DecodedInstruction::Thumb(d)), action))
}
None => Ok((None, CpuPipelineAction::IncPC)),
@ -246,10 +299,10 @@ impl Core {
fn step_arm(
&mut self,
sysbus: &mut Bus,
bus: &mut Bus,
) -> CpuResult<(Option<DecodedInstruction>, CpuPipelineAction)> {
// fetch
let new_fetched = sysbus.read_32(self.pc);
// let new_fetched = bus.read_32(self.pc);
let new_fetched = self.load_32(self.pc, bus);
// decode
let new_decoded = match self.pipeline_arm.fetched {
@ -264,7 +317,7 @@ impl Core {
let result = match self.pipeline_arm.decoded {
Some(d) => {
self.gpr_previous = self.get_registers();
let action = self.exec_arm(sysbus, d)?;
let action = self.exec_arm(bus, d)?;
Ok((Some(DecodedInstruction::Arm(d)), action))
}
None => Ok((None, CpuPipelineAction::IncPC)),
@ -280,10 +333,10 @@ impl Core {
/// Perform a pipeline step
/// If an instruction was executed in this step, return it.
pub fn step(&mut self, sysbus: &mut Bus) -> CpuResult<Option<DecodedInstruction>> {
pub fn step(&mut self, bus: &mut Bus) -> CpuResult<Option<DecodedInstruction>> {
let (executed_instruction, pipeline_action) = match self.cpsr.state() {
CpuState::ARM => self.step_arm(sysbus),
CpuState::THUMB => self.step_thumb(sysbus),
CpuState::ARM => self.step_arm(bus),
CpuState::THUMB => self.step_thumb(bus),
}?;
match pipeline_action {
@ -328,9 +381,9 @@ impl Core {
/// A step that returns only once an instruction was executed.
/// Returns the address of PC before executing an instruction,
/// and the address of the next instruction to be executed;
pub fn step_debugger(&mut self, sysbus: &mut Bus) -> CpuResult<DecodedInstruction> {
pub fn step_debugger(&mut self, bus: &mut Bus) -> CpuResult<DecodedInstruction> {
loop {
if let Some(i) = self.step(sysbus)? {
if let Some(i) = self.step(bus)? {
return Ok(i);
}
}


@ -92,11 +92,7 @@ impl ThumbInstruction {
}
fn fmt_thumb_add_sp(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"add\tsp, #{imm:x}",
imm = self.sword7()
)
write!(f, "add\tsp, #{imm:x}", imm = self.sword7())
}
fn fmt_thumb_push_pop(&self, f: &mut fmt::Formatter) -> fmt::Result {
@ -110,7 +106,7 @@ impl ThumbInstruction {
for reg in register_list {
has_reg = true;
write!(f, ", {}", reg_string(reg))?;
};
}
if self.flag(ThumbInstruction::FLAG_R) {
let r = if self.is_load() { "pc" } else { "lr" };
if has_reg {


@ -1,20 +1,20 @@
use crate::arm7tdmi::arm::*;
use crate::arm7tdmi::bus::{Bus, MemoryAccessType::*, MemoryAccessWidth::*};
use crate::arm7tdmi::cpu::{Core, CpuExecResult, CpuPipelineAction};
use crate::arm7tdmi::{Addr, REG_PC, REG_LR, REG_SP, CpuState, reg_string};
use crate::arm7tdmi::{reg_string, Addr, CpuState, REG_LR, REG_PC, REG_SP};
use super::*;
fn push(cpu: &mut Core, bus: &mut Bus, r: usize) {
cpu.gpr[REG_SP] -= 4;
let stack_addr = cpu.gpr[REG_SP];
bus.write_32(stack_addr, cpu.get_reg(r)).expect("bus error");
}
cpu.gpr[REG_SP] -= 4;
let stack_addr = cpu.gpr[REG_SP];
cpu.store_32(stack_addr, cpu.get_reg(r), bus)
}
fn pop(cpu: &mut Core, bus: &mut Bus, r: usize) {
let stack_addr = cpu.gpr[REG_SP];
let val = bus.read_32(stack_addr);
cpu.set_reg(r, val);
cpu.gpr[REG_SP] = stack_addr + 4;
}
let stack_addr = cpu.gpr[REG_SP];
let val = cpu.load_32(stack_addr, bus);
cpu.set_reg(r, val);
cpu.gpr[REG_SP] = stack_addr + 4;
}
impl Core {
fn exec_thumb_add_sub(&mut self, bus: &mut Bus, insn: ThumbInstruction) -> CpuExecResult {
@ -34,12 +34,7 @@ impl Core {
if let Some(result) = result {
self.set_reg(insn.rd(), result as u32);
}
// +1S
self.add_cycles(
insn.pc + (self.word_size() as u32),
bus,
Seq + MemoryAccess32,
);
Ok(CpuPipelineAction::IncPC)
}
@ -55,21 +50,12 @@ impl Core {
if let Some(result) = result {
self.set_reg(insn.rd(), result as u32);
}
// +1S
self.add_cycles(
insn.pc + (self.word_size() as u32),
bus,
Seq + MemoryAccess16,
);
Ok(CpuPipelineAction::IncPC)
}
/// Cycles 2S+1N
fn exec_thumb_bx(
&mut self,
bus: &mut Bus,
insn: ThumbInstruction,
) -> CpuExecResult {
fn exec_thumb_bx(&mut self, bus: &mut Bus, insn: ThumbInstruction) -> CpuExecResult {
let src_reg = if insn.flag(ThumbInstruction::FLAG_H2) {
insn.rs() + 8
} else {
@ -83,19 +69,8 @@ impl Core {
self.cpsr.set_state(CpuState::ARM);
}
// +1N
self.add_cycles(self.pc, bus, NonSeq + MemoryAccess32);
self.pc = addr & !1;
// +2S
self.add_cycles(self.pc, bus, Seq + MemoryAccess32);
self.add_cycles(
self.pc + (self.word_size() as u32),
bus,
Seq + MemoryAccess32,
);
Ok(CpuPipelineAction::Flush)
}
@ -108,21 +83,14 @@ impl Core {
self.exec_thumb_bx(bus, insn)
} else {
unimplemented!("Sorry, I'm tired");
Ok(CpuPipelineAction::IncPC)
// Ok(CpuPipelineAction::IncPC)
}
}
fn exec_thumb_ldr_pc(&mut self, bus: &mut Bus, insn: ThumbInstruction) -> CpuExecResult {
let addr = (insn.pc & !0b10) + 4 + (insn.word8() as Addr);
let data = bus.read_32(addr);
// +1N
self.add_cycles(addr, bus, NonSeq + MemoryAccess32);
// +1S
self.add_cycles(
insn.pc + (self.word_size() as u32),
bus,
Seq + MemoryAccess16,
);
let data = self.load_32(addr, bus);
self.set_reg(insn.rd(), data);
// +1I
self.add_cycle();
@ -140,37 +108,21 @@ impl Core {
.wrapping_add(self.get_reg(insn.ro()));
if insn.is_load() {
let data = if insn.is_transfering_bytes() {
// +1N
self.add_cycles(addr, bus, NonSeq + MemoryAccess8);
bus.read_8(addr) as u32
self.load_8(addr, bus) as u32
} else {
// +1N
self.add_cycles(addr, bus, NonSeq + MemoryAccess32);
bus.read_32(addr)
self.load_32(addr, bus)
};
// +1S
self.add_cycles(
insn.pc + (self.word_size() as u32),
bus,
Seq + MemoryAccess32,
);
self.set_reg(insn.rd(), data);
// +1I
self.add_cycle();
} else {
self.add_cycles(addr, bus, NonSeq + MemoryAccess32);
let value = self.get_reg(insn.rd());
if insn.is_transfering_bytes() {
// +1N
self.add_cycles(addr, bus, NonSeq + MemoryAccess8);
bus.write_8(addr, value as u8).expect("bus error");
self.store_8(addr, value as u8, bus);
} else {
// +1N
self.add_cycles(addr, bus, NonSeq + MemoryAccess32);
bus.write_32(addr, value).expect("bus error");
self.store_32(addr, value, bus);
};
}
@ -186,27 +138,16 @@ impl Core {
if let Some(result) = result {
self.gpr[REG_SP] = result as u32;
}
// +1S
self.add_cycles(
insn.pc + (self.word_size() as u32),
bus,
Seq + MemoryAccess32,
);
Ok(CpuPipelineAction::IncPC)
}
fn exec_thumb_push_pop(
&mut self,
bus: &mut Bus,
insn: ThumbInstruction,
) -> CpuExecResult {
fn exec_thumb_push_pop(&mut self, bus: &mut Bus, insn: ThumbInstruction) -> CpuExecResult {
// (From GBATEK) Execution Time: nS+1N+1I (POP), (n+1)S+2N+1I (POP PC), or (n-1)S+2N (PUSH).
let is_pop = insn.is_load();
let mut pipeline_action = CpuPipelineAction::IncPC;
self.add_cycles(insn.pc, bus, NonSeq + MemoryAccess16);
let pc_lr_flag = insn.flag(ThumbInstruction::FLAG_R);
let rlist = insn.register_list();
if is_pop {
@ -216,62 +157,30 @@ impl Core {
if pc_lr_flag {
pop(self, bus, REG_PC);
pipeline_action = CpuPipelineAction::Flush;
self.add_cycles(
self.pc,
bus,
Seq + MemoryAccess32
);
self.add_cycles(
self.pc + (self.word_size() as u32),
bus,
NonSeq + MemoryAccess16
);
}
self.add_cycle();
} else {
self.add_cycles(
self.gpr[REG_SP],
bus,
NonSeq + MemoryAccess32
);
if pc_lr_flag {
push(self, bus, REG_LR);
}
for r in rlist.into_iter().rev() {
push(self, bus ,r);
push(self, bus, r);
}
self.add_cycles(self.gpr[REG_SP], bus, NonSeq + MemoryAccess32);
}
Ok(CpuPipelineAction::IncPC)
Ok(pipeline_action)
}
fn exec_thumb_branch_with_cond(
&mut self,
bus: &mut Bus,
_bus: &mut Bus,
insn: ThumbInstruction,
) -> CpuExecResult {
if !self.check_arm_cond(insn.cond()) {
self.add_cycles(
insn.pc + (self.word_size() as u32),
bus,
Seq + MemoryAccess16,
);
Ok(CpuPipelineAction::IncPC)
} else {
// +1N
self.add_cycles(insn.pc, bus, NonSeq + MemoryAccess32);
let offset = insn.offset8() as i8 as i32;
self.pc = (insn.pc as i32).wrapping_add(offset) as u32;
// +2S
self.add_cycles(self.pc, bus, Seq + MemoryAccess32);
self.add_cycles(
self.pc + (self.word_size() as u32),
bus,
Seq + MemoryAccess32,
);
Ok(CpuPipelineAction::Flush)
}
}


@ -325,12 +325,9 @@ mod tests {
let insn = ThumbInstruction::decode(0x4801, 0x6).unwrap();
let bytes = vec![
/* 0: */ 0x00, 0x00,
/* 2: */ 0x00, 0x00,
/* 4: */ 0x00, 0x00,
/* 6: <pc> */ 0x00, 0x00,
/* 8: */ 0x00, 0x00, 0x00, 0x00,
/* c: */ 0x78, 0x56, 0x34, 0x12,
/* 0: */ 0x00, 0x00, /* 2: */ 0x00, 0x00, /* 4: */ 0x00, 0x00,
/* 6: <pc> */ 0x00, 0x00, /* 8: */ 0x00, 0x00, 0x00, 0x00,
/* c: */ 0x78, 0x56, 0x34, 0x12,
];
let mut mem = BoxedMemory::new(bytes.into_boxed_slice());
let mut core = Core::new();