chore: clean unused directory

Former-commit-id: 46120f40cc46143b3c7cf85130fdcb28e3af837c [formerly c344087beacd01a48175dbea614038a15e801350]
Former-commit-id: b1b5f6b362ec39f640077f0257c60fca374d9dd3
Muhammad Nauman Raza 2024-03-23 20:29:49 +00:00
parent e72e7636a3
commit 68e7e714d0
19 changed files with 0 additions and 5583 deletions


@@ -1,27 +0,0 @@
[package]
name = "arm7tdmi"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
rustboyadvance-utils = { path = "./utils" }
log = "0.4.8"
bit = "^0.1"
cfg-if = "1.0.0"
serde = { version = "1.0.104", features = ["derive", "rc"] }
ansi_term = "0.12.1"
colored = "1.9"
byteorder = "1"
num = "0.2.1"
num-traits = "0.2"
enum-primitive-derive = "^0.1"
gdbstub = "0.6.3"
gdbstub_arch = "0.2.4"
[dev-dependencies]
simple_logger = "2.3.0" # For the examples
[build-dependencies]
bit = "^0.1"


@@ -1,435 +0,0 @@
/// This build script creates the ARM_LUT and THUMB_LUT opcode lookup tables used by cpu.rs
use std::env;
use std::fs;
use std::io::Write;
use std::path::Path;
extern crate bit;
use bit::BitIndex;
// copied and slightly adjusted from arm7tdmi/thumb/mod.rs
fn thumb_decode(i: u16) -> (&'static str, String) {
let offset5 = i.bit_range(6..11) as u8;
let load = i.bit(11);
if i & 0xf800 == 0x1800 {
(
"AddSub",
format!(
"exec_thumb_add_sub::<{SUB}, {IMM}, {RN}>",
SUB = i.bit(9),
IMM = i.bit(10),
RN = i.bit_range(6..9) as usize
),
)
} else if i & 0xe000 == 0x0000 {
(
"MoveShiftedReg",
format!(
"exec_thumb_move_shifted_reg::<{BS_OP}, {IMM}>",
BS_OP = i.bit_range(11..13) as u8,
IMM = i.bit_range(6..11) as u8
),
)
} else if i & 0xe000 == 0x2000 {
(
"DataProcessImm",
format!(
"exec_thumb_data_process_imm::<{OP}, {RD}>",
OP = i.bit_range(11..13) as u8,
RD = i.bit_range(8..11)
),
)
} else if i & 0xfc00 == 0x4000 {
(
"AluOps",
format!("exec_thumb_alu_ops::<{OP}>", OP = i.bit_range(6..10)),
)
} else if i & 0xfc00 == 0x4400 {
(
"HiRegOpOrBranchExchange",
format!(
"exec_thumb_hi_reg_op_or_bx::<{OP}, {FLAG_H1}, {FLAG_H2}>",
OP = i.bit_range(8..10) as u8,
FLAG_H1 = i.bit(7),
FLAG_H2 = i.bit(6),
),
)
} else if i & 0xf800 == 0x4800 {
(
"LdrPc",
format!(
"exec_thumb_ldr_pc::<{RD}>",
RD = i.bit_range(8..11) as usize
),
)
} else if i & 0xf200 == 0x5000 {
(
"LdrStrRegOffset",
format!(
"exec_thumb_ldr_str_reg_offset::<{LOAD}, {RO}, {BYTE}>",
LOAD = load,
RO = i.bit_range(6..9) as usize,
BYTE = i.bit(10),
),
)
} else if i & 0xf200 == 0x5200 {
(
"LdrStrSHB",
format!(
"exec_thumb_ldr_str_shb::<{RO}, {SIGN_EXTEND}, {HALFWORD}>",
RO = i.bit_range(6..9) as usize,
SIGN_EXTEND = i.bit(10),
HALFWORD = i.bit(11),
),
)
} else if i & 0xe000 == 0x6000 {
let is_transferring_bytes = i.bit(12);
let offset = if is_transferring_bytes {
offset5
} else {
(offset5 << 3) >> 1
};
(
"LdrStrImmOffset",
format!(
"exec_thumb_ldr_str_imm_offset::<{LOAD}, {BYTE}, {OFFSET}>",
LOAD = load,
BYTE = is_transferring_bytes,
OFFSET = offset
),
)
} else if i & 0xf000 == 0x8000 {
(
"LdrStrHalfWord",
format!(
"exec_thumb_ldr_str_halfword::<{LOAD}, {OFFSET}>",
LOAD = load,
OFFSET = (offset5 << 1) as i32
),
)
} else if i & 0xf000 == 0x9000 {
(
"LdrStrSp",
format!(
"exec_thumb_ldr_str_sp::<{LOAD}, {RD}>",
LOAD = load,
RD = i.bit_range(8..11)
),
)
} else if i & 0xf000 == 0xa000 {
(
"LoadAddress",
format!(
"exec_thumb_load_address::<{SP}, {RD}>",
SP = i.bit(11),
RD = i.bit_range(8..11)
),
)
} else if i & 0xff00 == 0xb000 {
(
"AddSp",
format!("exec_thumb_add_sp::<{FLAG_S}>", FLAG_S = i.bit(7)),
)
} else if i & 0xf600 == 0xb400 {
(
"PushPop",
format!(
"exec_thumb_push_pop::<{POP}, {FLAG_R}>",
POP = load,
FLAG_R = i.bit(8)
),
)
} else if i & 0xf000 == 0xc000 {
(
"LdmStm",
format!(
"exec_thumb_ldm_stm::<{LOAD}, {RB}>",
LOAD = load,
RB = i.bit_range(8..11) as usize
),
)
} else if i & 0xff00 == 0xdf00 {
("Swi", String::from("exec_thumb_swi"))
} else if i & 0xf000 == 0xd000 {
(
"BranchConditional",
format!(
"exec_thumb_branch_with_cond::<{COND}>",
COND = i.bit_range(8..12) as u8
),
)
} else if i & 0xf800 == 0xe000 {
("Branch", String::from("exec_thumb_branch"))
} else if i & 0xf000 == 0xf000 {
(
"BranchLongWithLink",
format!(
"exec_thumb_branch_long_with_link::<{FLAG_LOW_OFFSET}>",
FLAG_LOW_OFFSET = i.bit(11),
),
)
} else {
("Undefined", String::from("thumb_undefined"))
}
}
trait BitAsInt<T: From<bool>>: BitIndex {
fn ibit(&self, i: usize) -> T {
self.bit(i).into()
}
}
impl BitAsInt<u32> for u32 {}
/// Returns a string representation of arm7tdmi::arm::ArmFormat enum member
/// # Arguments
/// * `i` - A 32bit ARM instruction
///
/// Decoding is according to this table from http://problemkaputt.de/gbatek.htm#ARMBinaryOpcodeFormat
/// ```
/// |..3 ..................2 ..................1 ..................0|
/// |1_0_9_8_7_6_5_4_3_2_1_0_9_8_7_6_5_4_3_2_1_0_9_8_7_6_5_4_3_2_1_0|
/// |_Cond__|0_0_0|___Op__|S|__Rn___|__Rd___|__Shift__|Typ|0|__Rm___| DataProc
/// |_Cond__|0_0_0|___Op__|S|__Rn___|__Rd___|__Rs___|0|Typ|1|__Rm___| DataProc
/// |_Cond__|0_0_1|___Op__|S|__Rn___|__Rd___|_Shift_|___Immediate___| DataProc
/// |_Cond__|0_0_1_1_0_0_1_0_0_0_0_0_1_1_1_1_0_0_0_0|_____Hint______| ARM11:Hint
/// |_Cond__|0_0_1_1_0|P|1|0|_Field_|__Rd___|_Shift_|___Immediate___| PSR Imm
/// |_Cond__|0_0_0_1_0|P|L|0|_Field_|__Rd___|0_0_0_0|0_0_0_0|__Rm___| PSR Reg
/// |_Cond__|0_0_0_1_0_0_1_0_1_1_1_1_1_1_1_1_1_1_1_1|0_0|L|1|__Rn___| BX,BLX
/// |1_1_1_0|0_0_0_1_0_0_1_0|_____immediate_________|0_1_1_1|_immed_| ARM9:BKPT
/// |_Cond__|0_0_0_1_0_1_1_0_1_1_1_1|__Rd___|1_1_1_1|0_0_0_1|__Rm___| ARM9:CLZ
/// |_Cond__|0_0_0_1_0|Op_|0|__Rn___|__Rd___|0_0_0_0|0_1_0_1|__Rm___| ARM9:QALU
/// |_Cond__|0_0_0_0_0_0|A|S|__Rd___|__Rn___|__Rs___|1_0_0_1|__Rm___| Multiply
/// |_Cond__|0_0_0_0_0_1_0_0|_RdHi__|_RdLo__|__Rs___|1_0_0_1|__Rm___| ARM11:UMAAL
/// |_Cond__|0_0_0_0_1|U|A|S|_RdHi__|_RdLo__|__Rs___|1_0_0_1|__Rm___| MulLong
/// |_Cond__|0_0_0_1_0|Op_|0|Rd/RdHi|Rn/RdLo|__Rs___|1|y|x|0|__Rm___| MulHalfARM9
/// |_Cond__|0_0_0_1_0|B|0_0|__Rn___|__Rd___|0_0_0_0|1_0_0_1|__Rm___| TransSwp12
/// |_Cond__|0_0_0_1_1|_Op__|__Rn___|__Rd___|1_1_1_1|1_0_0_1|__Rm___| ARM11:LDREX
/// |_Cond__|0_0_0|P|U|0|W|L|__Rn___|__Rd___|0_0_0_0|1|S|H|1|__Rm___| TransReg10
/// |_Cond__|0_0_0|P|U|1|W|L|__Rn___|__Rd___|OffsetH|1|S|H|1|OffsetL| TransImm10
/// |_Cond__|0_1_0|P|U|B|W|L|__Rn___|__Rd___|_________Offset________| TransImm9
/// |_Cond__|0_1_1|P|U|B|W|L|__Rn___|__Rd___|__Shift__|Typ|0|__Rm___| TransReg9
/// |_Cond__|0_1_1|________________xxx____________________|1|__xxx__| Undefined
/// |_Cond__|0_1_1|Op_|x_x_x_x_x_x_x_x_x_x_x_x_x_x_x_x_x_x|1|x_x_x_x| ARM11:Media
/// |1_1_1_1_0_1_0_1_0_1_1_1_1_1_1_1_1_1_1_1_0_0_0_0_0_0_0_1_1_1_1_1| ARM11:CLREX
/// |_Cond__|1_0_0|P|U|S|W|L|__Rn___|__________Register_List________| BlockTrans
/// |_Cond__|1_0_1|L|___________________Offset______________________| B,BL,BLX
/// |_Cond__|1_1_0|P|U|N|W|L|__Rn___|__CRd__|__CP#__|____Offset_____| CoDataTrans
/// |_Cond__|1_1_0_0_0_1_0|L|__Rn___|__Rd___|__CP#__|_CPopc_|__CRm__| CoRR ARM9
/// |_Cond__|1_1_1_0|_CPopc_|__CRn__|__CRd__|__CP#__|_CP__|0|__CRm__| CoDataOp
/// |_Cond__|1_1_1_0|CPopc|L|__CRn__|__Rd___|__CP#__|_CP__|1|__CRm__| CoRegTrans
/// |_Cond__|1_1_1_1|_____________Ignored_by_Processor______________| SWI
/// ```
fn arm_decode(i: u32) -> (&'static str, String) {
const T: bool = true;
const F: bool = false;
// First, decode the top-most non-condition bits
match i.bit_range(26..28) {
0b00 => {
/* DataProcessing and friends */
let result = match (i.bit_range(23..26), i.bit_range(4..8)) {
(0b000, 0b1001) => {
if 0b0 == i.ibit(22) {
Some((
"Multiply",
format!(
"exec_arm_mul_mla::<{UPDATE_FLAGS}, {ACCUMULATE}>",
UPDATE_FLAGS = i.bit(20),
ACCUMULATE = i.bit(21),
),
))
} else {
None
}
}
(0b001, 0b1001) => Some((
"MultiplyLong",
format!(
"exec_arm_mull_mlal::<{UPDATE_FLAGS}, {ACCUMULATE}, {U_FLAG}>",
UPDATE_FLAGS = i.bit(20),
ACCUMULATE = i.bit(21),
U_FLAG = i.bit(22),
),
)),
(0b010, 0b1001) => {
if 0b00 == i.bit_range(20..22) {
Some((
"SingleDataSwap",
format!("exec_arm_swp::<{BYTE}>", BYTE = i.bit(22)),
))
} else {
None
}
}
(0b010, 0b0001) => {
if 0b010 == i.bit_range(20..23) {
Some(("BranchExchange", "exec_arm_bx".to_string()))
} else {
None
}
}
_ => None,
};
if let Some(result) = result {
result
} else {
match (i.ibit(25), i.ibit(22), i.ibit(7), i.ibit(4)) {
(0, 0, 1, 1) => (
"HalfwordDataTransferRegOffset",
format!(
"exec_arm_ldr_str_hs_reg::<{HS}, {LOAD}, {WRITEBACK}, {PRE_INDEX}, {ADD}>",
HS = (i & 0b1100000) >> 5,
LOAD = i.bit(20),
WRITEBACK = i.bit(21),
ADD = i.bit(23),
PRE_INDEX = i.bit(24),
),
),
(0, 1, 1, 1) => (
"HalfwordDataTransferImmediateOffset",
format!(
"exec_arm_ldr_str_hs_imm::<{HS}, {LOAD}, {WRITEBACK}, {PRE_INDEX}, {ADD}>",
HS = (i & 0b1100000) >> 5,
LOAD = i.bit(20),
WRITEBACK = i.bit(21),
ADD = i.bit(23),
PRE_INDEX = i.bit(24)
),
),
_ => {
let set_cond_flags = i.bit(20);
// PSR Transfers are encoded as a subset of Data Processing,
// with the S bit OFF and the encoded opcode being one of TST, TEQ, CMP, CMN
let is_op_not_touching_rd = i.bit_range(21..25) & 0b1100 == 0b1000;
if !set_cond_flags && is_op_not_touching_rd {
if i.bit(21) {
("MoveToStatus", format!("exec_arm_transfer_to_status::<{IMM}, {SPSR_FLAG}>",
IMM = i.bit(25), SPSR_FLAG = i.bit(22)))
} else {
("MoveFromStatus", format!("exec_arm_mrs::<{SPSR_FLAG}>", SPSR_FLAG = i.bit(22)))
}
} else {
("DataProcessing", format!("exec_arm_data_processing::<{OP}, {IMM}, {SET_FLAGS}, {SHIFT_BY_REG}>",
OP=i.bit_range(21..25),
IMM=i.bit(25),
SET_FLAGS=i.bit(20),
SHIFT_BY_REG=i.bit(4)))
}
}
}
}
}
0b01 => {
match (i.bit(25), i.bit(4)) {
(_, F) | (F, T) => ("SingleDataTransfer", format!(
"exec_arm_ldr_str::<{LOAD}, {WRITEBACK}, {PRE_INDEX}, {BYTE}, {SHIFT}, {ADD}, {BS_OP}, {SHIFT_BY_REG}>",
LOAD = i.bit(20),
WRITEBACK = i.bit(21),
BYTE = i.bit(22),
ADD = i.bit(23),
PRE_INDEX = i.bit(24),
SHIFT = i.bit(25),
BS_OP = i.bit_range(5..7) as u8,
SHIFT_BY_REG = i.bit(4),
)),
(T, T) => ("Undefined", String::from("arm_undefined")), /* Possible ARM11 but we don't implement these */
}
}
0b10 => match i.bit(25) {
F => (
"BlockDataTransfer",
format!(
"exec_arm_ldm_stm::<{LOAD}, {WRITEBACK}, {FLAG_S}, {ADD}, {PRE_INDEX}>",
LOAD = i.bit(20),
WRITEBACK = i.bit(21),
FLAG_S = i.bit(22),
ADD = i.bit(23),
PRE_INDEX = i.bit(24),
),
),
T => (
"BranchLink",
format!("exec_arm_b_bl::<{LINK}>", LINK = i.bit(24)),
),
},
0b11 => {
match (i.ibit(25), i.ibit(24), i.ibit(4)) {
(0b0, _, _) => ("Undefined", String::from("arm_undefined")), /* CoprocessorDataTransfer not implemented */
(0b1, 0b0, 0b0) => ("Undefined", String::from("arm_undefined")), /* CoprocessorDataOperation not implemented */
(0b1, 0b0, 0b1) => ("Undefined", String::from("arm_undefined")), /* CoprocessorRegisterTransfer not implemented */
(0b1, 0b1, _) => ("SoftwareInterrupt", String::from("exec_arm_swi")),
_ => ("Undefined", String::from("arm_undefined")),
}
}
_ => unreachable!(),
}
}
fn generate_thumb_lut(file: &mut fs::File) -> Result<(), std::io::Error> {
writeln!(file, "impl<I: MemoryInterface> Arm7tdmiCore<I> {{")?;
writeln!(
file,
" pub const THUMB_LUT: [ThumbInstructionInfo<I>; 1024] = ["
)?;
for i in 0..1024 {
let (thumb_fmt, handler_name) = thumb_decode(i << 6);
writeln!(
file,
" /* {:#x} */
ThumbInstructionInfo {{
handler_fn: Arm7tdmiCore::{},
#[cfg(feature = \"debugger\")]
fmt: ThumbFormat::{},
}},",
i, handler_name, thumb_fmt
)?;
}
writeln!(file, " ];")?;
writeln!(file, "}}")?;
Ok(())
}
fn generate_arm_lut(file: &mut fs::File) -> Result<(), std::io::Error> {
writeln!(file, "impl<I: MemoryInterface> Arm7tdmiCore<I> {{")?;
writeln!(
file,
" pub const ARM_LUT: [ArmInstructionInfo<I>; 4096] = ["
)?;
for i in 0..4096 {
let (arm_fmt, handler_name) = arm_decode(((i & 0xff0) << 16) | ((i & 0x00f) << 4));
writeln!(
file,
" /* {:#x} */
ArmInstructionInfo {{
handler_fn: Arm7tdmiCore::{},
#[cfg(feature = \"debugger\")]
fmt: ArmFormat::{},
}} ,",
i, handler_name, arm_fmt
)?;
}
writeln!(file, " ];")?;
writeln!(file, "}}")?;
Ok(())
}
fn main() {
// TODO - don't do this in the build script and use `const fn` instead when it becomes stable
let out_dir = env::var_os("OUT_DIR").unwrap();
let thumb_lut_path = Path::new(&out_dir).join("thumb_lut.rs");
let mut thumb_lut_file = fs::File::create(thumb_lut_path).expect("failed to create file");
generate_thumb_lut(&mut thumb_lut_file).expect("failed to generate thumb table");
let arm_lut_path = Path::new(&out_dir).join("arm_lut.rs");
let mut arm_lut_file = fs::File::create(arm_lut_path).expect("failed to create file");
generate_arm_lut(&mut arm_lut_file).expect("failed to generate arm table");
println!("cargo:rerun-if-changed=build.rs");
}
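
The build script above pairs a decoder (thumb_decode / arm_decode) that maps an index to a handler path with a generator that writes one table entry per index into a Rust source file under OUT_DIR, which the crate then pulls in (typically via include!). A minimal, self-contained sketch of that generate-source-from-a-decoder pattern, using toy names that are not part of the deleted crate:

use std::fmt::Write as _;

// Toy decoder: map a 2-bit "opcode" to a handler name, the way
// thumb_decode/arm_decode above return a handler path per LUT index.
fn decode(i: u8) -> &'static str {
    match i & 0b11 {
        0b00 => "exec_add",
        0b01 => "exec_sub",
        0b10 => "exec_mov",
        _ => "exec_undefined",
    }
}

fn main() {
    // Build the table as Rust source text; the real build.rs writes the same
    // kind of output to $OUT_DIR/{thumb,arm}_lut.rs instead of stdout.
    let mut out = String::new();
    writeln!(out, "pub const LUT: [Handler; 4] = [").unwrap();
    for i in 0..4u8 {
        writeln!(out, "    /* {:#x} */ Core::{},", i, decode(i)).unwrap();
    }
    writeln!(out, "];").unwrap();
    print!("{}", out);
}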


@@ -1,362 +0,0 @@
use bit::BitIndex;
use crate::{memory::MemoryInterface, registers_consts::REG_PC, Arm7tdmiCore};
#[derive(Debug, Primitive, Eq, PartialEq)]
pub enum AluOpCode {
AND = 0b0000,
EOR = 0b0001,
SUB = 0b0010,
RSB = 0b0011,
ADD = 0b0100,
ADC = 0b0101,
SBC = 0b0110,
RSC = 0b0111,
TST = 0b1000,
TEQ = 0b1001,
CMP = 0b1010,
CMN = 0b1011,
ORR = 0b1100,
MOV = 0b1101,
BIC = 0b1110,
MVN = 0b1111,
}
impl AluOpCode {
pub fn is_setting_flags(&self) -> bool {
use AluOpCode::*;
matches!(self, TST | TEQ | CMP | CMN)
}
pub fn is_logical(&self) -> bool {
use AluOpCode::*;
matches!(self, MOV | MVN | ORR | EOR | AND | BIC | TST | TEQ)
}
pub fn is_arithmetic(&self) -> bool {
use AluOpCode::*;
matches!(self, ADD | ADC | SUB | SBC | RSB | RSC | CMP | CMN)
}
}
#[derive(Debug, PartialEq, Eq, Primitive, Copy, Clone)]
pub enum BarrelShiftOpCode {
LSL = 0,
LSR = 1,
ASR = 2,
ROR = 3,
}
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum ShiftRegisterBy {
ByAmount(u32),
ByRegister(usize),
}
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct ShiftedRegister {
pub reg: usize,
pub shift_by: ShiftRegisterBy,
pub bs_op: BarrelShiftOpCode,
pub added: Option<bool>,
}
impl ShiftedRegister {
pub fn is_shifted_by_reg(&self) -> bool {
matches!(self.shift_by, ShiftRegisterBy::ByRegister(_))
}
}
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum BarrelShifterValue {
ImmediateValue(u32),
RotatedImmediate(u32, u32),
ShiftedRegister(ShiftedRegister),
}
impl BarrelShifterValue {
/// Decode operand2 as an immediate value
pub fn decode_rotated_immediate(&self) -> Option<u32> {
if let BarrelShifterValue::RotatedImmediate(immediate, rotate) = self {
return Some(immediate.rotate_right(*rotate));
}
None
}
pub fn shifted_register(
reg: usize,
shift_by: ShiftRegisterBy,
bs_op: BarrelShiftOpCode,
added: Option<bool>,
) -> BarrelShifterValue {
let shft_reg = ShiftedRegister {
reg,
shift_by,
bs_op,
added,
};
BarrelShifterValue::ShiftedRegister(shft_reg)
}
}
impl<I: MemoryInterface> Arm7tdmiCore<I> {
pub fn lsl(&mut self, val: u32, amount: u32, carry: &mut bool) -> u32 {
match amount {
0 => val,
x if x < 32 => {
*carry = val.wrapping_shr(32 - x) & 1 == 1;
val << x
}
32 => {
*carry = val & 1 == 1;
0
}
_ => {
*carry = false;
0
}
}
}
pub fn lsr(&mut self, val: u32, amount: u32, carry: &mut bool, immediate: bool) -> u32 {
if amount != 0 {
match amount {
x if x < 32 => {
*carry = (val >> (amount - 1) & 1) == 1;
val >> amount
}
32 => {
*carry = val.bit(31);
0
}
_ => {
*carry = false;
0
}
}
} else if immediate {
*carry = val.bit(31);
0
} else {
val
}
}
pub fn asr(&mut self, val: u32, amount: u32, carry: &mut bool, immediate: bool) -> u32 {
let amount = if immediate && amount == 0 { 32 } else { amount };
match amount {
0 => val,
x if x < 32 => {
*carry = val.wrapping_shr(amount - 1) & 1 == 1;
(val as i32).wrapping_shr(amount) as u32
}
_ => {
let bit31 = val.bit(31);
*carry = bit31;
if bit31 {
0xffffffff
} else {
0
}
}
}
}
pub fn rrx(&mut self, val: u32, carry: &mut bool) -> u32 {
let old_c = *carry as i32;
*carry = val & 0b1 != 0;
((val >> 1) as i32 | (old_c << 31)) as u32
}
pub fn ror(
&mut self,
val: u32,
amount: u32,
carry: &mut bool,
immediate: bool,
rrx: bool,
) -> u32 {
match amount {
0 => {
if immediate & rrx {
self.rrx(val, carry)
} else {
val
}
}
_ => {
let amount = amount % 32;
let val = if amount != 0 {
val.rotate_right(amount)
} else {
val
};
*carry = (val).bit(31);
val
}
}
}
/// Performs a generic barrel shifter operation
#[inline]
pub fn barrel_shift_op(
&mut self,
shift: BarrelShiftOpCode,
val: u32,
amount: u32,
carry: &mut bool,
immediate: bool,
) -> u32 {
//
// From GBATEK:
// Zero Shift Amount (Shift Register by Immediate, with Immediate=0)
// LSL#0: No shift performed, ie. directly Op2=Rm, the C flag is NOT affected.
// LSR#0: Interpreted as LSR#32, ie. Op2 becomes zero, C becomes Bit 31 of Rm.
// ASR#0: Interpreted as ASR#32, ie. Op2 and C are filled by Bit 31 of Rm.
// ROR#0: Interpreted as RRX#1 (RCR), like ROR#1, but Op2 Bit 31 set to old C.
//
// From ARM7TDMI Datasheet:
// 1 LSL by 32 has result zero, carry out equal to bit 0 of Rm.
// 2 LSL by more than 32 has result zero, carry out zero.
// 3 LSR by 32 has result zero, carry out equal to bit 31 of Rm.
// 4 LSR by more than 32 has result zero, carry out zero.
// 5 ASR by 32 or more has result filled with and carry out equal to bit 31 of Rm.
// 6 ROR by 32 has result equal to Rm, carry out equal to bit 31 of Rm.
// 7 ROR by n where n is greater than 32 will give the same result and carry out
// as ROR by n-32; therefore repeatedly subtract 32 from n until the amount is
// in the range 1 to 32 and see above.
//
match shift {
BarrelShiftOpCode::LSL => self.lsl(val, amount, carry),
BarrelShiftOpCode::LSR => self.lsr(val, amount, carry, immediate),
BarrelShiftOpCode::ASR => self.asr(val, amount, carry, immediate),
BarrelShiftOpCode::ROR => self.ror(val, amount, carry, immediate, true),
}
}
#[inline]
pub fn shift_by_register(
&mut self,
bs_op: BarrelShiftOpCode,
reg: usize,
rs: usize,
carry: &mut bool,
) -> u32 {
let mut val = self.get_reg(reg);
if reg == REG_PC {
val += 4; // PC prefetching
}
let amount = self.get_reg(rs) & 0xff;
self.barrel_shift_op(bs_op, val, amount, carry, false)
}
pub fn register_shift_const<const BS_OP: u8, const SHIFT_BY_REG: bool>(
&mut self,
offset: u32,
reg: usize,
carry: &mut bool,
) -> u32 {
let op = match BS_OP {
0 => BarrelShiftOpCode::LSL,
1 => BarrelShiftOpCode::LSR,
2 => BarrelShiftOpCode::ASR,
3 => BarrelShiftOpCode::ROR,
_ => unsafe { std::hint::unreachable_unchecked() },
};
if SHIFT_BY_REG {
let rs = offset.bit_range(8..12) as usize;
self.shift_by_register(op, reg, rs, carry)
} else {
let amount = offset.bit_range(7..12);
self.barrel_shift_op(op, self.get_reg(reg), amount, carry, true)
}
}
pub fn register_shift(&mut self, shift: &ShiftedRegister, carry: &mut bool) -> u32 {
match shift.shift_by {
ShiftRegisterBy::ByAmount(amount) => {
self.barrel_shift_op(shift.bs_op, self.get_reg(shift.reg), amount, carry, true)
}
ShiftRegisterBy::ByRegister(rs) => {
self.shift_by_register(shift.bs_op, shift.reg, rs, carry)
}
}
}
pub fn get_barrel_shifted_value(&mut self, sval: &BarrelShifterValue, carry: &mut bool) -> u32 {
// TODO decide if error handling or panic here
match sval {
BarrelShifterValue::ImmediateValue(offset) => *offset,
BarrelShifterValue::ShiftedRegister(shifted_reg) => {
let added = (shifted_reg).added.unwrap_or(true);
let abs = self.register_shift(shifted_reg, carry);
if added {
abs
} else {
(-(abs as i32)) as u32
}
}
_ => panic!("bad barrel shift"),
}
}
pub(super) fn alu_sub_flags(
&self,
a: u32,
b: u32,
carry: &mut bool,
overflow: &mut bool,
) -> u32 {
let res = a.wrapping_sub(b);
*carry = b <= a;
*overflow = (a as i32).overflowing_sub(b as i32).1;
res
}
pub(super) fn alu_add_flags(
&self,
a: u32,
b: u32,
carry: &mut bool,
overflow: &mut bool,
) -> u32 {
let res = a.wrapping_add(b);
*carry = add_carry_result(a as u64, b as u64);
*overflow = (a as i32).overflowing_add(b as i32).1;
res
}
pub(super) fn alu_adc_flags(
&self,
a: u32,
b: u32,
carry: &mut bool,
overflow: &mut bool,
) -> u32 {
let c = self.cpsr.C() as u64;
let res = (a as u64) + (b as u64) + c;
*carry = res > 0xffffffff;
*overflow = (!(a ^ b) & (b ^ (res as u32))).bit(31);
res as u32
}
pub(super) fn alu_sbc_flags(
&self,
a: u32,
b: u32,
carry: &mut bool,
overflow: &mut bool,
) -> u32 {
self.alu_adc_flags(a, !b, carry, overflow)
}
pub fn alu_update_flags(&mut self, result: u32, _is_arithmetic: bool, c: bool, v: bool) {
self.cpsr.set_N((result as i32) < 0);
self.cpsr.set_Z(result == 0);
self.cpsr.set_C(c);
self.cpsr.set_V(v);
}
}
#[inline]
fn add_carry_result(a: u64, b: u64) -> bool {
a.wrapping_add(b) > 0xffffffff
}
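
The flag helpers above encode the usual ARM rules: for additions, C is the unsigned carry out of bit 31 and V is signed overflow; SUB sets C when no borrow occurs (b <= a); and SBC is expressed as ADC with the second operand inverted. A small free-standing sketch (plain functions, not the deleted methods) that checks the addition rules:

// C = unsigned 32-bit overflow, V = signed 32-bit overflow, the result wraps.
fn add_flags(a: u32, b: u32) -> (u32, bool, bool) {
    let res = a.wrapping_add(b);
    let carry = (a as u64) + (b as u64) > 0xffff_ffff;
    let overflow = (a as i32).overflowing_add(b as i32).1;
    (res, carry, overflow)
}

fn main() {
    // 0xffff_ffff + 1 wraps to 0: carry out, but no signed overflow (-1 + 1 = 0).
    assert_eq!(add_flags(0xffff_ffff, 1), (0, true, false));
    // 0x7fff_ffff + 1 stays inside the unsigned range but overflows i32.
    assert_eq!(add_flags(0x7fff_ffff, 1), (0x8000_0000, false, true));
    println!("addition flag rules hold");
}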


@@ -1,481 +0,0 @@
use std::fmt;
use crate::bit::BitIndex;
use super::{ArmDecodeHelper, ArmFormat, ArmInstruction};
use super::{AluOpCode, ArmCond, ArmHalfwordTransferType};
use crate::arm7tdmi::*;
impl fmt::Display for ArmCond {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use ArmCond::*;
match self {
Invalid => panic!("Invalid condition code"),
EQ => write!(f, "eq"),
NE => write!(f, "ne"),
HS => write!(f, "cs"),
LO => write!(f, "cc"),
MI => write!(f, "mi"),
PL => write!(f, "pl"),
VS => write!(f, "vs"),
VC => write!(f, "vc"),
HI => write!(f, "hi"),
LS => write!(f, "ls"),
GE => write!(f, "ge"),
LT => write!(f, "lt"),
GT => write!(f, "gt"),
LE => write!(f, "le"),
AL => write!(f, ""), // the disassembly should ignore this
}
}
}
impl fmt::Display for AluOpCode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use AluOpCode::*;
match self {
AND => write!(f, "and"),
EOR => write!(f, "eor"),
SUB => write!(f, "sub"),
RSB => write!(f, "rsb"),
ADD => write!(f, "add"),
ADC => write!(f, "adc"),
SBC => write!(f, "sbc"),
RSC => write!(f, "rsc"),
TST => write!(f, "tst"),
TEQ => write!(f, "teq"),
CMP => write!(f, "cmp"),
CMN => write!(f, "cmn"),
ORR => write!(f, "orr"),
MOV => write!(f, "mov"),
BIC => write!(f, "bic"),
MVN => write!(f, "mvn"),
}
}
}
impl fmt::Display for BarrelShiftOpCode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use BarrelShiftOpCode::*;
match self {
LSL => write!(f, "lsl"),
LSR => write!(f, "lsr"),
ASR => write!(f, "asr"),
ROR => write!(f, "ror"),
}
}
}
impl fmt::Display for ArmHalfwordTransferType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use ArmHalfwordTransferType::*;
match self {
UnsignedHalfwords => write!(f, "h"),
SignedHalfwords => write!(f, "sh"),
SignedByte => write!(f, "sb"),
}
}
}
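// NOTE: despite its name, this returns `false` only for an LSL #0 (no-op) shift
// and `true` for every shift that should actually be printed.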
fn is_lsl0(shift: &ShiftedRegister) -> bool {
if let ShiftRegisterBy::ByAmount(val) = shift.shift_by {
return !(val == 0 && shift.bs_op == BarrelShiftOpCode::LSL);
}
true
}
impl fmt::Display for ShiftedRegister {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let reg = reg_string(self.reg).to_string();
if !is_lsl0(&self) {
write!(f, "{}", reg)
} else {
match self.shift_by {
ShiftRegisterBy::ByAmount(imm) => write!(f, "{}, {} #{}", reg, self.bs_op, imm),
ShiftRegisterBy::ByRegister(rs) => {
write!(f, "{}, {} {}", reg, self.bs_op, reg_string(rs))
}
}
}
}
}
impl ArmInstruction {
fn fmt_bx(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"bx\t{Rn}",
Rn = reg_string(self.raw.bit_range(0..4) as usize)
)
}
fn fmt_branch(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"b{link}{cond}\t{ofs:#x}",
link = if self.raw.link_flag() { "l" } else { "" },
cond = self.raw.cond(),
ofs = 8 + self.pc.wrapping_add(self.raw.branch_offset() as Addr)
)
}
fn set_cond_mark(&self) -> &str {
if self.raw.set_cond_flag() {
"s"
} else {
""
}
}
fn fmt_operand2(&self, f: &mut fmt::Formatter<'_>) -> Result<Option<u32>, fmt::Error> {
let operand2 = self.raw.operand2();
match operand2 {
BarrelShifterValue::RotatedImmediate(_, _) => {
let value = operand2.decode_rotated_immediate().unwrap();
write!(f, "#{}\t; {:#x}", value, value)?;
Ok(Some(value as u32))
}
BarrelShifterValue::ShiftedRegister(shift) => {
write!(f, "{}", shift)?;
Ok(None)
}
_ => panic!("invalid operand2"),
}
}
fn fmt_data_processing(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use AluOpCode::*;
let opcode = self.raw.opcode();
let rd = self.raw.bit_range(16..20) as usize;
let rn = self.raw.bit_range(16..20) as usize;
match opcode {
MOV | MVN => write!(
f,
"{opcode}{S}{cond}\t{Rd}, ",
opcode = opcode,
cond = self.raw.cond(),
S = self.set_cond_mark(),
Rd = reg_string(rd)
),
CMP | CMN | TEQ | TST => write!(
f,
"{opcode}{cond}\t{Rn}, ",
opcode = opcode,
cond = self.raw.cond(),
Rn = reg_string(rn)
),
_ => write!(
f,
"{opcode}{S}{cond}\t{Rd}, {Rn}, ",
opcode = opcode,
cond = self.raw.cond(),
S = self.set_cond_mark(),
Rd = reg_string(rd),
Rn = reg_string(rn)
),
}?;
self.fmt_operand2(f).unwrap();
Ok(())
}
fn auto_increment_mark(&self) -> &str {
if self.raw.write_back_flag() {
"!"
} else {
""
}
}
fn fmt_rn_offset(
&self,
f: &mut fmt::Formatter<'_>,
offset: BarrelShifterValue,
rn: usize,
) -> fmt::Result {
write!(f, "[{Rn}", Rn = reg_string(rn))?;
let (ofs_string, comment) = match offset {
BarrelShifterValue::ImmediateValue(value) => {
let value_for_comment = if rn == REG_PC {
value + self.pc + 8 // account for pipelining
} else {
value
};
(
format!("#{}", value),
Some(format!("\t; {:#x}", value_for_comment)),
)
}
BarrelShifterValue::ShiftedRegister(shift) => (
format!(
"{}{}",
if shift.added.unwrap_or(true) { "" } else { "-" },
shift
),
None,
),
_ => panic!("bad barrel shifter"),
};
if self.raw.pre_index_flag() {
write!(f, ", {}]{}", ofs_string, self.auto_increment_mark())?;
} else {
write!(f, "], {}", ofs_string)?;
}
if let Some(comment) = comment {
write!(f, "{}", comment)
} else {
Ok(())
}
}
fn fmt_ldr_str(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{mnem}{B}{T}{cond}\t{Rd}, ",
mnem = if self.raw.load_flag() { "ldr" } else { "str" },
B = if self.raw.transfer_size() == 1 {
"b"
} else {
""
},
cond = self.raw.cond(),
T = if !self.raw.pre_index_flag() && self.raw.write_back_flag() {
"t"
} else {
""
},
Rd = reg_string(self.raw.bit_range(12..16) as usize),
)?;
self.fmt_rn_offset(
f,
self.raw.ldr_str_offset(),
self.raw.bit_range(16..20) as usize,
)
}
fn fmt_ldm_stm(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{mnem}{inc_dec}{pre_post}{cond}\t{Rn}{auto_inc}, {{",
mnem = if self.raw.load_flag() { "ldm" } else { "stm" },
inc_dec = if self.raw.add_offset_flag() { 'i' } else { 'd' },
pre_post = if self.raw.pre_index_flag() { 'b' } else { 'a' },
cond = self.raw.cond(),
Rn = reg_string(self.raw.bit_range(16..20) as usize),
auto_inc = if self.raw.write_back_flag() { "!" } else { "" }
)?;
let register_list = self.raw.register_list();
let mut has_first = false;
for i in 0..16 {
if register_list.bit(i) {
if has_first {
write!(f, ", {}", reg_string(i))?;
} else {
write!(f, "{}", reg_string(i))?;
has_first = true;
}
}
}
write!(
f,
"}}{}",
if self.raw.psr_and_force_user_flag() {
"^"
} else {
""
}
)
}
/// MRS - transfer PSR contents to a register
fn fmt_mrs(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"mrs{cond}\t{Rd}, {psr}",
cond = self.raw.cond(),
Rd = reg_string(self.raw.bit_range(12..16) as usize),
psr = if self.raw.spsr_flag() { "SPSR" } else { "CPSR" }
)
}
/// MSR - transfer register/immediate contents to PSR
fn fmt_msr(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"msr{cond}\t{psr}, ",
cond = self.raw.cond(),
psr = if self.raw.spsr_flag() { "SPSR" } else { "CPSR" },
)?;
self.fmt_operand2(f).unwrap();
Ok(())
}
fn fmt_msr_flags(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"msr{cond}\t{psr}, ",
cond = self.raw.cond(),
psr = if self.raw.spsr_flag() {
"SPSR_f"
} else {
"CPSR_f"
},
)?;
if let Ok(Some(op)) = self.fmt_operand2(f) {
let psr = RegPSR::new(op & 0xf000_0000);
write!(
f,
"\t; N={} Z={} C={} V={}",
psr.N(),
psr.Z(),
psr.C(),
psr.V()
)?;
}
Ok(())
}
fn fmt_mul_mla(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let rd = self.raw.bit_range(16..20) as usize;
if self.raw.accumulate_flag() {
write!(
f,
"mla{S}{cond}\t{Rd}, {Rm}, {Rs}, {Rn}",
S = self.set_cond_mark(),
cond = self.raw.cond(),
Rd = reg_string(rd),
Rm = reg_string(self.raw.rm()),
Rs = reg_string(self.raw.rs()),
Rn = reg_string(self.raw.bit_range(12..16) as usize),
)
} else {
write!(
f,
"mul{S}{cond}\t{Rd}, {Rm}, {Rs}",
S = self.set_cond_mark(),
cond = self.raw.cond(),
Rd = reg_string(rd),
Rm = reg_string(self.raw.rm()),
Rs = reg_string(self.raw.rs()),
)
}
}
fn sign_mark(&self) -> &str {
if self.raw.u_flag() {
"s"
} else {
"u"
}
}
fn fmt_mull_mlal(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{sign}{mnem}{S}{cond}\t{RdLo}, {RdHi}, {Rm}, {Rs}",
sign = self.sign_mark(),
mnem = if self.raw.accumulate_flag() {
"mlal"
} else {
"mull"
},
S = self.set_cond_mark(),
cond = self.raw.cond(),
RdLo = reg_string(self.raw.rd_lo()),
RdHi = reg_string(self.raw.rd_hi()),
Rm = reg_string(self.raw.rm()),
Rs = reg_string(self.raw.rs()),
)
}
fn fmt_ldr_str_hs_imm_offset(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let transfer_type = self.raw.halfword_data_transfer_type();
write!(
f,
"{mnem}{type}{cond}\t{Rd}, ",
mnem = if self.raw.load_flag() { "ldr" } else { "str" },
cond = self.raw.cond(),
type = transfer_type,
Rd = reg_string(self.raw.bit_range(12..16) as usize),
)?;
self.fmt_rn_offset(
f,
self.raw.ldr_str_hs_imm_offset(),
self.raw.bit_range(16..20) as usize,
)
}
fn fmt_ldr_str_hs_reg_offset(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let transfer_type = self.raw.halfword_data_transfer_type();
write!(
f,
"{mnem}{type}{cond}\t{Rd}, ",
mnem = if self.raw.load_flag() { "ldr" } else { "str" },
cond = self.raw.cond(),
type = transfer_type,
Rd = reg_string(self.raw.bit_range(12..16) as usize),
)?;
self.fmt_rn_offset(
f,
self.raw.ldr_str_hs_reg_offset(),
self.raw.bit_range(16..20) as usize,
)
}
fn fmt_swi(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"swi{cond}\t#{comm:#x}",
cond = self.raw.cond(),
comm = self.raw.swi_comment()
)
}
fn fmt_swp(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"swp{B}{cond}\t{Rd}, {Rm}, [{Rn}]",
B = if self.raw.transfer_size() == 1 {
"b"
} else {
""
},
cond = self.raw.cond(),
Rd = reg_string(self.raw.bit_range(12..16) as usize),
Rm = reg_string(self.raw.rm()),
Rn = reg_string(self.raw.bit_range(16..20) as usize),
)
}
}
impl fmt::Display for ArmInstruction {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use ArmFormat::*;
match self.fmt {
BranchExchange => self.fmt_bx(f),
BranchLink => self.fmt_branch(f),
DataProcessing => self.fmt_data_processing(f),
SingleDataTransfer => self.fmt_ldr_str(f),
BlockDataTransfer => self.fmt_ldm_stm(f),
MoveFromStatus => self.fmt_mrs(f),
MoveToStatus => self.fmt_msr(f),
MoveToFlags => self.fmt_msr_flags(f),
Multiply => self.fmt_mul_mla(f),
MultiplyLong => self.fmt_mull_mlal(f),
HalfwordDataTransferImmediateOffset => self.fmt_ldr_str_hs_imm_offset(f),
HalfwordDataTransferRegOffset => self.fmt_ldr_str_hs_reg_offset(f),
SoftwareInterrupt => self.fmt_swi(f),
SingleDataSwap => self.fmt_swp(f),
Undefined => write!(f, "<Undefined>"),
}
}
}


@@ -1,720 +0,0 @@
use crate::{
alu::*,
memory::{MemoryAccess, MemoryInterface},
psr::RegPSR,
registers_consts::{REG_LR, REG_PC},
Arm7tdmiCore, CpuAction, CpuMode, CpuState,
};
use MemoryAccess::*;
use super::*;
impl<I: MemoryInterface> Arm7tdmiCore<I> {
pub fn arm_undefined(&mut self, insn: u32) -> CpuAction {
panic!(
"executing undefined arm instruction {:08x} at @{:08x}",
insn,
self.pc_arm()
)
}
/// Branch and Branch with Link (B, BL)
/// Execution Time: 2S + 1N
pub fn exec_arm_b_bl<const LINK: bool>(&mut self, insn: u32) -> CpuAction {
if LINK {
self.set_reg(REG_LR, (self.pc_arm() + (self.word_size() as u32)) & !0b1);
}
self.pc = (self.pc as i32).wrapping_add(insn.branch_offset()) as u32 & !1;
self.reload_pipeline32(); // Implies 2S + 1N
CpuAction::PipelineFlushed
}
pub fn branch_exchange(&mut self, mut addr: Addr) -> CpuAction {
if addr.bit(0) {
addr &= !0x1;
self.cpsr.set_state(CpuState::THUMB);
self.pc = addr;
self.reload_pipeline16();
} else {
addr &= !0x3;
self.cpsr.set_state(CpuState::ARM);
self.pc = addr;
self.reload_pipeline32();
}
CpuAction::PipelineFlushed
}
/// Branch and Exchange (BX)
/// Cycles 2S+1N
pub fn exec_arm_bx(&mut self, insn: u32) -> CpuAction {
self.branch_exchange(self.get_reg(insn.bit_range(0..4) as usize))
}
/// Move from status register
/// 1S
pub fn exec_arm_mrs<const SPSR_FLAG: bool>(&mut self, insn: u32) -> CpuAction {
let rd = insn.bit_range(12..16) as usize;
let result = if SPSR_FLAG {
self.spsr.get()
} else {
self.cpsr.get()
};
self.set_reg(rd, result);
CpuAction::AdvancePC(Seq)
}
/// Move to status register
/// 1S
pub fn exec_arm_transfer_to_status<const IMM: bool, const SPSR_FLAG: bool>(
&mut self,
insn: u32,
) -> CpuAction {
let value = if IMM {
let immediate = insn & 0xff;
let rotate = 2 * insn.bit_range(8..12);
let mut carry = self.cpsr.C();
let v = self.ror(immediate, rotate, &mut carry, false, true);
self.cpsr.set_C(carry);
v
} else {
self.get_reg((insn & 0b1111) as usize)
};
let f = insn.bit(19);
let s = insn.bit(18);
let x = insn.bit(17);
let c = insn.bit(16);
let mut mask = 0;
if f {
mask |= 0xff << 24;
}
if s {
mask |= 0xff << 16;
}
if x {
mask |= 0xff << 8;
}
if c {
mask |= 0xff;
}
match self.cpsr.mode() {
CpuMode::User => {
if SPSR_FLAG {
panic!("User mode can't access SPSR")
}
self.cpsr.set_flag_bits(value);
}
_ => {
if SPSR_FLAG {
self.spsr.set(value);
} else {
let old_mode = self.cpsr.mode();
let new_psr = RegPSR::new((self.cpsr.get() & !mask) | (value & mask));
let new_mode = new_psr.mode();
if old_mode != new_mode {
self.change_mode(old_mode, new_mode);
}
self.cpsr = new_psr;
}
}
}
CpuAction::AdvancePC(Seq)
}
fn transfer_spsr_mode(&mut self) {
let spsr = self.spsr;
if self.cpsr.mode() != spsr.mode() {
self.change_mode(self.cpsr.mode(), spsr.mode());
}
self.cpsr = spsr;
}
/// Logical/Arithmetic ALU operations
///
/// Cycles: 1S+x+y (from GBATEK)
/// Add x=1I cycles if Op2 shifted-by-register. Add y=1S+1N cycles if Rd=R15.
pub fn exec_arm_data_processing<
const OP: u8,
const IMM: bool,
const SET_FLAGS: bool,
const SHIFT_BY_REG: bool,
>(
&mut self,
insn: u32,
) -> CpuAction {
use AluOpCode::*;
let rn = insn.bit_range(16..20) as usize;
let rd = insn.bit_range(12..16) as usize;
let mut op1 = if rn == REG_PC {
self.pc_arm() + 8
} else {
self.get_reg(rn)
};
let mut s_flag = SET_FLAGS;
let opcode =
AluOpCode::from_u8(OP).unwrap_or_else(|| unsafe { std::hint::unreachable_unchecked() });
let mut carry = self.cpsr.C();
let op2 = if IMM {
let immediate = insn & 0xff;
let rotate = 2 * insn.bit_range(8..12);
// TODO refactor out
self.ror(immediate, rotate, &mut carry, false, true)
} else {
let reg = insn & 0xf;
let shift_by = if SHIFT_BY_REG {
if rn == REG_PC {
op1 += 4;
}
self.idle_cycle();
let rs = insn.bit_range(8..12) as usize;
ShiftRegisterBy::ByRegister(rs)
} else {
let amount = insn.bit_range(7..12);
ShiftRegisterBy::ByAmount(amount)
};
let shifted_reg = ShiftedRegister {
reg: reg as usize,
bs_op: insn.get_bs_op(),
shift_by,
added: None,
};
self.register_shift(&shifted_reg, &mut carry)
};
if rd == REG_PC && s_flag {
self.transfer_spsr_mode();
s_flag = false;
}
let alu_res = if s_flag {
let mut overflow = self.cpsr.V();
let result = match opcode {
AND | TST => op1 & op2,
EOR | TEQ => op1 ^ op2,
SUB | CMP => self.alu_sub_flags(op1, op2, &mut carry, &mut overflow),
RSB => self.alu_sub_flags(op2, op1, &mut carry, &mut overflow),
ADD | CMN => self.alu_add_flags(op1, op2, &mut carry, &mut overflow),
ADC => self.alu_adc_flags(op1, op2, &mut carry, &mut overflow),
SBC => self.alu_sbc_flags(op1, op2, &mut carry, &mut overflow),
RSC => self.alu_sbc_flags(op2, op1, &mut carry, &mut overflow),
ORR => op1 | op2,
MOV => op2,
BIC => op1 & (!op2),
MVN => !op2,
};
self.alu_update_flags(result, opcode.is_arithmetic(), carry, overflow);
if opcode.is_setting_flags() {
None
} else {
Some(result)
}
} else {
let c = carry as u32;
Some(match opcode {
AND => op1 & op2,
EOR => op1 ^ op2,
SUB => op1.wrapping_sub(op2),
RSB => op2.wrapping_sub(op1),
ADD => op1.wrapping_add(op2),
ADC => op1.wrapping_add(op2).wrapping_add(c),
SBC => op1.wrapping_sub(op2.wrapping_add(1 - c)),
RSC => op2.wrapping_sub(op1.wrapping_add(1 - c)),
ORR => op1 | op2,
MOV => op2,
BIC => op1 & (!op2),
MVN => !op2,
_ => panic!("DataProcessing should be a PSR transfer"),
})
};
let mut result = CpuAction::AdvancePC(Seq);
if let Some(alu_res) = alu_res {
self.set_reg(rd, alu_res);
if rd == REG_PC {
// T bit might have changed
match self.cpsr.state() {
CpuState::ARM => self.reload_pipeline32(),
CpuState::THUMB => self.reload_pipeline16(),
};
result = CpuAction::PipelineFlushed;
}
}
result
}
/// Memory Load/Store
/// Instruction | Cycles | Flags | Expl.
/// ------------------------------------------------------------------------------
/// LDR{cond}{B}{T} Rd,<Address> | 1S+1N+1I+y | ---- | Rd=[Rn+/-<offset>]
/// STR{cond}{B}{T} Rd,<Address> | 2N | ---- | [Rn+/-<offset>]=Rd
/// ------------------------------------------------------------------------------
/// For LDR, add y=1S+1N if Rd=R15.
pub fn exec_arm_ldr_str<
const LOAD: bool,
const WRITEBACK: bool,
const PRE_INDEX: bool,
const BYTE: bool,
const SHIFT: bool,
const ADD: bool,
const BS_OP: u8,
const SHIFT_BY_REG: bool,
>(
&mut self,
insn: u32,
) -> CpuAction {
let mut result = CpuAction::AdvancePC(NonSeq);
let base_reg = insn.bit_range(16..20) as usize;
let dest_reg = insn.bit_range(12..16) as usize;
let mut addr = self.get_reg(base_reg);
if base_reg == REG_PC {
addr = self.pc_arm() + 8; // prefetching
}
let mut offset = insn.bit_range(0..12);
if SHIFT {
let mut carry = self.cpsr.C();
let rm = offset & 0xf;
offset =
self.register_shift_const::<BS_OP, SHIFT_BY_REG>(offset, rm as usize, &mut carry);
}
let offset = if ADD {
offset
} else {
(-(offset as i32)) as u32
};
let effective_addr = (addr as i32).wrapping_add(offset as i32) as Addr;
// TODO - confirm this
let old_mode = self.cpsr.mode();
if !PRE_INDEX && WRITEBACK {
self.change_mode(old_mode, CpuMode::User);
}
addr = if PRE_INDEX { effective_addr } else { addr };
if LOAD {
let data = if BYTE {
self.load_8(addr, NonSeq) as u32
} else {
self.ldr_word(addr, NonSeq)
};
self.set_reg(dest_reg, data);
// +1I
self.idle_cycle();
if dest_reg == REG_PC {
self.reload_pipeline32();
result = CpuAction::PipelineFlushed;
}
} else {
let value = if dest_reg == REG_PC {
self.pc_arm() + 12
} else {
self.get_reg(dest_reg)
};
if BYTE {
self.store_8(addr, value as u8, NonSeq);
} else {
self.store_aligned_32(addr & !0x3, value, NonSeq);
};
}
if (!LOAD || base_reg != dest_reg) && (!PRE_INDEX || WRITEBACK) {
self.set_reg(base_reg, effective_addr);
}
if !PRE_INDEX && WRITEBACK {
self.change_mode(self.cpsr.mode(), old_mode);
}
result
}
pub fn exec_arm_ldr_str_hs_reg<
const HS: u8,
const LOAD: bool,
const WRITEBACK: bool,
const PRE_INDEX: bool,
const ADD: bool,
>(
&mut self,
insn: u32,
) -> CpuAction {
let offset = self.get_reg((insn & 0xf) as usize);
self.ldr_str_hs_common::<HS, LOAD, WRITEBACK, PRE_INDEX, ADD>(insn, offset)
}
pub fn exec_arm_ldr_str_hs_imm<
const HS: u8,
const LOAD: bool,
const WRITEBACK: bool,
const PRE_INDEX: bool,
const ADD: bool,
>(
&mut self,
insn: u32,
) -> CpuAction {
let offset8 = (insn.bit_range(8..12) << 4) + insn.bit_range(0..4);
self.ldr_str_hs_common::<HS, LOAD, WRITEBACK, PRE_INDEX, ADD>(insn, offset8)
}
#[inline(always)]
pub fn ldr_str_hs_common<
const HS: u8,
const LOAD: bool,
const WRITEBACK: bool,
const PRE_INDEX: bool,
const ADD: bool,
>(
&mut self,
insn: u32,
offset: u32,
) -> CpuAction {
let mut result = CpuAction::AdvancePC(NonSeq);
let offset = if ADD {
offset
} else {
(-(offset as i32)) as u32
};
let base_reg = insn.bit_range(16..20) as usize;
let dest_reg = insn.bit_range(12..16) as usize;
let mut addr = self.get_reg(base_reg);
if base_reg == REG_PC {
addr = self.pc_arm() + 8; // prefetching
}
// TODO - confirm this
let old_mode = self.cpsr.mode();
if !PRE_INDEX && WRITEBACK {
self.change_mode(old_mode, CpuMode::User);
}
let effective_addr = (addr as i32).wrapping_add(offset as i32) as Addr;
addr = if PRE_INDEX { effective_addr } else { addr };
let transfer_type = ArmHalfwordTransferType::from_u8(HS).unwrap();
if LOAD {
let data = match transfer_type {
ArmHalfwordTransferType::SignedByte => self.load_8(addr, NonSeq) as u32,
ArmHalfwordTransferType::SignedHalfwords => self.ldr_sign_half(addr, NonSeq),
ArmHalfwordTransferType::UnsignedHalfwords => self.ldr_half(addr, NonSeq),
};
self.set_reg(dest_reg, data);
// +1I
self.idle_cycle();
if dest_reg == REG_PC {
self.reload_pipeline32();
result = CpuAction::PipelineFlushed;
}
} else {
let value = if dest_reg == REG_PC {
self.pc_arm() + 12
} else {
self.get_reg(dest_reg)
};
match transfer_type {
ArmHalfwordTransferType::UnsignedHalfwords => {
self.store_aligned_16(addr, value as u16, NonSeq);
}
_ => panic!("invalid HS flags for L=0"),
};
}
if (!LOAD || base_reg != dest_reg) && (!PRE_INDEX || WRITEBACK) {
self.set_reg(base_reg, effective_addr);
}
result
}
pub fn exec_arm_ldm_stm<
const LOAD: bool,
const WRITEBACK: bool,
const FLAG_S: bool,
const ADD: bool,
const PRE_INDEX: bool,
>(
&mut self,
insn: u32,
) -> CpuAction {
let mut result = CpuAction::AdvancePC(NonSeq);
let mut full = PRE_INDEX;
let ascending = ADD;
let mut writeback = WRITEBACK;
let base_reg = insn.bit_range(16..20) as usize;
let mut base_addr = self.get_reg(base_reg);
let rlist = insn.register_list();
if FLAG_S {
match self.cpsr.mode() {
CpuMode::User | CpuMode::System => {
panic!("LDM/STM with S bit in unprivileged mode")
}
_ => {}
};
}
let user_bank_transfer = if FLAG_S {
if LOAD {
!rlist.bit(REG_PC)
} else {
true
}
} else {
false
};
let old_mode = self.cpsr.mode();
if user_bank_transfer {
self.change_mode(old_mode, CpuMode::User);
}
let psr_transfer = FLAG_S & LOAD & rlist.bit(REG_PC);
let rlist_count = rlist.count_ones();
let old_base = base_addr;
if rlist != 0 && !ascending {
base_addr = base_addr.wrapping_sub(rlist_count * 4);
if writeback {
self.set_reg(base_reg, base_addr);
writeback = false;
}
full = !full;
}
let mut addr = base_addr;
if rlist != 0 {
if LOAD {
let mut access = NonSeq;
for r in 0..16 {
if rlist.bit(r) {
if r == base_reg {
writeback = false;
}
if full {
addr = addr.wrapping_add(4);
}
let val = self.load_32(addr, access);
access = Seq;
self.set_reg(r, val);
if r == REG_PC {
if psr_transfer {
self.transfer_spsr_mode();
}
self.reload_pipeline32();
result = CpuAction::PipelineFlushed;
}
if !full {
addr = addr.wrapping_add(4);
}
}
}
self.idle_cycle();
} else {
let mut first = true;
let mut access = NonSeq;
for r in 0..16 {
if rlist.bit(r) {
let val = if r != base_reg {
if r == REG_PC {
self.pc_arm() + 12
} else {
self.get_reg(r)
}
} else if first {
old_base
} else {
let x = rlist_count * 4;
if ascending {
old_base + x
} else {
old_base - x
}
};
if full {
addr = addr.wrapping_add(4);
}
first = false;
self.store_aligned_32(addr, val, access);
access = Seq;
if !full {
addr = addr.wrapping_add(4);
}
}
}
}
} else {
if LOAD {
let val = self.ldr_word(addr, NonSeq);
self.set_reg(REG_PC, val & !3);
self.reload_pipeline32();
result = CpuAction::PipelineFlushed;
} else {
// block data store with empty rlist
let addr = match (ascending, full) {
(false, false) => addr.wrapping_sub(0x3c),
(false, true) => addr.wrapping_sub(0x40),
(true, false) => addr,
(true, true) => addr.wrapping_add(4),
};
self.store_aligned_32(addr, self.pc + 4, NonSeq);
}
addr = if ascending {
addr.wrapping_add(0x40)
} else {
addr.wrapping_sub(0x40)
};
}
if user_bank_transfer {
self.change_mode(self.cpsr.mode(), old_mode);
}
if writeback {
self.set_reg(base_reg, addr);
}
result
}
/// Multiply and Multiply-Accumulate (MUL, MLA)
/// Execution Time: 1S+mI for MUL, and 1S+(m+1)I for MLA.
pub fn exec_arm_mul_mla<const UPDATE_FLAGS: bool, const ACCUMULATE: bool>(
&mut self,
insn: u32,
) -> CpuAction {
let rd = insn.bit_range(16..20) as usize;
let rn = insn.bit_range(12..16) as usize;
let rs = insn.bit_range(8..12) as usize;
let rm = insn.bit_range(0..4) as usize;
// // check validity
// assert!(!(REG_PC == rd || REG_PC == rn || REG_PC == rs || REG_PC == rm));
// assert!(rd != rm);
let op1 = self.get_reg(rm);
let op2 = self.get_reg(rs);
let mut result = op1.wrapping_mul(op2);
if ACCUMULATE {
result = result.wrapping_add(self.get_reg(rn));
self.idle_cycle();
}
self.set_reg(rd, result);
let m = self.get_required_multipiler_array_cycles(op2);
for _ in 0..m {
self.idle_cycle();
}
if UPDATE_FLAGS {
self.cpsr.set_N((result as i32) < 0);
self.cpsr.set_Z(result == 0);
self.cpsr.set_C(false);
self.cpsr.set_V(false);
}
CpuAction::AdvancePC(Seq)
}
/// Multiply Long and Multiply-Accumulate Long (MULL, MLAL)
/// Execution Time: 1S+(m+1)I for MULL, and 1S+(m+2)I for MLAL
pub fn exec_arm_mull_mlal<
const UPDATE_FLAGS: bool,
const ACCUMULATE: bool,
const U_FLAG: bool,
>(
&mut self,
insn: u32,
) -> CpuAction {
let rd_hi = insn.rd_hi();
let rd_lo = insn.rd_lo();
let rs = insn.rs();
let rm = insn.rm();
let op1 = self.get_reg(rm);
let op2 = self.get_reg(rs);
let mut result: u64 = if U_FLAG {
// signed
(op1 as i32 as i64).wrapping_mul(op2 as i32 as i64) as u64
} else {
(op1 as u64).wrapping_mul(op2 as u64)
};
if ACCUMULATE {
let hi = self.get_reg(rd_hi) as u64;
let lo = self.get_reg(rd_lo) as u64;
result = result.wrapping_add(hi << 32 | lo);
self.idle_cycle();
}
self.set_reg(rd_hi, (result >> 32) as i32 as u32);
self.set_reg(rd_lo, (result & 0xffffffff) as i32 as u32);
self.idle_cycle();
let m = self.get_required_multipiler_array_cycles(self.get_reg(rs));
for _ in 0..m {
self.idle_cycle();
}
if UPDATE_FLAGS {
self.cpsr.set_N(result.bit(63));
self.cpsr.set_Z(result == 0);
self.cpsr.set_C(false);
self.cpsr.set_V(false);
}
CpuAction::AdvancePC(Seq)
}
/// ARM Opcodes: Memory: Single Data Swap (SWP)
/// Execution Time: 1S+2N+1I. That is, 2N data cycles, 1S code cycle, plus 1I.
pub fn exec_arm_swp<const BYTE: bool>(&mut self, insn: u32) -> CpuAction {
let base_addr = self.get_reg(insn.bit_range(16..20) as usize);
let rd = insn.bit_range(12..16) as usize;
if BYTE {
let t = self.load_8(base_addr, NonSeq);
self.store_8(base_addr, self.get_reg(insn.rm()) as u8, Seq);
self.set_reg(rd, t as u32);
} else {
let t = self.ldr_word(base_addr, NonSeq);
self.store_aligned_32(base_addr, self.get_reg(insn.rm()), Seq);
self.set_reg(rd, t);
}
self.idle_cycle();
CpuAction::AdvancePC(NonSeq)
}
/// ARM Software Interrupt
/// Execution Time: 2S+1N
pub fn exec_arm_swi(&mut self, insn: u32) -> CpuAction {
self.software_interrupt(self.pc - 4, insn.swi_comment()); // Implies 2S + 1N
CpuAction::PipelineFlushed
}
}
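
exec_arm_b_bl relies on branch_offset (defined with the decode helpers below) to sign-extend the 24-bit offset field and scale it by four; the branch target is that offset added to the prefetched PC (instruction address + 8). A self-contained sketch of just the offset arithmetic, checked against the example encodings from the commented-out tests further down:

// Shift the 24-bit field up to bit 31, then arithmetic-shift back down by 6:
// this sign-extends the offset and multiplies it by 4 in one step.
fn branch_offset(insn: u32) -> i32 {
    (((insn & 0x00ff_ffff) << 8) as i32) >> 6
}

fn main() {
    // 0xEA000002 at address 0x20 is `b 0x30`: 0x20 + 8 + 8 = 0x30.
    assert_eq!(branch_offset(0xEA00_0002), 8);
    // 0xEBFFFFFA at address 0x20 is `bl 0x10`: 0x20 + 8 - 24 = 0x10.
    assert_eq!(branch_offset(0xEBFF_FFFA), -24);
    println!("branch offsets decode as expected");
}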


@@ -1,604 +0,0 @@
#[cfg(feature = "debugger")]
pub mod disass;
pub mod exec;
use serde::{Deserialize, Serialize};
use super::alu::*;
use super::memory::Addr;
use super::InstructionDecoder;
use bit::BitIndex;
use byteorder::{LittleEndian, ReadBytesExt};
use num::FromPrimitive;
use std::io;
#[derive(Debug, PartialEq, Eq)]
pub enum ArmDecodeErrorKind {
UnknownInstructionFormat,
DecodedPartDoesNotBelongToInstruction,
UndefinedConditionCode(u32),
InvalidShiftType(u32),
InvalidHSBits(u32),
IoError(io::ErrorKind),
}
#[derive(Debug, PartialEq, Eq)]
pub struct ArmDecodeError {
pub kind: ArmDecodeErrorKind,
pub insn: u32,
pub addr: Addr,
}
#[allow(dead_code)]
impl ArmDecodeError {
fn new(kind: ArmDecodeErrorKind, insn: u32, addr: Addr) -> ArmDecodeError {
ArmDecodeError { kind, insn, addr }
}
}
#[derive(Serialize, Deserialize, Debug, Copy, Clone, PartialEq, Eq, Primitive)]
pub enum ArmCond {
EQ = 0b0000,
NE = 0b0001,
HS = 0b0010,
LO = 0b0011,
MI = 0b0100,
PL = 0b0101,
VS = 0b0110,
VC = 0b0111,
HI = 0b1000,
LS = 0b1001,
GE = 0b1010,
LT = 0b1011,
GT = 0b1100,
LE = 0b1101,
AL = 0b1110,
Invalid = 0b1111,
}
#[derive(Serialize, Deserialize, Debug, Copy, Clone, PartialEq, Eq)]
pub enum ArmFormat {
BranchExchange = 0,
BranchLink,
SoftwareInterrupt,
Multiply,
MultiplyLong,
SingleDataTransfer,
HalfwordDataTransferRegOffset,
HalfwordDataTransferImmediateOffset,
DataProcessing,
BlockDataTransfer,
SingleDataSwap,
/// Transfer PSR contents to a register
MoveFromStatus,
/// Transfer register contents to PSR
MoveToStatus,
/// Transfer immediate/register contents to PSR flags only
MoveToFlags,
Undefined,
}
impl From<u32> for ArmFormat {
fn from(raw: u32) -> ArmFormat {
use ArmFormat::*;
if (0x0fff_fff0 & raw) == 0x012f_ff10 {
BranchExchange
} else if (0x0E00_0000 & raw) == 0x0A00_0000 {
BranchLink
} else if (0x0E00_0010 & raw) == 0x0600_0000 {
Undefined
} else if (0x0FB0_0FF0 & raw) == 0x0100_0090 {
SingleDataSwap
} else if (0x0FC0_00F0 & raw) == 0x0000_0090 {
Multiply
} else if (0x0F80_00F0 & raw) == 0x0080_0090 {
MultiplyLong
} else if (0x0FBF_0FFF & raw) == 0x010F_0000 {
MoveFromStatus
} else if (0x0FBF_FFF0 & raw) == 0x0129_F000 {
MoveToStatus
} else if (0x0DBF_F000 & raw) == 0x0128_F000 {
MoveToFlags
} else if (0x0C00_0000 & raw) == 0x0400_0000 {
SingleDataTransfer
} else if (0x0E40_0F90 & raw) == 0x0000_0090 {
HalfwordDataTransferRegOffset
} else if (0x0E40_0090 & raw) == 0x0040_0090 {
HalfwordDataTransferImmediateOffset
} else if (0x0E00_0000 & raw) == 0x0800_0000 {
BlockDataTransfer
} else if (0x0F00_0000 & raw) == 0x0F00_0000 {
SoftwareInterrupt
} else if (0x0C00_0000 & raw) == 0x0000_0000 {
DataProcessing
} else {
Undefined
}
}
}
#[derive(Debug, PartialEq, Eq, Primitive)]
pub enum ArmHalfwordTransferType {
UnsignedHalfwords = 0b01,
SignedByte = 0b10,
SignedHalfwords = 0b11,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
pub struct ArmInstruction {
pub fmt: ArmFormat,
pub raw: u32,
pub pc: Addr,
}
impl ArmInstruction {
pub fn new(raw: u32, pc: Addr, fmt: ArmFormat) -> ArmInstruction {
ArmInstruction { fmt, raw, pc }
}
}
impl InstructionDecoder for ArmInstruction {
type IntType = u32;
fn decode(raw: u32, addr: Addr) -> Self {
let fmt = ArmFormat::from(raw);
ArmInstruction { fmt, raw, pc: addr }
}
fn decode_from_bytes(bytes: &[u8], addr: Addr) -> Self {
let mut rdr = std::io::Cursor::new(bytes);
let raw = rdr.read_u32::<LittleEndian>().unwrap();
Self::decode(raw, addr)
}
fn get_raw(&self) -> u32 {
self.raw
}
}
pub trait ArmDecodeHelper {
fn cond(&self) -> ArmCond;
fn rm(&self) -> usize;
fn rs(&self) -> usize;
fn rd_lo(&self) -> usize;
fn rd_hi(&self) -> usize;
fn opcode(&self) -> AluOpCode;
fn branch_offset(&self) -> i32;
fn load_flag(&self) -> bool;
fn set_cond_flag(&self) -> bool;
fn write_back_flag(&self) -> bool;
fn accumulate_flag(&self) -> bool;
fn u_flag(&self) -> bool;
fn halfword_data_transfer_type(&self) -> ArmHalfwordTransferType;
fn transfer_size(&self) -> usize;
fn psr_and_force_user_flag(&self) -> bool;
fn spsr_flag(&self) -> bool;
fn add_offset_flag(&self) -> bool;
fn pre_index_flag(&self) -> bool;
fn link_flag(&self) -> bool;
/// gets offset used by ldr/str instructions
fn ldr_str_offset(&self) -> BarrelShifterValue;
fn get_bs_op(&self) -> BarrelShiftOpCode;
fn get_shift_reg_by(&self) -> ShiftRegisterBy;
fn ldr_str_hs_imm_offset(&self) -> BarrelShifterValue;
fn ldr_str_hs_reg_offset(&self) -> BarrelShifterValue;
fn operand2(&self) -> BarrelShifterValue;
fn register_list(&self) -> u16;
fn swi_comment(&self) -> u32;
}
macro_rules! arm_decode_helper_impl {
($($t:ty),*) => {$(
impl ArmDecodeHelper for $t {
#[inline(always)]
fn cond(&self) -> ArmCond {
ArmCond::from_u32(self.bit_range(28..32)).unwrap()
}
#[inline(always)]
fn rm(&self) -> usize {
self.bit_range(0..4) as usize
}
#[inline(always)]
fn rs(&self) -> usize {
self.bit_range(8..12) as usize
}
#[inline(always)]
fn rd_lo(&self) -> usize {
self.bit_range(12..16) as usize
}
#[inline(always)]
fn rd_hi(&self) -> usize {
self.bit_range(16..20) as usize
}
#[inline(always)]
fn opcode(&self) -> AluOpCode {
use std::hint::unreachable_unchecked;
unsafe {
if let Some(opc) = AluOpCode::from_u16(self.bit_range(21..25) as u16) {
opc
} else {
unreachable_unchecked()
}
}
}
#[inline(always)]
fn branch_offset(&self) -> i32 {
((self.bit_range(0..24) << 8) as i32) >> 6
}
#[inline(always)]
fn load_flag(&self) -> bool {
self.bit(20)
}
#[inline(always)]
fn set_cond_flag(&self) -> bool {
self.bit(20)
}
#[inline(always)]
fn write_back_flag(&self) -> bool {
self.bit(21)
}
#[inline(always)]
fn accumulate_flag(&self) -> bool {
self.bit(21)
}
#[inline(always)]
fn u_flag(&self) -> bool {
self.bit(22)
}
#[inline(always)]
fn halfword_data_transfer_type(&self) -> ArmHalfwordTransferType {
let bits = (*self & 0b1100000) >> 5;
ArmHalfwordTransferType::from_u32(bits).unwrap()
}
#[inline(always)]
fn transfer_size(&self) -> usize {
if self.bit(22) {
1
} else {
4
}
}
#[inline(always)]
fn psr_and_force_user_flag(&self) -> bool {
self.bit(22)
}
#[inline(always)]
fn spsr_flag(&self) -> bool {
self.bit(22)
}
#[inline(always)]
fn add_offset_flag(&self) -> bool {
self.bit(23)
}
#[inline(always)]
fn pre_index_flag(&self) -> bool {
self.bit(24)
}
#[inline(always)]
fn link_flag(&self) -> bool {
self.bit(24)
}
/// gets offset used by ldr/str instructions
#[inline(always)]
fn ldr_str_offset(&self) -> BarrelShifterValue {
let ofs = self.bit_range(0..12);
if self.bit(25) {
let rm = ofs & 0xf;
BarrelShifterValue::ShiftedRegister(ShiftedRegister {
reg: rm as usize,
shift_by: self.get_shift_reg_by(),
bs_op: self.get_bs_op(),
added: Some(self.add_offset_flag()),
})
} else {
let ofs = if self.add_offset_flag() {
ofs as u32
} else {
-(ofs as i32) as u32
};
BarrelShifterValue::ImmediateValue(ofs)
}
}
#[inline(always)]
fn get_bs_op(&self) -> BarrelShiftOpCode {
BarrelShiftOpCode::from_u8(self.bit_range(5..7) as u8).unwrap()
}
#[inline(always)]
fn get_shift_reg_by(&self) -> ShiftRegisterBy {
if self.bit(4) {
let rs = self.bit_range(8..12) as usize;
ShiftRegisterBy::ByRegister(rs)
} else {
let amount = self.bit_range(7..12) as u32;
ShiftRegisterBy::ByAmount(amount)
}
}
#[inline(always)]
fn ldr_str_hs_imm_offset(&self) -> BarrelShifterValue {
let offset8 = (self.bit_range(8..12) << 4) + self.bit_range(0..4);
let offset8 = if self.add_offset_flag() {
offset8
} else {
(-(offset8 as i32)) as u32
};
BarrelShifterValue::ImmediateValue(offset8)
}
#[inline(always)]
fn ldr_str_hs_reg_offset(&self) -> BarrelShifterValue {
BarrelShifterValue::ShiftedRegister(
ShiftedRegister {
reg: (self & 0xf) as usize,
shift_by: ShiftRegisterBy::ByAmount(0),
bs_op: BarrelShiftOpCode::LSL,
added: Some(self.add_offset_flag()),
})
}
fn operand2(&self) -> BarrelShifterValue {
if self.bit(25) {
let immediate = self & 0xff;
let rotate = 2 * self.bit_range(8..12);
BarrelShifterValue::RotatedImmediate(immediate, rotate)
} else {
let reg = self & 0xf;
let shifted_reg = ShiftedRegister {
reg: reg as usize,
bs_op: self.get_bs_op(),
shift_by: self.get_shift_reg_by(),
added: None,
}; // TODO error handling
BarrelShifterValue::ShiftedRegister(shifted_reg)
}
}
fn register_list(&self) -> u16 {
(self & 0xffff) as u16
}
fn swi_comment(&self) -> u32 {
self.bit_range(0..24)
}
}
)*}
}
arm_decode_helper_impl!(u32);
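
ArmFormat::from above (like arm_decode in build.rs) classifies a raw word by testing mask/value pairs in priority order, so the more specific encodings (BX, SWP, multiplies, PSR transfers) are recognised before the broad DataProcessing pattern they overlap with. A reduced, self-contained sketch of that idea using a few of the masks from ArmFormat::from (the full table has more entries and extra checks for undefined encodings):

// Ordered (mask, value, name) tests: the first match wins.
fn classify(raw: u32) -> &'static str {
    const TABLE: &[(u32, u32, &str)] = &[
        (0x0fff_fff0, 0x012f_ff10, "BranchExchange"),
        (0x0e00_0000, 0x0a00_0000, "BranchLink"),
        (0x0c00_0000, 0x0400_0000, "SingleDataTransfer"),
        (0x0f00_0000, 0x0f00_0000, "SoftwareInterrupt"),
        (0x0c00_0000, 0x0000_0000, "DataProcessing"),
    ];
    for &(mask, value, name) in TABLE {
        if raw & mask == value {
            return name;
        }
    }
    "Undefined"
}

fn main() {
    assert_eq!(classify(0xe12f_ff11), "BranchExchange"); // bx r1
    assert_eq!(classify(0xea00_0002), "BranchLink"); // b <label>
    assert_eq!(classify(0xef00_1337), "SoftwareInterrupt"); // swi 0x1337
    assert_eq!(classify(0xe1a0_0001), "DataProcessing"); // mov r0, r1
    println!("format classification matches");
}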
// #[cfg(test)]
// /// All instructions constants were generated using an ARM assembler.
// mod tests {
// use super::*;
// use crate::arm7tdmi::*;
// use crate::sysbus::BoxedMemory;
// #[test]
// fn swi() {
// let mut core = Core::new();
// let bytes = vec![];
// let mut mem = BoxedMemory::new(bytes.into_boxed_slice(), 0xffff_ffff);
// // swi #0x1337
// let decoded = ArmInstruction::decode(0xef001337, 0).unwrap();
// assert_eq!(decoded.fmt, ArmFormat::SoftwareInterrupt);
// assert_eq!(decoded.swi_comment(), 0x1337);
// assert_eq!(format!("{}", decoded), "swi\t#0x1337");
// core.exec_arm(&mut mem, decoded).unwrap();
// assert_eq!(core.did_pipeline_flush(), true);
// assert_eq!(core.cpsr.mode(), CpuMode::Supervisor);
// assert_eq!(core.pc, Exception::SoftwareInterrupt as u32);
// }
// #[test]
// fn branch_forwards() {
// // 0x20: b 0x30
// let decoded = ArmInstruction::decode(0xea_00_00_02, 0x20).unwrap();
// assert_eq!(decoded.fmt, ArmFormat::BranchLink);
// assert_eq!(decoded.link_flag(), false);
// assert_eq!(
// (decoded.pc as i32).wrapping_add(decoded.branch_offset()) + 8,
// 0x30
// );
// assert_eq!(format!("{}", decoded), "b\t0x30");
// let mut core = Core::new();
// core.pc = 0x20 + 8;
// let bytes = vec![];
// let mut mem = BoxedMemory::new(bytes.into_boxed_slice(), 0xffff_ffff);
// core.exec_arm(&mut mem, decoded).unwrap();
// assert_eq!(core.did_pipeline_flush(), true);
// assert_eq!(core.pc, 0x30);
// }
// #[test]
// fn branch_link_backwards() {
// // 0x20: bl 0x10
// let decoded = ArmInstruction::decode(0xeb_ff_ff_fa, 0x20).unwrap();
// assert_eq!(decoded.fmt, ArmFormat::BranchLink);
// assert_eq!(decoded.link_flag(), true);
// assert_eq!(
// (decoded.pc as i32).wrapping_add(decoded.branch_offset()) + 8,
// 0x10
// );
// assert_eq!(format!("{}", decoded), "bl\t0x10");
// let mut core = Core::new();
// core.pc = 0x20 + 8;
// let bytes = vec![];
// let mut mem = BoxedMemory::new(bytes.into_boxed_slice(), 0xffff_ffff);
// core.exec_arm(&mut mem, decoded).unwrap();
// assert_eq!(core.did_pipeline_flush(), true);
// assert_eq!(core.pc, 0x10);
// }
// #[test]
// fn ldr_pre_index() {
// // ldreq r2, [r5, -r6, lsl #5]
// let decoded = ArmInstruction::decode(0x07_15_22_86, 0).unwrap();
// assert_eq!(decoded.fmt, ArmFormat::SingleDataTransfer);
// assert_eq!(decoded.cond, ArmCond::EQ);
// assert_eq!(decoded.load_flag(), true);
// assert_eq!(decoded.pre_index_flag(), true);
// assert_eq!(decoded.write_back_flag(), false);
// assert_eq!(decoded.rd(), 2);
// assert_eq!(decoded.rn(), 5);
// assert_eq!(
// decoded.ldr_str_offset(),
// BarrelShifterValue::ShiftedRegister(ShiftedRegister {
// reg: 6,
// shift_by: ShiftRegisterBy::ByAmount(5),
// bs_op: BarrelShiftOpCode::LSL,
// added: Some(false)
// })
// );
// assert_eq!(format!("{}", decoded), "ldreq\tr2, [r5, -r6, lsl #5]");
// let mut core = Core::new();
// core.cpsr.set_Z(true);
// core.gpr[5] = 0x34;
// core.gpr[6] = 1;
// core.gpr[2] = 0;
// #[rustfmt::skip]
// let bytes = vec![
// /* 00h: */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
// /* 10h: */ 0x00, 0x00, 0x00, 0x00, 0x37, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
// /* 20h: */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
// /* 30h: */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
// ];
// let mut mem = BoxedMemory::new(bytes.into_boxed_slice(), 0xffff_ffff);
// core.exec_arm(&mut mem, decoded).unwrap();
// assert_eq!(core.gpr[2], 0x1337);
// }
// #[test]
// fn str_post_index() {
// // strteq r2, [r4], -r7, asr #8
// let decoded = ArmInstruction::decode(0x06_24_24_47, 0).unwrap();
// assert_eq!(decoded.fmt, ArmFormat::SingleDataTransfer);
// assert_eq!(decoded.cond, ArmCond::EQ);
// assert_eq!(decoded.load_flag(), false);
// assert_eq!(decoded.pre_index_flag(), false);
// assert_eq!(decoded.write_back_flag(), true);
// assert_eq!(decoded.rd(), 2);
// assert_eq!(decoded.rn(), 4);
// assert_eq!(
// decoded.ldr_str_offset(),
// BarrelShifterValue::ShiftedRegister(ShiftedRegister {
// reg: 7,
// shift_by: ShiftRegisterBy::ByAmount(8),
// bs_op: BarrelShiftOpCode::ASR,
// added: Some(false)
// })
// );
// assert_eq!(format!("{}", decoded), "strteq\tr2, [r4], -r7, asr #8");
// let mut core = Core::new();
// core.cpsr.set_Z(true);
// core.gpr[4] = 0x0;
// core.gpr[7] = 1;
// core.gpr[2] = 0xabababab;
// #[rustfmt::skip]
// let bytes = vec![
// /* 00h: */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
// /* 10h: */ 0x00, 0x00, 0x00, 0x00, 0x37, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
// /* 20h: */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
// /* 30h: */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
// ];
// let mut mem = BoxedMemory::new(bytes.into_boxed_slice(), 0xffff_ffff);
// core.exec_arm(&mut mem, decoded).unwrap();
// assert_eq!(mem.read_32(0), 0xabababab);
// }
// #[test]
// fn str_pre_index() {
// // str r4, [sp, 0x10]
// let decoded = ArmInstruction::decode(0xe58d4010, 0).unwrap();
// assert_eq!(decoded.fmt, ArmFormat::SingleDataTransfer);
// assert_eq!(decoded.cond, ArmCond::AL);
// let mut core = Core::new();
// core.set_reg(4, 0x12345678);
// core.set_reg(REG_SP, 0);
// #[rustfmt::skip]
// let bytes = vec![
// /* 0: */ 0xaa, 0xbb, 0xcc, 0xdd,
// /* 4: */ 0xaa, 0xbb, 0xcc, 0xdd,
// /* 8: */ 0xaa, 0xbb, 0xcc, 0xdd,
// /* c: */ 0xaa, 0xbb, 0xcc, 0xdd,
// /* 10: */ 0xaa, 0xbb, 0xcc, 0xdd,
// ];
// let mut mem = BoxedMemory::new(bytes.into_boxed_slice(), 0xffff_ffff);
// assert_ne!(mem.read_32(core.get_reg(REG_SP) + 0x10), 0x12345678);
// core.exec_arm(&mut mem, decoded).unwrap();
// assert_eq!(mem.read_32(core.get_reg(REG_SP) + 0x10), 0x12345678);
// }
// }

View file

@ -1,566 +0,0 @@
use std::fmt;
use ansi_term::{Colour, Style};
use bit::BitIndex;
use log::debug;
use num::FromPrimitive;
use serde::{Deserialize, Serialize};
use rustboyadvance_utils::{Shared, WeakPointer};
pub use super::exception::Exception;
use super::reg_string;
use super::{arm::ArmCond, psr::RegPSR, Addr, CpuMode, CpuState};
use super::memory::{MemoryAccess, MemoryInterface};
use MemoryAccess::*;
use cfg_if::cfg_if;
#[cfg(feature = "debugger")]
use super::thumb::ThumbFormat;
#[cfg(feature = "debugger")]
use super::arm::ArmFormat;
#[cfg_attr(not(feature = "debugger"), repr(transparent))]
pub struct ThumbInstructionInfo<I: MemoryInterface> {
pub handler_fn: fn(&mut Arm7tdmiCore<I>, insn: u16) -> CpuAction,
#[cfg(feature = "debugger")]
pub fmt: ThumbFormat,
}
#[cfg_attr(not(feature = "debugger"), repr(transparent))]
pub struct ArmInstructionInfo<I: MemoryInterface> {
pub handler_fn: fn(&mut Arm7tdmiCore<I>, insn: u32) -> CpuAction,
#[cfg(feature = "debugger")]
pub fmt: ArmFormat,
}
cfg_if! {
if #[cfg(feature = "debugger")] {
use super::DecodedInstruction;
use super::arm::ArmInstruction;
use super::thumb::ThumbInstruction;
} else {
}
}
pub enum CpuAction {
AdvancePC(MemoryAccess),
PipelineFlushed,
}
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
pub struct BankedRegisters {
// r13 and r14 are banked for all modes. System&User mode share them
pub gpr_banked_r13: [u32; 6],
pub gpr_banked_r14: [u32; 6],
// r8-r12 are banked for fiq mode
pub gpr_banked_old_r8_12: [u32; 5],
pub gpr_banked_fiq_r8_12: [u32; 5],
pub spsr_bank: [RegPSR; 6],
}
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct SavedCpuState {
pub pc: u32,
pub gpr: [u32; 15],
next_fetch_access: MemoryAccess,
pipeline: [u32; 2],
pub cpsr: RegPSR,
pub(super) spsr: RegPSR,
pub(super) banks: BankedRegisters,
}
#[derive(Clone, Debug)]
#[cfg(feature = "debugger")]
pub struct DebuggerState {
pub last_executed: Option<DecodedInstruction>,
/// store the gpr before executing an instruction to show diff in the Display impl
pub gpr_previous: [u32; 15],
pub breakpoints: Vec<u32>,
pub verbose: bool,
pub trace_opcodes: bool,
pub trace_exceptions: bool,
}
#[cfg(feature = "debugger")]
impl Default for DebuggerState {
fn default() -> DebuggerState {
DebuggerState {
last_executed: None,
gpr_previous: [0; 15],
breakpoints: Vec::new(),
verbose: false,
trace_opcodes: false,
trace_exceptions: false,
}
}
}
#[derive(Clone)]
pub struct Arm7tdmiCore<I: MemoryInterface> {
pub pc: u32,
pub bus: Shared<I>,
next_fetch_access: MemoryAccess,
pipeline: [u32; 2],
pub gpr: [u32; 15],
pub cpsr: RegPSR,
pub spsr: RegPSR,
pub banks: BankedRegisters,
/// Hardware breakpoints for use by gdb
breakpoints: Vec<Addr>,
/// Deprecated in-house debugger state
#[cfg(feature = "debugger")]
pub dbg: DebuggerState,
}
impl<I: MemoryInterface> Arm7tdmiCore<I> {
pub fn new(bus: Shared<I>) -> Arm7tdmiCore<I> {
let cpsr = RegPSR::new(0x0000_00D3);
Arm7tdmiCore {
bus,
pc: 0,
gpr: [0; 15],
pipeline: [0; 2],
next_fetch_access: MemoryAccess::NonSeq,
cpsr,
spsr: Default::default(),
banks: BankedRegisters::default(),
breakpoints: Vec::new(),
#[cfg(feature = "debugger")]
dbg: DebuggerState::default(),
}
}
pub fn weak_ptr(&mut self) -> WeakPointer<Arm7tdmiCore<I>> {
WeakPointer::new(self as *mut Arm7tdmiCore<I>)
}
pub fn from_saved_state(bus: Shared<I>, state: SavedCpuState) -> Arm7tdmiCore<I> {
Arm7tdmiCore {
bus,
pc: state.pc,
cpsr: state.cpsr,
gpr: state.gpr,
banks: state.banks,
spsr: state.spsr,
pipeline: state.pipeline,
next_fetch_access: state.next_fetch_access,
breakpoints: Vec::new(), // TODO include breakpoints in saved state
// savestate does not keep debugger related information, so just reinitialize to default
#[cfg(feature = "debugger")]
dbg: DebuggerState::default(),
}
}
pub fn save_state(&self) -> SavedCpuState {
SavedCpuState {
cpsr: self.cpsr,
pc: self.pc,
gpr: self.gpr,
spsr: self.spsr,
banks: self.banks.clone(),
pipeline: self.pipeline,
next_fetch_access: self.next_fetch_access,
}
}
pub fn restore_state(&mut self, state: SavedCpuState) {
self.pc = state.pc;
self.cpsr = state.cpsr;
self.gpr = state.gpr;
self.spsr = state.spsr;
self.banks = state.banks;
self.pipeline = state.pipeline;
self.next_fetch_access = state.next_fetch_access;
}
pub fn set_memory_interface(&mut self, i: Shared<I>) {
self.bus = i;
}
pub fn add_breakpoint(&mut self, addr: Addr) {
debug!("adding breakpoint {:08x}", addr);
self.breakpoints.push(addr);
}
pub fn del_breakpoint(&mut self, addr: Addr) {
if let Some(pos) = self.breakpoints.iter().position(|x| *x == addr) {
debug!("deleting breakpoint {:08x}", addr);
self.breakpoints.remove(pos);
}
}
pub fn check_breakpoint(&self) -> Option<u32> {
let next_pc = self.get_next_pc();
for bp in &self.breakpoints {
if (*bp & !1) == next_pc {
return Some(*bp);
}
}
None
}
#[cfg(feature = "debugger")]
pub fn set_verbose(&mut self, v: bool) {
self.dbg.verbose = v;
}
pub fn get_reg(&self, r: usize) -> u32 {
match r {
0..=14 => self.gpr[r],
15 => self.pc,
_ => panic!("invalid register {}", r),
}
}
#[inline]
/// Gets PC of the currently executed instruction in arm mode
pub fn pc_arm(&self) -> u32 {
self.pc.wrapping_sub(8)
}
#[inline]
/// Gets PC of the currently executed instruction in thumb mode
pub fn pc_thumb(&self) -> u32 {
self.pc.wrapping_sub(4)
}
pub fn get_reg_user(&mut self, r: usize) -> u32 {
match r {
0..=7 => self.gpr[r],
8..=12 => {
if self.cpsr.mode() == CpuMode::Fiq {
self.gpr[r]
} else {
self.banks.gpr_banked_old_r8_12[r - 8]
}
}
13 => self.banks.gpr_banked_r13[0],
14 => self.banks.gpr_banked_r14[0],
_ => panic!("invalid register"),
}
}
pub fn set_reg(&mut self, r: usize, val: u32) {
match r {
0..=14 => self.gpr[r] = val,
15 => {
self.pc = {
match self.cpsr.state() {
CpuState::THUMB => val & !1,
CpuState::ARM => val & !3,
}
}
}
_ => panic!("invalid register"),
}
}
pub fn set_reg_user(&mut self, r: usize, val: u32) {
match r {
0..=7 => self.gpr[r] = val,
8..=12 => {
if self.cpsr.mode() == CpuMode::Fiq {
self.gpr[r] = val;
} else {
self.banks.gpr_banked_old_r8_12[r - 8] = val;
}
}
13 => {
self.banks.gpr_banked_r13[0] = val;
}
14 => {
self.banks.gpr_banked_r14[0] = val;
}
_ => panic!("invalid register"),
}
}
pub fn copy_registers(&self) -> [u32; 15] {
self.gpr
}
pub(super) fn change_mode(&mut self, old_mode: CpuMode, new_mode: CpuMode) {
let new_index = new_mode.bank_index();
let old_index = old_mode.bank_index();
if new_index == old_index {
return;
}
let banks = &mut self.banks;
banks.spsr_bank[old_index] = self.spsr;
banks.gpr_banked_r13[old_index] = self.gpr[13];
banks.gpr_banked_r14[old_index] = self.gpr[14];
self.spsr = banks.spsr_bank[new_index];
self.gpr[13] = banks.gpr_banked_r13[new_index];
self.gpr[14] = banks.gpr_banked_r14[new_index];
if new_mode == CpuMode::Fiq {
for r in 0..5 {
banks.gpr_banked_old_r8_12[r] = self.gpr[r + 8];
self.gpr[r + 8] = banks.gpr_banked_fiq_r8_12[r];
}
} else if old_mode == CpuMode::Fiq {
for r in 0..5 {
banks.gpr_banked_fiq_r8_12[r] = self.gpr[r + 8];
self.gpr[r + 8] = banks.gpr_banked_old_r8_12[r];
}
}
self.cpsr.set_mode(new_mode);
}
/// Resets the cpu
pub fn reset(&mut self) {
self.exception(Exception::Reset, 0);
}
pub fn word_size(&self) -> usize {
match self.cpsr.state() {
CpuState::ARM => 4,
CpuState::THUMB => 2,
}
}
pub(super) fn get_required_multipiler_array_cycles(&self, rs: u32) -> usize {
if rs & 0xff == rs {
1
} else if rs & 0xffff == rs {
2
} else if rs & 0xffffff == rs {
3
} else {
4
}
}
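// Illustrative note, not part of the original source: the heuristic above mirrors
// the ARM7TDMI's early-terminating multiplier, which needs 1..=4 internal cycles
// depending on how many significant bytes the Rs operand has, e.g.:
//
//     rs = 0x0000_00ff -> 1 cycle      rs = 0x0000_ffff -> 2 cycles
//     rs = 0x00ff_ffff -> 3 cycles     rs = 0xffff_ffff -> 4 cycles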
#[inline(always)]
pub(super) fn check_arm_cond(&self, cond: ArmCond) -> bool {
use ArmCond::*;
match cond {
Invalid => {
// TODO - we would normally want to panic here
false
}
EQ => self.cpsr.Z(),
NE => !self.cpsr.Z(),
HS => self.cpsr.C(),
LO => !self.cpsr.C(),
MI => self.cpsr.N(),
PL => !self.cpsr.N(),
VS => self.cpsr.V(),
VC => !self.cpsr.V(),
HI => self.cpsr.C() && !self.cpsr.Z(),
LS => !self.cpsr.C() || self.cpsr.Z(),
GE => self.cpsr.N() == self.cpsr.V(),
LT => self.cpsr.N() != self.cpsr.V(),
GT => !self.cpsr.Z() && (self.cpsr.N() == self.cpsr.V()),
LE => self.cpsr.Z() || (self.cpsr.N() != self.cpsr.V()),
AL => true,
}
}
#[cfg(feature = "debugger")]
fn debugger_record_step(&mut self, d: DecodedInstruction) {
self.dbg.gpr_previous = self.copy_registers();
self.dbg.last_executed = Some(d);
}
fn step_arm_exec(&mut self, insn: u32) -> CpuAction {
let hash = (((insn >> 16) & 0xff0) | ((insn >> 4) & 0xf)) as usize;
let arm_info = &Self::ARM_LUT[hash];
#[cfg(feature = "debugger")]
self.debugger_record_step(DecodedInstruction::Arm(ArmInstruction::new(
insn,
self.pc.wrapping_sub(8),
arm_info.fmt,
)));
(arm_info.handler_fn)(self, insn)
}
fn step_thumb_exec(&mut self, insn: u16) -> CpuAction {
let thumb_info = &Self::THUMB_LUT[(insn >> 6) as usize];
#[cfg(feature = "debugger")]
self.debugger_record_step(DecodedInstruction::Thumb(ThumbInstruction::new(
insn,
self.pc.wrapping_sub(4),
thumb_info.fmt,
)));
(thumb_info.handler_fn)(self, insn)
}
/// 2S + 1N
#[inline(always)]
pub fn reload_pipeline16(&mut self) {
self.pipeline[0] = self.load_16(self.pc, NonSeq) as u32;
self.advance_thumb();
self.pipeline[1] = self.load_16(self.pc, Seq) as u32;
self.advance_thumb();
self.next_fetch_access = Seq;
}
/// 2S + 1N
#[inline(always)]
pub fn reload_pipeline32(&mut self) {
self.pipeline[0] = self.load_32(self.pc, NonSeq);
self.advance_arm();
self.pipeline[1] = self.load_32(self.pc, Seq);
self.advance_arm();
self.next_fetch_access = Seq;
}
#[inline]
pub(super) fn advance_thumb(&mut self) {
self.pc = self.pc.wrapping_add(2)
}
#[inline]
pub(super) fn advance_arm(&mut self) {
self.pc = self.pc.wrapping_add(4)
}
#[inline]
pub fn get_decoded_opcode(&self) -> u32 {
self.pipeline[0]
}
#[inline]
pub fn get_prefetched_opcode(&self) -> u32 {
self.pipeline[1]
}
/// Performs a single pipeline step: fetches the next opcode into the pipeline
/// and executes the instruction that has reached the execute stage.
#[inline]
pub fn step(&mut self) {
match self.cpsr.state() {
CpuState::ARM => {
let pc = self.pc & !3;
let fetched_now = self.load_32(pc, self.next_fetch_access);
let insn = self.pipeline[0];
self.pipeline[0] = self.pipeline[1];
self.pipeline[1] = fetched_now;
let cond = ArmCond::from_u8(insn.bit_range(28..32) as u8)
.unwrap_or_else(|| unsafe { std::hint::unreachable_unchecked() });
if cond != ArmCond::AL && !self.check_arm_cond(cond) {
self.advance_arm();
self.next_fetch_access = MemoryAccess::NonSeq;
return;
}
match self.step_arm_exec(insn) {
CpuAction::AdvancePC(access) => {
self.next_fetch_access = access;
self.advance_arm();
}
CpuAction::PipelineFlushed => {}
}
}
CpuState::THUMB => {
let pc = self.pc & !1;
let fetched_now = self.load_16(pc, self.next_fetch_access);
let insn = self.pipeline[0];
self.pipeline[0] = self.pipeline[1];
self.pipeline[1] = fetched_now as u32;
match self.step_thumb_exec(insn as u16) {
CpuAction::AdvancePC(access) => {
self.advance_thumb();
self.next_fetch_access = access;
}
CpuAction::PipelineFlushed => {}
}
}
}
}
/// Gets the address of the next instruction that is going to be executed
pub fn get_next_pc(&self) -> Addr {
let insn_size = self.word_size() as u32;
self.pc - 2 * insn_size
}
pub fn get_cpu_state(&self) -> CpuState {
self.cpsr.state()
}
}
impl<I: MemoryInterface> fmt::Debug for Arm7tdmiCore<I> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
writeln!(f, "ARM7TDMI Core Status:")?;
writeln!(f, "\tCPSR: {}", self.cpsr)?;
writeln!(f, "\tGeneral Purpose Registers:")?;
let reg_normal_style = Style::new().bold();
let gpr = self.copy_registers();
for i in 0..15 {
let mut reg_name = reg_string(i).to_string();
reg_name.make_ascii_uppercase();
let entry = format!("\t{:-3} = 0x{:08x}", reg_name, gpr[i]);
write!(
f,
"{}{}",
reg_normal_style.paint(entry),
if (i + 1) % 4 == 0 { "\n" } else { "" }
)?;
}
let pc = format!("\tPC = 0x{:08x}", self.get_next_pc());
writeln!(f, "{}", reg_normal_style.paint(pc))
}
}
#[cfg(feature = "debugger")]
impl<I: MemoryInterface> fmt::Display for Arm7tdmiCore<I> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
writeln!(f, "ARM7TDMI Core Status:")?;
writeln!(f, "\tCPSR: {}", self.cpsr)?;
writeln!(f, "\tGeneral Purpose Registers:")?;
let reg_normal_style = Style::new().bold();
let reg_dirty_style = Colour::Black.bold().on(Colour::Yellow);
let gpr = self.copy_registers();
for i in 0..15 {
let mut reg_name = reg_string(i).to_string();
reg_name.make_ascii_uppercase();
let style = if gpr[i] != self.dbg.gpr_previous[i] {
&reg_dirty_style
} else {
&reg_normal_style
};
let entry = format!("\t{:-3} = 0x{:08x}", reg_name, gpr[i]);
write!(
f,
"{}{}",
style.paint(entry),
if (i + 1) % 4 == 0 { "\n" } else { "" }
)?;
}
let pc = format!("\tPC = 0x{:08x}", self.get_next_pc());
writeln!(f, "{}", reg_normal_style.paint(pc))
}
}
include!(concat!(env!("OUT_DIR"), "/arm_lut.rs"));
include!(concat!(env!("OUT_DIR"), "/thumb_lut.rs"));

View file

@ -1,51 +0,0 @@
use std::fmt;
use std::fmt::Write;
use std::marker::PhantomData;
use super::Addr;
use super::InstructionDecoder;
pub struct Disassembler<'a, D>
where
D: InstructionDecoder,
{
base: Addr,
pos: usize,
bytes: &'a [u8],
pub word_size: usize,
instruction_decoder: PhantomData<D>,
}
impl<'a, D> Disassembler<'a, D>
where
D: InstructionDecoder,
{
pub fn new(base: Addr, bytes: &'a [u8]) -> Disassembler<'_, D> {
Disassembler {
base: base as Addr,
pos: 0,
bytes,
word_size: std::mem::size_of::<D::IntType>(),
instruction_decoder: PhantomData,
}
}
}
impl<'a, D> Iterator for Disassembler<'a, D>
where
D: InstructionDecoder + fmt::Display,
<D as InstructionDecoder>::IntType: std::fmt::LowerHex,
{
type Item = (Addr, String);
fn next(&mut self) -> Option<Self::Item> {
let mut line = String::new();
let addr = self.base + self.pos as Addr;
let decoded: D = D::decode_from_bytes(&self.bytes[(self.pos)..], addr);
let decoded_raw = decoded.get_raw();
self.pos += self.word_size;
write!(&mut line, "{addr:8x}:\t{decoded_raw:08x} \t{decoded}").unwrap();
Some((self.pos as Addr, line))
}
}

View file

@ -1,72 +0,0 @@
use super::memory::MemoryInterface;
use super::Arm7tdmiCore;
use super::{CpuMode, CpuState};
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[allow(dead_code)]
/// Models a CPU exception and maps to the relevant entry in the exception vector
pub enum Exception {
Reset = 0x00,
UndefinedInstruction = 0x04,
SoftwareInterrupt = 0x08,
PrefatchAbort = 0x0c,
DataAbort = 0x10,
Reserved = 0x14,
Irq = 0x18,
Fiq = 0x1c,
}
impl<I: MemoryInterface> Arm7tdmiCore<I> {
pub fn exception(&mut self, e: Exception, lr: u32) {
use Exception::*;
let (new_mode, irq_disable, fiq_disable) = match e {
Reset => (CpuMode::Supervisor, true, true),
UndefinedInstruction => (CpuMode::Undefined, false, false),
SoftwareInterrupt => (CpuMode::Supervisor, true, false),
DataAbort => (CpuMode::Abort, false, false),
PrefatchAbort => (CpuMode::Abort, false, false),
Reserved => panic!("Cpu reserved exception"),
Irq => (CpuMode::Irq, true, false),
Fiq => (CpuMode::Fiq, true, true),
};
#[cfg(feature = "debugger")]
{
if self.dbg.trace_exceptions {
trace!("exception {:?} lr={:x} new_mode={:?}", e, lr, new_mode);
}
}
let new_bank = new_mode.bank_index();
self.banks.spsr_bank[new_bank] = self.cpsr;
self.banks.gpr_banked_r14[new_bank] = lr;
self.change_mode(self.cpsr.mode(), new_mode);
// Set appropriate CPSR bits
self.cpsr.set_state(CpuState::ARM);
self.cpsr.set_mode(new_mode);
if irq_disable {
self.cpsr.set_irq_disabled(true);
}
if fiq_disable {
self.cpsr.set_fiq_disabled(true);
}
// Set PC to vector address
self.pc = e as u32;
self.reload_pipeline32();
}
#[inline]
pub fn irq(&mut self) {
if !self.cpsr.irq_disabled() {
let lr = self.get_next_pc() + 4;
self.exception(Exception::Irq, lr);
}
}
#[inline]
pub fn software_interrupt(&mut self, lr: u32, _cmt: u32) {
self.exception(Exception::SoftwareInterrupt, lr);
}
}

View file

@ -1,36 +0,0 @@
use gdbstub::target;
use gdbstub::target::TargetResult;
use crate::Arm7tdmiCore;
use super::target::MemoryGdbInterface;
impl<I: MemoryGdbInterface> target::ext::breakpoints::Breakpoints for Arm7tdmiCore<I> {
// there are several kinds of breakpoints - this target uses software breakpoints
#[inline(always)]
fn support_sw_breakpoint(
&mut self,
) -> Option<target::ext::breakpoints::SwBreakpointOps<'_, Self>> {
Some(self)
}
}
impl<I: MemoryGdbInterface> target::ext::breakpoints::SwBreakpoint for Arm7tdmiCore<I> {
fn add_sw_breakpoint(
&mut self,
addr: u32,
_kind: gdbstub_arch::arm::ArmBreakpointKind,
) -> TargetResult<bool, Self> {
self.add_breakpoint(addr);
Ok(true)
}
fn remove_sw_breakpoint(
&mut self,
addr: u32,
_kind: gdbstub_arch::arm::ArmBreakpointKind,
) -> TargetResult<bool, Self> {
self.del_breakpoint(addr);
Ok(true)
}
}

View file

@ -1,50 +0,0 @@
use std::io;
use std::net::{TcpListener, TcpStream};
use log::info;
mod breakpoints;
pub mod target;
// Re-export the gdbstub crate
pub extern crate gdbstub;
pub extern crate gdbstub_arch;
/// Wait for tcp connection on port
pub fn wait_for_connection(port: u16) -> io::Result<TcpStream> {
let bind_addr = format!("0.0.0.0:{port}");
info!("waiting for connection on {:?}", bind_addr);
let sock = TcpListener::bind(bind_addr)?;
// Blocks until a GDB client connects via TCP.
// i.e., running `target remote localhost:<port>` from the GDB prompt.
let (stream, addr) = sock.accept()?;
info!("gdb connected from {:?}", addr);
Ok(stream)
}
/// Copy as many bytes of `data` as fit into `buf`.
/// Returns the number of bytes copied.
pub fn copy_to_buf(data: &[u8], buf: &mut [u8]) -> usize {
let len = buf.len().min(data.len());
buf[..len].copy_from_slice(&data[..len]);
len
}
/// Copy a range of `data` (starting at `offset`, up to `length` bytes) to `buf`.
/// Returns the number of bytes copied, or 0 if `offset` is past the end of `data`.
///
/// Mainly used by qXfer:_object_:read commands.
pub fn copy_range_to_buf(data: &[u8], offset: u64, length: usize, buf: &mut [u8]) -> usize {
let offset = offset as usize;
if offset > data.len() {
return 0;
}
let start = offset;
let end = (offset + length).min(data.len());
copy_to_buf(&data[start..end], buf)
}
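#[cfg(test)]
mod buf_copy_tests {
    // Illustrative sketch, not part of the original source: exercises the two copy
    // helpers above against a small byte slice.
    use super::{copy_range_to_buf, copy_to_buf};

    #[test]
    fn copy_helpers_clamp_to_available_data() {
        let data = [1u8, 2, 3, 4, 5];

        // copy_to_buf is limited by the destination size.
        let mut buf = [0u8; 4];
        assert_eq!(copy_to_buf(&data, &mut buf), 4);
        assert_eq!(buf, [1, 2, 3, 4]);

        // copy_range_to_buf clamps the requested range to the source length.
        let mut buf = [0u8; 8];
        assert_eq!(copy_range_to_buf(&data, 3, 10, &mut buf), 2);
        assert_eq!(&buf[..2], &[4, 5]);

        // An offset past the end of the data copies nothing.
        assert_eq!(copy_range_to_buf(&data, 9, 4, &mut buf), 0);
    }
}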

View file

@ -1,113 +0,0 @@
/// Implementing the Target trait for gdbstub
use gdbstub::common::Signal;
use gdbstub::target::ext::base::singlethread::{
SingleThreadBase, SingleThreadResume, SingleThreadSingleStep,
};
use gdbstub::target::ext::base::singlethread::{SingleThreadResumeOps, SingleThreadSingleStepOps};
use gdbstub::target::ext::base::BaseOps;
use gdbstub::target::ext::breakpoints::BreakpointsOps;
use gdbstub::target::{self, Target, TargetError, TargetResult};
use crate::memory::{DebugRead, MemoryInterface};
use crate::registers_consts::*;
use crate::Arm7tdmiCore;
pub trait MemoryGdbInterface: MemoryInterface + DebugRead {
fn memory_map_xml(&self, offset: u64, length: usize, buf: &mut [u8]) -> usize;
}
impl<I: MemoryGdbInterface> Target for Arm7tdmiCore<I> {
type Error = ();
type Arch = gdbstub_arch::arm::Armv4t;
#[inline(always)]
fn base_ops(&mut self) -> BaseOps<Self::Arch, Self::Error> {
BaseOps::SingleThread(self)
}
// opt-in to support for setting/removing breakpoints
#[inline(always)]
fn support_breakpoints(&mut self) -> Option<BreakpointsOps<Self>> {
Some(self)
}
fn support_memory_map(&mut self) -> Option<target::ext::memory_map::MemoryMapOps<Self>> {
Some(self)
}
}
impl<I: MemoryGdbInterface> SingleThreadBase for Arm7tdmiCore<I> {
fn read_registers(
&mut self,
regs: &mut gdbstub_arch::arm::reg::ArmCoreRegs,
) -> TargetResult<(), Self> {
regs.pc = self.get_next_pc();
regs.lr = self.get_reg(REG_LR);
regs.sp = self.get_reg(REG_SP);
regs.r[..].copy_from_slice(&self.gpr[..13]);
regs.cpsr = self.cpsr.get();
Ok(())
}
fn write_registers(
&mut self,
regs: &gdbstub_arch::arm::reg::ArmCoreRegs,
) -> TargetResult<(), Self> {
self.set_reg(REG_PC, regs.pc);
self.set_reg(REG_LR, regs.lr);
self.set_reg(REG_SP, regs.sp);
self.gpr[..13].copy_from_slice(&regs.r);
self.cpsr.set(regs.cpsr);
Ok(())
}
fn read_addrs(&mut self, start_addr: u32, data: &mut [u8]) -> TargetResult<(), Self> {
self.bus.debug_get_into_bytes(start_addr, data);
Ok(())
}
fn write_addrs(&mut self, _start_addr: u32, _data: &[u8]) -> TargetResult<(), Self> {
// todo!("implement DebugWrite bus extention")
Err(TargetError::NonFatal)
}
// most targets will also want to support resumption...
#[inline(always)]
fn support_resume(&mut self) -> Option<SingleThreadResumeOps<Self>> {
Some(self)
}
}
impl<I: MemoryGdbInterface> SingleThreadResume for Arm7tdmiCore<I> {
fn resume(&mut self, _signal: Option<Signal>) -> Result<(), Self::Error> {
// do nothing
Ok(())
}
// ...and if the target supports resumption, it'll likely want to support
// single-step resume as well
#[inline(always)]
fn support_single_step(&mut self) -> Option<SingleThreadSingleStepOps<'_, Self>> {
Some(self)
}
}
impl<I: MemoryGdbInterface> SingleThreadSingleStep for Arm7tdmiCore<I> {
fn step(&mut self, _signal: Option<Signal>) -> Result<(), Self::Error> {
self.step();
Ok(())
}
}
impl<I: MemoryGdbInterface> target::ext::memory_map::MemoryMap for Arm7tdmiCore<I> {
fn memory_map_xml(
&self,
offset: u64,
length: usize,
buf: &mut [u8],
) -> TargetResult<usize, Self> {
Ok(self.bus.memory_map_xml(offset, length, buf))
}
}

View file

@ -1,146 +0,0 @@
#[macro_use]
extern crate serde;
#[macro_use]
extern crate enum_primitive_derive;
use std::fmt;
use num::Num;
use serde::{Deserialize, Serialize};
pub mod arm;
pub mod thumb;
use arm::ArmInstruction;
use thumb::ThumbInstruction;
pub mod cpu;
pub use cpu::*;
pub mod alu;
pub mod memory;
pub use alu::*;
use memory::Addr;
pub mod disass;
pub mod exception;
pub mod gdb;
pub use gdb::{gdbstub, gdbstub_arch};
pub mod psr;
mod simple_memory;
pub use simple_memory::SimpleMemory;
pub mod registers_consts {
pub const REG_PC: usize = 15;
pub const REG_LR: usize = 14;
pub const REG_SP: usize = 13;
}
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum DecodedInstruction {
Arm(ArmInstruction),
Thumb(ThumbInstruction),
}
impl DecodedInstruction {
pub fn get_pc(&self) -> Addr {
match self {
DecodedInstruction::Arm(a) => a.pc,
DecodedInstruction::Thumb(t) => t.pc,
}
}
}
#[cfg(feature = "debugger")]
impl fmt::Display for DecodedInstruction {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
DecodedInstruction::Arm(a) => write!(f, "{}", a),
DecodedInstruction::Thumb(t) => write!(f, "{}", t),
}
}
}
pub trait InstructionDecoder: Sized {
type IntType: Num;
fn decode(n: Self::IntType, addr: Addr) -> Self;
/// Helper functions for the Disassembler
fn decode_from_bytes(bytes: &[u8], addr: Addr) -> Self;
fn get_raw(&self) -> Self::IntType;
}
pub fn reg_string<T: Into<usize>>(reg: T) -> &'static str {
let reg_names = &[
"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "fp", "ip", "sp", "lr",
"pc",
];
reg_names[reg.into()]
}
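#[cfg(test)]
mod reg_string_tests {
    // Illustrative sketch, not part of the original source: checks that the
    // register aliases above match the conventional ARM register names.
    use super::reg_string;

    #[test]
    fn named_registers_use_their_aliases() {
        assert_eq!(reg_string(0usize), "r0");
        assert_eq!(reg_string(11usize), "fp");
        assert_eq!(reg_string(13usize), "sp");
        assert_eq!(reg_string(15usize), "pc");
    }
}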
#[derive(Debug, PartialEq, Eq, Primitive, Copy, Clone)]
#[repr(u8)]
pub enum CpuState {
ARM = 0,
THUMB = 1,
}
impl fmt::Display for CpuState {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use CpuState::*;
match self {
ARM => write!(f, "ARM"),
THUMB => write!(f, "THUMB"),
}
}
}
#[derive(Debug, Primitive, Copy, Clone, PartialEq, Eq)]
#[repr(u8)]
pub enum CpuMode {
User = 0b10000,
Fiq = 0b10001,
Irq = 0b10010,
Supervisor = 0b10011,
Abort = 0b10111,
Undefined = 0b11011,
System = 0b11111,
}
impl CpuMode {
pub fn spsr_index(&self) -> Option<usize> {
match self {
CpuMode::Fiq => Some(0),
CpuMode::Irq => Some(1),
CpuMode::Supervisor => Some(2),
CpuMode::Abort => Some(3),
CpuMode::Undefined => Some(4),
_ => None,
}
}
pub fn bank_index(&self) -> usize {
match self {
CpuMode::User | CpuMode::System => 0,
CpuMode::Fiq => 1,
CpuMode::Irq => 2,
CpuMode::Supervisor => 3,
CpuMode::Abort => 4,
CpuMode::Undefined => 5,
}
}
}
impl fmt::Display for CpuMode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use CpuMode::*;
match self {
User => write!(f, "USR"),
Fiq => write!(f, "FIQ"),
Irq => write!(f, "IRQ"),
Supervisor => write!(f, "SVC"),
Abort => write!(f, "ABT"),
Undefined => write!(f, "UND"),
System => write!(f, "SYS"),
}
}
}

View file

@ -1,265 +0,0 @@
use super::Arm7tdmiCore;
use std::fmt;
pub type Addr = u32;
#[derive(Serialize, Deserialize, Debug, Copy, Clone)]
pub enum MemoryAccess {
NonSeq = 0,
Seq,
}
impl Default for MemoryAccess {
fn default() -> MemoryAccess {
MemoryAccess::NonSeq
}
}
impl fmt::Display for MemoryAccess {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{}",
match self {
MemoryAccess::NonSeq => "N",
MemoryAccess::Seq => "S",
}
)
}
}
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[repr(u8)]
pub enum MemoryAccessWidth {
MemoryAccess8 = 0,
MemoryAccess16,
MemoryAccess32,
}
/// A trait meant to abstract memory accesses and report the access type back to the user of the arm7tdmi::Arm7tdmiCore
///
/// struct Memory {
/// data: [u8; 0x4000]
/// }
///
/// impl MemoryInterface for Memory {
/// fn load_8(&mut self, addr: u32, access: MemoryAccess) -> u8 {
/// debug!("CPU read {:?} cycle", access);
/// self.data[(addr & 0x3fff) as usize]
/// }
///
/// fn store_8(&mut self, addr: u32, value: u8, access: MemoryAccess) {
/// debug!("CPU write {:?} cycle", access);
/// self.data[(addr & 0x3fff) as usize] = value;
/// }
///
/// fn idle_cycle(&mut self) {
/// debug!("CPU idle cycle");
/// }
///
/// // implement rest of trait methods
/// }
///
/// let mem = Shared::new(Memory { ... });
/// let cpu = arm7tdmi::Arm7tdmiCore::new(mem.clone());
///
pub trait MemoryInterface {
/// Read a byte
fn load_8(&mut self, addr: u32, access: MemoryAccess) -> u8;
/// Read a halfword
fn load_16(&mut self, addr: u32, access: MemoryAccess) -> u16;
/// Read a word
fn load_32(&mut self, addr: u32, access: MemoryAccess) -> u32;
/// Write a byte
fn store_8(&mut self, addr: u32, value: u8, access: MemoryAccess);
/// Write a halfword
fn store_16(&mut self, addr: u32, value: u16, access: MemoryAccess);
/// Write a word
fn store_32(&mut self, addr: u32, value: u32, access: MemoryAccess);
fn idle_cycle(&mut self);
}
impl<I: MemoryInterface> MemoryInterface for Arm7tdmiCore<I> {
#[inline]
fn load_8(&mut self, addr: u32, access: MemoryAccess) -> u8 {
self.bus.load_8(addr, access)
}
#[inline]
fn load_16(&mut self, addr: u32, access: MemoryAccess) -> u16 {
self.bus.load_16(addr & !1, access)
}
#[inline]
fn load_32(&mut self, addr: u32, access: MemoryAccess) -> u32 {
self.bus.load_32(addr & !3, access)
}
#[inline]
fn store_8(&mut self, addr: u32, value: u8, access: MemoryAccess) {
self.bus.store_8(addr, value, access);
}
#[inline]
fn store_16(&mut self, addr: u32, value: u16, access: MemoryAccess) {
self.bus.store_16(addr & !1, value, access);
}
#[inline]
fn store_32(&mut self, addr: u32, value: u32, access: MemoryAccess) {
self.bus.store_32(addr & !3, value, access);
}
#[inline]
fn idle_cycle(&mut self) {
self.bus.idle_cycle();
}
}
/// Implementation of memory access helpers
impl<I: MemoryInterface> Arm7tdmiCore<I> {
#[inline]
pub(super) fn store_aligned_32(&mut self, addr: Addr, value: u32, access: MemoryAccess) {
self.store_32(addr & !0x3, value, access);
}
#[inline]
pub(super) fn store_aligned_16(&mut self, addr: Addr, value: u16, access: MemoryAccess) {
self.store_16(addr & !0x1, value, access);
}
/// Helper function for "ldr" instruction that handles misaligned addresses
#[inline]
pub(super) fn ldr_word(&mut self, addr: Addr, access: MemoryAccess) -> u32 {
if addr & 0x3 != 0 {
let rotation = (addr & 0x3) << 3;
let value = self.load_32(addr & !0x3, access);
let mut carry = self.cpsr.C();
let v = self.ror(value, rotation, &mut carry, false, false);
self.cpsr.set_C(carry);
v
} else {
self.load_32(addr, access)
}
}
/// Helper function for "ldrh" instruction that handles misaligned addresses
#[inline]
pub(super) fn ldr_half(&mut self, addr: Addr, access: MemoryAccess) -> u32 {
if addr & 0x1 != 0 {
let rotation = (addr & 0x1) << 3;
let value = self.load_16(addr & !0x1, access);
let mut carry = self.cpsr.C();
let v = self.ror(value as u32, rotation, &mut carry, false, false);
self.cpsr.set_C(carry);
v
} else {
self.load_16(addr, access) as u32
}
}
/// Helper function for "ldrsh" instruction that handles misaligned addresses
#[inline]
pub(super) fn ldr_sign_half(&mut self, addr: Addr, access: MemoryAccess) -> u32 {
if addr & 0x1 != 0 {
self.load_8(addr, access) as i8 as i32 as u32
} else {
self.load_16(addr, access) as i16 as i32 as u32
}
}
}
/// Simple trait for accessing bus peripherals (higher level API than the low-level MemoryInterface)
pub trait BusIO {
fn read_32(&mut self, addr: Addr) -> u32 {
self.read_16(addr) as u32 | (self.read_16(addr + 2) as u32) << 16
}
fn read_16(&mut self, addr: Addr) -> u16 {
self.default_read_16(addr)
}
#[inline(always)]
fn default_read_16(&mut self, addr: Addr) -> u16 {
self.read_8(addr) as u16 | (self.read_8(addr + 1) as u16) << 8
}
fn read_8(&mut self, addr: Addr) -> u8;
fn write_32(&mut self, addr: Addr, value: u32) {
self.write_16(addr, (value & 0xffff) as u16);
self.write_16(addr + 2, (value >> 16) as u16);
}
fn write_16(&mut self, addr: Addr, value: u16) {
self.default_write_16(addr, value)
}
#[inline(always)]
fn default_write_16(&mut self, addr: Addr, value: u16) {
self.write_8(addr, (value & 0xff) as u8);
self.write_8(addr + 1, ((value >> 8) & 0xff) as u8);
}
fn write_8(&mut self, addr: Addr, value: u8);
fn get_bytes(&mut self, range: std::ops::Range<u32>) -> Vec<u8> {
let mut bytes = Vec::new();
for b in range {
bytes.push(self.read_8(b));
}
bytes
}
}
/// Helper trait for reading memory as if we were an all-powerful debugger
pub trait DebugRead: BusIO {
fn debug_read_32(&mut self, addr: Addr) -> u32 {
self.debug_read_16(addr) as u32 | (self.debug_read_16(addr + 2) as u32) << 16
}
fn debug_read_16(&mut self, addr: Addr) -> u16 {
self.debug_read_8(addr) as u16 | (self.debug_read_8(addr + 1) as u16) << 8
}
fn debug_read_8(&mut self, addr: Addr) -> u8;
fn debug_get_bytes(&mut self, range: std::ops::Range<Addr>) -> Vec<u8> {
let mut bytes = Vec::new();
for b in range {
bytes.push(self.debug_read_8(b));
}
bytes
}
fn debug_get_into_bytes(&mut self, start_addr: Addr, bytes: &mut [u8]) {
bytes
.iter_mut()
.enumerate()
.for_each(|(idx, byte)| *byte = self.debug_read_8(start_addr + (idx as Addr)));
}
}
/// The caller is assumed to handle out-of-bounds accesses;
/// for performance reasons, this impl trusts that `addr` is within the array range.
impl BusIO for Box<[u8]> {
#[inline]
fn read_8(&mut self, addr: Addr) -> u8 {
unsafe { *self.get_unchecked(addr as usize) }
}
#[inline]
fn write_8(&mut self, addr: Addr, value: u8) {
unsafe {
*self.get_unchecked_mut(addr as usize) = value;
}
}
}
impl DebugRead for Box<[u8]> {
#[inline]
fn debug_read_8(&mut self, addr: Addr) -> u8 {
self[addr as usize]
}
}
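#[cfg(test)]
mod boxed_memory_tests {
    // Illustrative sketch, not part of the original source: round-trips a word
    // through the BusIO / DebugRead impls for Box<[u8]> defined above.
    use super::{BusIO, DebugRead};

    #[test]
    fn boxed_slice_stores_words_little_endian() {
        let mut mem: Box<[u8]> = vec![0u8; 0x20].into_boxed_slice();
        mem.write_32(0x04, 0x1234_5678);
        assert_eq!(mem.read_16(0x04), 0x5678);
        assert_eq!(mem.read_32(0x04), 0x1234_5678);
        assert_eq!(mem.debug_read_8(0x05), 0x56); // little-endian byte order
    }
}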

View file

@ -1,161 +0,0 @@
/// The program status register
use std::fmt;
use serde::{Deserialize, Serialize};
use bit::BitIndex;
use num::FromPrimitive;
use super::{CpuMode, CpuState};
use colored::*;
impl From<CpuState> for bool {
fn from(state: CpuState) -> bool {
match state {
CpuState::ARM => false,
CpuState::THUMB => true,
}
}
}
impl From<bool> for CpuState {
fn from(flag: bool) -> CpuState {
if flag {
CpuState::THUMB
} else {
CpuState::ARM
}
}
}
#[derive(Serialize, Deserialize, Debug, Clone, Copy, Default)]
#[repr(transparent)]
pub struct RegPSR {
raw: u32,
}
const RESERVED_BIT_MASK: u32 = 0x0fffff00;
fn clear_reserved(n: u32) -> u32 {
n & !RESERVED_BIT_MASK
}
impl RegPSR {
pub const FLAG_BITMASK: u32 = 0xf000_0000;
pub fn new(u: u32) -> RegPSR {
RegPSR {
raw: clear_reserved(u),
}
}
pub fn get(&self) -> u32 {
self.raw
}
pub fn set(&mut self, psr: u32) {
self.raw = clear_reserved(psr);
}
pub fn set_flag_bits(&mut self, value: u32) {
self.raw &= !Self::FLAG_BITMASK;
self.raw |= Self::FLAG_BITMASK & value;
}
pub fn state(&self) -> CpuState {
self.raw.bit(5).into()
}
pub fn set_state(&mut self, state: CpuState) {
self.raw.set_bit(5, state.into());
}
pub fn mode(&self) -> CpuMode {
CpuMode::from_u32(self.raw.bit_range(0..5)).unwrap()
}
pub fn set_mode(&mut self, mode: CpuMode) {
self.raw.set_bit_range(0..5, (mode as u32) & 0b1_1111);
}
pub fn irq_disabled(&self) -> bool {
self.raw.bit(7)
}
pub fn set_irq_disabled(&mut self, disabled: bool) {
self.raw.set_bit(7, disabled);
}
pub fn fiq_disabled(&self) -> bool {
self.raw.bit(6)
}
pub fn set_fiq_disabled(&mut self, disabled: bool) {
self.raw.set_bit(6, disabled);
}
#[allow(non_snake_case)]
pub fn N(&self) -> bool {
self.raw.bit(31)
}
#[allow(non_snake_case)]
pub fn set_N(&mut self, flag: bool) {
self.raw.set_bit(31, flag);
}
#[allow(non_snake_case)]
pub fn Z(&self) -> bool {
self.raw.bit(30)
}
#[allow(non_snake_case)]
pub fn set_Z(&mut self, flag: bool) {
self.raw.set_bit(30, flag);
}
#[allow(non_snake_case)]
pub fn C(&self) -> bool {
self.raw.bit(29)
}
#[allow(non_snake_case)]
pub fn set_C(&mut self, flag: bool) {
self.raw.set_bit(29, flag);
}
#[allow(non_snake_case)]
pub fn V(&self) -> bool {
self.raw.bit(28)
}
#[allow(non_snake_case)]
pub fn set_V(&mut self, flag: bool) {
self.raw.set_bit(28, flag);
}
}
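#[cfg(test)]
mod psr_tests {
    // Illustrative sketch, not part of the original source: checks that the mode,
    // state and flag accessors above round-trip through the raw PSR bits.
    use super::RegPSR;
    use crate::{CpuMode, CpuState};

    #[test]
    fn mode_state_and_flags_round_trip() {
        // 0xd3 = supervisor mode, ARM state, IRQ and FIQ disabled (reset value).
        let mut psr = RegPSR::new(0x0000_00d3);
        assert_eq!(psr.mode(), CpuMode::Supervisor);
        assert_eq!(psr.state(), CpuState::ARM);
        assert!(psr.irq_disabled());
        assert!(psr.fiq_disabled());

        psr.set_mode(CpuMode::Irq);
        psr.set_state(CpuState::THUMB);
        psr.set_N(true);
        assert_eq!(psr.mode(), CpuMode::Irq);
        assert_eq!(psr.state(), CpuState::THUMB);
        assert!(psr.N());
    }
}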
impl fmt::Display for RegPSR {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let disabled_string = |disabled: bool| -> ColoredString {
if disabled {
"disabled".bright_red()
} else {
"enabled".bright_green()
}
};
write!(
f,
"{{ [{raw:#010x}] mode: {mode}, state: {state}, irq: {irq}, fiq: {fiq}, condition_flags: (N={N} Z={Z} C={C} V={V}) }}",
raw = self.raw,
mode = self.mode(),
state = self.state(),
irq = disabled_string(self.irq_disabled()),
fiq = disabled_string(self.fiq_disabled()),
N = self.N() as u8,
Z = self.Z() as u8,
C = self.C() as u8,
V = self.V() as u8,
)
}
}

View file

@ -1,84 +0,0 @@
use crate::gdb::{copy_range_to_buf, target::MemoryGdbInterface};
use crate::memory::{Addr, BusIO, DebugRead, MemoryAccess, MemoryInterface};
/// Simple wrapper around a byte array for memory access.
/// For use by tests and examples of this crate.
pub struct SimpleMemory {
data: Box<[u8]>,
}
impl SimpleMemory {
pub fn new(capacity: usize) -> SimpleMemory {
SimpleMemory {
data: vec![0; capacity].into_boxed_slice(),
}
}
pub fn load_program(&mut self, program: &[u8]) {
self.data[..program.len()].copy_from_slice(program);
}
}
impl MemoryInterface for SimpleMemory {
#[inline]
fn load_8(&mut self, addr: u32, _access: MemoryAccess) -> u8 {
self.read_8(addr)
}
#[inline]
fn load_16(&mut self, addr: u32, _access: MemoryAccess) -> u16 {
self.read_16(addr & !1)
}
#[inline]
fn load_32(&mut self, addr: u32, _access: MemoryAccess) -> u32 {
self.read_32(addr & !3)
}
fn store_8(&mut self, addr: u32, value: u8, _access: MemoryAccess) {
self.write_8(addr, value);
}
fn store_16(&mut self, addr: u32, value: u16, _access: MemoryAccess) {
self.write_16(addr & !1, value);
}
fn store_32(&mut self, addr: u32, value: u32, _access: MemoryAccess) {
self.write_32(addr & !3, value);
}
fn idle_cycle(&mut self) {}
}
impl BusIO for SimpleMemory {
fn read_8(&mut self, addr: Addr) -> u8 {
*self.data.get(addr as usize).unwrap_or(&0)
}
fn write_8(&mut self, addr: Addr, value: u8) {
if let 0..=0x3FFF = addr {
self.data[addr as usize] = value;
}
}
}
impl DebugRead for SimpleMemory {
fn debug_read_8(&mut self, addr: Addr) -> u8 {
*self.data.get(addr as usize).unwrap_or(&0)
}
}
impl MemoryGdbInterface for SimpleMemory {
fn memory_map_xml(&self, offset: u64, length: usize, buf: &mut [u8]) -> usize {
let memory_map = format!(
r#"<?xml version="1.0"?>
<!DOCTYPE memory-map
PUBLIC "+//IDN gnu.org//DTD GDB Memory Map V1.0//EN"
"http://sourceware.org/gdb/gdb-memory-map.dtd">
<memory-map>
<memory type="ram" start="0x0" length="{}"/>
</memory-map>"#,
self.data.len()
);
copy_range_to_buf(memory_map.trim().as_bytes(), offset, length, buf)
}
}
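#[cfg(test)]
mod simple_memory_tests {
    // Illustrative sketch, not part of the original source: round-trips a word
    // through SimpleMemory using the BusIO helpers it implements.
    use super::SimpleMemory;
    use crate::memory::BusIO;

    #[test]
    fn word_round_trip_is_little_endian() {
        let mut mem = SimpleMemory::new(0x100);
        mem.write_32(0x10, 0xdead_beef);
        assert_eq!(mem.read_32(0x10), 0xdead_beef);
        assert_eq!(mem.read_8(0x10), 0xef); // least significant byte first
    }
}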

View file

@ -1,350 +0,0 @@
use std::fmt;
use crate::bit::BitIndex;
use super::*;
use crate::arm7tdmi::*;
use super::ThumbDecodeHelper;
pub(super) mod consts {
pub(super) mod flags {
pub const FLAG_H1: usize = 7;
pub const FLAG_H2: usize = 6;
pub const FLAG_R: usize = 8;
pub const FLAG_LOW_OFFSET: usize = 11;
pub const FLAG_SP: usize = 11;
pub const FLAG_SIGN_EXTEND: usize = 10;
pub const FLAG_HALFWORD: usize = 11;
}
}
impl ThumbInstruction {
fn fmt_thumb_move_shifted_reg(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{op}\t{Rd}, {Rs}, #{Offset5}",
op = self.raw.format1_op(),
Rd = reg_string(self.raw & 0b111),
Rs = reg_string(self.raw.rs()),
Offset5 = self.raw.offset5()
)
}
fn fmt_thumb_data_process_imm(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{op}\t{Rd}, #{Offset8:#x}",
op = self.raw.format3_op(),
Rd = reg_string(self.raw.bit_range(8..11)),
Offset8 = self.raw & 0xff
)
}
fn fmt_thumb_alu_ops(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{op}\t{Rd}, {Rs}",
op = self.raw.format4_alu_op(),
Rd = reg_string(self.raw & 0b111),
Rs = reg_string(self.raw.rs())
)
}
fn fmt_thumb_high_reg_op_or_bx(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let op = self.raw.format5_op();
let dst_reg = if self.raw.flag(consts::flags::FLAG_H1) {
(self.raw & 0b111) + 8
} else {
self.raw & 0b111
};
let src_reg = if self.raw.flag(consts::flags::FLAG_H2) {
self.raw.rs() + 8
} else {
self.raw.rs()
};
write!(f, "{}\t", op)?;
match op {
OpFormat5::BX => write!(f, "{}", reg_string(src_reg)),
_ => write!(
f,
"{dst}, {src}",
dst = reg_string(dst_reg),
src = reg_string(src_reg)
),
}
}
fn fmt_thumb_ldr_pc(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"ldr\t{Rd}, [pc, #{Imm:#x}] ; = #{effective:#x}",
Rd = reg_string(self.raw.bit_range(8..11)),
Imm = self.raw.word8(),
effective = (self.pc + 4 & !0b10) + (self.raw.word8() as Addr)
)
}
fn fmt_thumb_ldr_str_reg_offset(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{op}{b}\t{Rd}, [{Rb}, {Ro}]",
op = if self.raw.is_load() { "ldr" } else { "str" },
b = if self.raw.bit(10) { "b" } else { "" },
Rd = reg_string(self.raw & 0b111),
Rb = reg_string(self.raw.rb()),
Ro = reg_string(self.raw.ro()),
)
}
fn fmt_thumb_ldr_str_shb(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{op}\t{Rd}, [{Rb}, {Ro}]",
op = {
match (
self.raw.flag(consts::flags::FLAG_SIGN_EXTEND),
self.raw.flag(consts::flags::FLAG_HALFWORD),
) {
(false, false) => "strh",
(false, true) => "ldrh",
(true, false) => "ldsb",
(true, true) => "ldsh",
}
},
Rd = reg_string(self.raw & 0b111),
Rb = reg_string(self.raw.rb()),
Ro = reg_string(self.raw.ro()),
)
}
fn fmt_thumb_ldr_str_imm_offset(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let is_transferring_bytes = self.raw.bit(12);
write!(
f,
"{op}{b}\t{Rd}, [{Rb}, #{imm:#x}]",
op = if self.raw.is_load() { "ldr" } else { "str" },
b = if is_transferring_bytes { "b" } else { "" },
Rd = reg_string(self.raw & 0b111),
Rb = reg_string(self.raw.rb()),
imm = {
let offset5 = self.raw.offset5();
if is_transferring_bytes {
offset5
} else {
(offset5 << 3) >> 1
}
},
)
}
fn fmt_thumb_ldr_str_halfword(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{op}\t{Rd}, [{Rb}, #{imm:#x}]",
op = if self.raw.is_load() { "ldrh" } else { "strh" },
Rd = reg_string(self.raw & 0b111),
Rb = reg_string(self.raw.rb()),
imm = self.raw.offset5() << 1
)
}
fn fmt_thumb_ldr_str_sp(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{op}\t{Rd}, [sp, #{Imm:#x}]",
op = if self.raw.is_load() { "ldr" } else { "str" },
Rd = reg_string(self.raw.bit_range(8..11)),
Imm = self.raw.word8(),
)
}
fn fmt_thumb_load_address(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"add\t{Rd}, {r}, #{Imm:#x}",
Rd = reg_string(self.raw.bit_range(8..11)),
r = if self.raw.flag(consts::flags::FLAG_SP) {
"sp"
} else {
"pc"
},
Imm = self.raw.word8(),
)
}
fn fmt_thumb_add_sub(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let operand = if self.raw.is_immediate_operand() {
format!("#{:x}", self.raw.bit_range(6..9))
} else {
String::from(reg_string(self.raw.rn()))
};
write!(
f,
"{op}\t{Rd}, {Rs}, {operand}",
op = if self.raw.is_subtract() { "sub" } else { "add" },
Rd = reg_string(self.raw & 0b111),
Rs = reg_string(self.raw.rs()),
operand = operand
)
}
fn fmt_thumb_add_sp(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "add\tsp, #{imm:x}", imm = self.raw.sword7())
}
fn fmt_register_list(&self, f: &mut fmt::Formatter<'_>, rlist: u8) -> fmt::Result {
let mut has_first = false;
for i in 0..8 {
if rlist.bit(i) {
if has_first {
write!(f, ", {}", reg_string(i))?;
} else {
has_first = true;
write!(f, "{}", reg_string(i))?;
}
}
}
Ok(())
}
fn fmt_thumb_push_pop(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}\t{{", if self.raw.is_load() { "pop" } else { "push" })?;
let rlist = self.raw.register_list();
self.fmt_register_list(f, rlist)?;
if self.raw.flag(consts::flags::FLAG_R) {
let r = if self.raw.is_load() { "pc" } else { "lr" };
if rlist != 0 {
write!(f, ", {}", r)?;
} else {
write!(f, "{}", r)?;
}
}
write!(f, "}}")
}
fn fmt_thumb_ldm_stm(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{op}\t{Rb}!, {{",
op = if self.raw.is_load() { "ldm" } else { "stm" },
Rb = reg_string(self.raw.rb()),
)?;
self.fmt_register_list(f, self.raw.register_list())?;
write!(f, "}}")
}
fn fmt_thumb_branch_with_cond(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"b{cond}\t{addr:#x}",
cond = self.raw.cond(),
addr = {
let offset = self.raw.bcond_offset();
(self.pc as i32 + 4).wrapping_add(offset) as Addr
}
)
}
fn fmt_thumb_swi(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "swi\t{value:#x}", value = self.raw & 0xff,)
}
fn fmt_thumb_branch(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"b\t{addr:#x}",
addr = {
let offset = (self.raw.offset11() << 21) >> 20;
(self.pc as i32 + 4).wrapping_add(offset) as Addr
}
)
}
fn fmt_thumb_branch_long_with_link(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "bl\t#0x{:08x}", {
let offset11 = self.raw.offset11();
if self.raw.flag(consts::flags::FLAG_LOW_OFFSET) {
(offset11 << 1) as i32
} else {
((offset11 << 21) >> 9) as i32
}
})
}
}
#[cfg(feature = "debugger")]
impl fmt::Display for ThumbInstruction {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.fmt {
ThumbFormat::MoveShiftedReg => self.fmt_thumb_move_shifted_reg(f),
ThumbFormat::AddSub => self.fmt_thumb_add_sub(f),
ThumbFormat::DataProcessImm => self.fmt_thumb_data_process_imm(f),
ThumbFormat::AluOps => self.fmt_thumb_alu_ops(f),
ThumbFormat::HiRegOpOrBranchExchange => self.fmt_thumb_high_reg_op_or_bx(f),
ThumbFormat::LdrPc => self.fmt_thumb_ldr_pc(f),
ThumbFormat::LdrStrRegOffset => self.fmt_thumb_ldr_str_reg_offset(f),
ThumbFormat::LdrStrSHB => self.fmt_thumb_ldr_str_shb(f),
ThumbFormat::LdrStrImmOffset => self.fmt_thumb_ldr_str_imm_offset(f),
ThumbFormat::LdrStrHalfWord => self.fmt_thumb_ldr_str_halfword(f),
ThumbFormat::LdrStrSp => self.fmt_thumb_ldr_str_sp(f),
ThumbFormat::LoadAddress => self.fmt_thumb_load_address(f),
ThumbFormat::AddSp => self.fmt_thumb_add_sp(f),
ThumbFormat::PushPop => self.fmt_thumb_push_pop(f),
ThumbFormat::LdmStm => self.fmt_thumb_ldm_stm(f),
ThumbFormat::BranchConditional => self.fmt_thumb_branch_with_cond(f),
ThumbFormat::Swi => self.fmt_thumb_swi(f),
ThumbFormat::Branch => self.fmt_thumb_branch(f),
ThumbFormat::BranchLongWithLink => self.fmt_thumb_branch_long_with_link(f),
ThumbFormat::Undefined => write!(f, "<Undefined>"),
}
}
}
impl fmt::Display for OpFormat3 {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
OpFormat3::MOV => write!(f, "mov"),
OpFormat3::CMP => write!(f, "cmp"),
OpFormat3::ADD => write!(f, "add"),
OpFormat3::SUB => write!(f, "sub"),
}
}
}
impl fmt::Display for OpFormat5 {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
OpFormat5::ADD => write!(f, "add"),
OpFormat5::CMP => write!(f, "cmp"),
OpFormat5::MOV => write!(f, "mov"),
OpFormat5::BX => write!(f, "bx"),
}
}
}
impl fmt::Display for ThumbAluOps {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use ThumbAluOps::*;
match self {
AND => write!(f, "and"),
EOR => write!(f, "eor"),
LSL => write!(f, "lsl"),
LSR => write!(f, "lsr"),
ASR => write!(f, "asr"),
ADC => write!(f, "adc"),
SBC => write!(f, "sbc"),
ROR => write!(f, "ror"),
TST => write!(f, "tst"),
NEG => write!(f, "neg"),
CMP => write!(f, "cmp"),
CMN => write!(f, "cmn"),
ORR => write!(f, "orr"),
MUL => write!(f, "mul"),
BIC => write!(f, "bic"),
MVN => write!(f, "mvn"),
}
}
}

View file

@ -1,575 +0,0 @@
use crate::{
exception::Exception,
memory::{MemoryAccess, MemoryInterface},
registers_consts::*,
Arm7tdmiCore, CpuAction,
};
use super::*;
use MemoryAccess::*;
impl<I: MemoryInterface> Arm7tdmiCore<I> {
/// Format 1
/// Execution Time: 1S
pub(in super::super) fn exec_thumb_move_shifted_reg<const BS_OP: u8, const IMM: u8>(
&mut self,
insn: u16,
) -> CpuAction {
let rd = (insn & 0b111) as usize;
let rs = insn.bit_range(3..6) as usize;
let shift_amount = IMM as u32;
let mut carry = self.cpsr.C();
let bsop = match BS_OP {
0 => BarrelShiftOpCode::LSL,
1 => BarrelShiftOpCode::LSR,
2 => BarrelShiftOpCode::ASR,
3 => BarrelShiftOpCode::ROR,
_ => unsafe { std::hint::unreachable_unchecked() },
};
let op2 = self.barrel_shift_op(bsop, self.gpr[rs], shift_amount, &mut carry, true);
self.gpr[rd] = op2;
self.alu_update_flags(op2, false, carry, self.cpsr.V());
CpuAction::AdvancePC(Seq)
}
/// Format 2
/// Execution Time: 1S
pub(in super::super) fn exec_thumb_add_sub<
const SUB: bool,
const IMM: bool,
const RN: usize,
>(
&mut self,
insn: u16,
) -> CpuAction {
let rd = (insn & 0b111) as usize;
let op1 = self.get_reg(insn.rs());
let op2 = if IMM { RN as u32 } else { self.get_reg(RN) };
let mut carry = self.cpsr.C();
let mut overflow = self.cpsr.V();
let result = if SUB {
self.alu_sub_flags(op1, op2, &mut carry, &mut overflow)
} else {
self.alu_add_flags(op1, op2, &mut carry, &mut overflow)
};
self.alu_update_flags(result, true, carry, overflow);
self.set_reg(rd, result);
CpuAction::AdvancePC(Seq)
}
/// Format 3
/// Execution Time: 1S
pub(in super::super) fn exec_thumb_data_process_imm<const OP: u8, const RD: usize>(
&mut self,
insn: u16,
) -> CpuAction {
use OpFormat3::*;
let op = OpFormat3::from_u8(OP).unwrap();
let op1 = self.gpr[RD];
let op2_imm = (insn & 0xff) as u32;
let mut carry = self.cpsr.C();
let mut overflow = self.cpsr.V();
let result = match op {
MOV => op2_imm,
CMP | SUB => self.alu_sub_flags(op1, op2_imm, &mut carry, &mut overflow),
ADD => self.alu_add_flags(op1, op2_imm, &mut carry, &mut overflow),
};
let arithmetic = op == ADD || op == SUB;
self.alu_update_flags(result, arithmetic, carry, overflow);
if op != CMP {
self.gpr[RD] = result;
}
CpuAction::AdvancePC(Seq)
}
/// Format 4
/// Execution Time:
/// 1S for AND,EOR,ADC,SBC,TST,NEG,CMP,CMN,ORR,BIC,MVN
/// 1S+1I for LSL,LSR,ASR,ROR
/// 1S+mI for MUL on ARMv4 (m=1..4; depending on MSBs of incoming Rd value)
pub(in super::super) fn exec_thumb_alu_ops<const OP: u16>(&mut self, insn: u16) -> CpuAction {
let rd = (insn & 0b111) as usize;
let rs = insn.rs();
let dst = self.get_reg(rd);
let src = self.get_reg(rs);
let mut carry = self.cpsr.C();
let mut overflow = self.cpsr.V();
use ThumbAluOps::*;
let op = ThumbAluOps::from_u16(OP).unwrap();
macro_rules! shifter_op {
($bs_op:expr) => {{
let result = self.shift_by_register($bs_op, rd, rs, &mut carry);
self.idle_cycle();
result
}};
}
let result = match op {
AND | TST => dst & src,
EOR => dst ^ src,
LSL => shifter_op!(BarrelShiftOpCode::LSL),
LSR => shifter_op!(BarrelShiftOpCode::LSR),
ASR => shifter_op!(BarrelShiftOpCode::ASR),
ROR => shifter_op!(BarrelShiftOpCode::ROR),
ADC => self.alu_adc_flags(dst, src, &mut carry, &mut overflow),
SBC => self.alu_sbc_flags(dst, src, &mut carry, &mut overflow),
NEG => self.alu_sub_flags(0, src, &mut carry, &mut overflow),
CMP => self.alu_sub_flags(dst, src, &mut carry, &mut overflow),
CMN => self.alu_add_flags(dst, src, &mut carry, &mut overflow),
ORR => dst | src,
MUL => {
let m = self.get_required_multipiler_array_cycles(src);
for _ in 0..m {
self.idle_cycle();
}
// TODO - meaningless values?
carry = false;
overflow = false;
dst.wrapping_mul(src)
}
BIC => dst & (!src),
MVN => !src,
};
self.alu_update_flags(result, op.is_arithmetic(), carry, overflow);
if !op.is_setting_flags() {
self.set_reg(rd, result);
}
CpuAction::AdvancePC(Seq)
}
/// Format 5
/// Execution Time:
/// 1S for ADD/MOV/CMP
/// 2S+1N for ADD/MOV with Rd=R15, and for BX
pub(in super::super) fn exec_thumb_hi_reg_op_or_bx<
const OP: u8,
const FLAG_H1: bool,
const FLAG_H2: bool,
>(
&mut self,
insn: u16,
) -> CpuAction {
let op = OpFormat5::from_u8(OP).unwrap();
let rd = (insn & 0b111) as usize;
let rs = insn.rs();
let dst_reg = if FLAG_H1 { rd + 8 } else { rd };
let src_reg = if FLAG_H2 { rs + 8 } else { rs };
let op1 = self.get_reg(dst_reg);
let op2 = self.get_reg(src_reg);
let mut result = CpuAction::AdvancePC(Seq);
match op {
OpFormat5::BX => {
return self.branch_exchange(self.get_reg(src_reg));
}
OpFormat5::ADD => {
self.set_reg(dst_reg, op1.wrapping_add(op2));
if dst_reg == REG_PC {
self.reload_pipeline16();
result = CpuAction::PipelineFlushed;
}
}
OpFormat5::CMP => {
let mut carry = self.cpsr.C();
let mut overflow = self.cpsr.V();
let result = self.alu_sub_flags(op1, op2, &mut carry, &mut overflow);
self.alu_update_flags(result, true, carry, overflow);
}
OpFormat5::MOV => {
self.set_reg(dst_reg, op2);
if dst_reg == REG_PC {
self.reload_pipeline16();
result = CpuAction::PipelineFlushed;
}
}
}
result
}
/// Format 6 load PC-relative (for loading immediates from literal pool)
/// Execution Time: 1S+1N+1I
pub(in super::super) fn exec_thumb_ldr_pc<const RD: usize>(&mut self, insn: u16) -> CpuAction {
let ofs = insn.word8() as Addr;
let addr = (self.pc & !3) + ofs;
self.gpr[RD] = self.load_32(addr, NonSeq);
// +1I
self.idle_cycle();
CpuAction::AdvancePC(NonSeq)
}
/// Helper function for the various ldr/str handlers
/// Execution Time: 1S+1N+1I for LDR, or 2N for STR
fn do_exec_thumb_ldr_str<const LOAD: bool, const BYTE: bool>(
&mut self,
insn: u16,
addr: Addr,
) -> CpuAction {
let rd = (insn & 0b111) as usize;
if LOAD {
let data = if BYTE {
self.load_8(addr, NonSeq) as u32
} else {
self.ldr_word(addr, NonSeq)
};
self.gpr[rd] = data;
// +1I
self.idle_cycle();
CpuAction::AdvancePC(Seq)
} else {
let value = self.get_reg(rd);
if BYTE {
self.store_8(addr, value as u8, NonSeq);
} else {
self.store_aligned_32(addr, value, NonSeq);
};
CpuAction::AdvancePC(NonSeq)
}
}
/// Format 7 load/store with register offset
/// Execution Time: 1S+1N+1I for LDR, or 2N for STR
pub(in super::super) fn exec_thumb_ldr_str_reg_offset<
const LOAD: bool,
const RO: usize,
const BYTE: bool,
>(
&mut self,
insn: u16,
) -> CpuAction {
let rb = insn.bit_range(3..6) as usize;
let addr = self.gpr[rb].wrapping_add(self.gpr[RO]);
self.do_exec_thumb_ldr_str::<LOAD, BYTE>(insn, addr)
}
/// Format 8 load/store sign-extended byte/halfword
/// Execution Time: 1S+1N+1I for LDR, or 2N for STR
pub(in super::super) fn exec_thumb_ldr_str_shb<
const RO: usize,
const SIGN_EXTEND: bool,
const HALFWORD: bool,
>(
&mut self,
insn: u16,
) -> CpuAction {
let rb = insn.bit_range(3..6) as usize;
let rd = (insn & 0b111) as usize;
let addr = self.gpr[rb].wrapping_add(self.gpr[RO]);
match (SIGN_EXTEND, HALFWORD) {
(false, false) =>
/* strh */
{
self.store_aligned_16(addr, self.gpr[rd] as u16, NonSeq);
}
(false, true) =>
/* ldrh */
{
self.gpr[rd] = self.ldr_half(addr, NonSeq);
self.idle_cycle();
}
(true, false) =>
/* ldsb */
{
let val = self.load_8(addr, NonSeq) as i8 as i32 as u32;
self.gpr[rd] = val;
self.idle_cycle();
}
(true, true) =>
/* ldsh */
{
let val = self.ldr_sign_half(addr, NonSeq);
self.gpr[rd] = val;
self.idle_cycle();
}
}
CpuAction::AdvancePC(NonSeq)
}
/// Format 9
/// Execution Time: 1S+1N+1I for LDR, or 2N for STR
pub(in super::super) fn exec_thumb_ldr_str_imm_offset<
const LOAD: bool,
const BYTE: bool,
const OFFSET: u8,
>(
&mut self,
insn: u16,
) -> CpuAction {
let rb = insn.bit_range(3..6) as usize;
let addr = self.gpr[rb].wrapping_add(OFFSET as u32);
self.do_exec_thumb_ldr_str::<LOAD, BYTE>(insn, addr)
}
/// Format 10
/// Execution Time: 1S+1N+1I for LDR, or 2N for STR
pub(in super::super) fn exec_thumb_ldr_str_halfword<const LOAD: bool, const OFFSET: i32>(
&mut self,
insn: u16,
) -> CpuAction {
let rb = insn.bit_range(3..6) as usize;
let rd = (insn & 0b111) as usize;
let base = self.gpr[rb] as i32;
let addr = base.wrapping_add(OFFSET) as Addr;
if LOAD {
let data = self.ldr_half(addr, NonSeq);
self.idle_cycle();
self.gpr[rd] = data;
CpuAction::AdvancePC(Seq)
} else {
self.store_aligned_16(addr, self.gpr[rd] as u16, NonSeq);
CpuAction::AdvancePC(NonSeq)
}
}
/// Format 11 load/store SP-relative
/// Execution Time: 1S+1N+1I for LDR, or 2N for STR
pub(in super::super) fn exec_thumb_ldr_str_sp<const LOAD: bool, const RD: usize>(
&mut self,
insn: u16,
) -> CpuAction {
let addr = self.gpr[REG_SP] + (insn.word8() as Addr);
if LOAD {
let data = self.ldr_word(addr, NonSeq);
self.idle_cycle();
self.gpr[RD] = data;
CpuAction::AdvancePC(Seq)
} else {
self.store_aligned_32(addr, self.gpr[RD], NonSeq);
CpuAction::AdvancePC(NonSeq)
}
}
/// Format 12
/// Execution Time: 1S
pub(in super::super) fn exec_thumb_load_address<const SP: bool, const RD: usize>(
&mut self,
insn: u16,
) -> CpuAction {
self.gpr[RD] = if SP {
self.gpr[REG_SP] + (insn.word8() as Addr)
} else {
(self.pc_thumb() & !0b10) + 4 + (insn.word8() as Addr)
};
CpuAction::AdvancePC(Seq)
}
/// Format 13
/// Execution Time: 1S
pub(in super::super) fn exec_thumb_add_sp<const FLAG_S: bool>(
&mut self,
insn: u16,
) -> CpuAction {
let op1 = self.gpr[REG_SP] as i32;
let offset = ((insn & 0x7f) << 2) as i32;
self.gpr[REG_SP] = if FLAG_S {
op1.wrapping_sub(offset) as u32
} else {
op1.wrapping_add(offset) as u32
};
CpuAction::AdvancePC(Seq)
}
/// Format 14
/// Execution Time: nS+1N+1I (POP), (n+1)S+2N+1I (POP PC), or (n-1)S+2N (PUSH).
pub(in super::super) fn exec_thumb_push_pop<const POP: bool, const FLAG_R: bool>(
&mut self,
insn: u16,
) -> CpuAction {
macro_rules! push {
($r:expr, $access:ident) => {
self.gpr[REG_SP] -= 4;
let stack_addr = self.gpr[REG_SP] & !3;
self.store_32(stack_addr, self.get_reg($r), $access);
$access = Seq;
};
}
macro_rules! pop {
($r:expr) => {
let val = self.load_32(self.gpr[REG_SP] & !3, Seq);
self.set_reg($r, val);
self.gpr[REG_SP] += 4;
};
($r:expr, $access:ident) => {
let val = self.load_32(self.gpr[REG_SP] & !3, $access);
$access = Seq;
self.set_reg($r, val);
self.gpr[REG_SP] += 4;
};
}
let mut result = CpuAction::AdvancePC(NonSeq);
let rlist = insn.register_list();
let mut access = MemoryAccess::NonSeq;
if POP {
for r in 0..8 {
if rlist.bit(r) {
pop!(r, access);
}
}
if FLAG_R {
pop!(REG_PC);
self.pc &= !1;
result = CpuAction::PipelineFlushed;
self.reload_pipeline16();
}
// Idle 1 cycle
self.idle_cycle();
} else {
if FLAG_R {
push!(REG_LR, access);
}
for r in (0..8).rev() {
if rlist.bit(r) {
push!(r, access);
}
}
}
result
}
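// Example of the ordering above: for 0xb510 (`push {r4, lr}`) the R flag pushes
// LR first, then the descending register loop pushes r4, so r4 ends up at the
// new (lowest) SP with LR 4 bytes above it; `pop {r4, pc}` (0xbd10) reverses
// this and reloads the pipeline through REG_PC.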
/// Format 15 multiple load/store
/// Execution Time: nS+1N+1I for LDM, or (n-1)S+2N for STM.
pub(in super::super) fn exec_thumb_ldm_stm<const LOAD: bool, const RB: usize>(
&mut self,
insn: u16,
) -> CpuAction {
let mut result = CpuAction::AdvancePC(NonSeq);
let align_preserve = self.gpr[RB] & 3;
let mut addr = self.gpr[RB] & !3;
let rlist = insn.register_list();
// let mut first = true;
if rlist != 0 {
if LOAD {
let mut access = NonSeq;
for r in 0..8 {
if rlist.bit(r) {
let val = self.load_32(addr, access);
access = Seq;
addr += 4;
self.set_reg(r, val);
}
}
self.idle_cycle();
if !rlist.bit(RB) {
self.gpr[RB] = addr + align_preserve;
}
} else {
let mut first = true;
let mut access = NonSeq;
for r in 0..8 {
if rlist.bit(r) {
let v = if r != RB {
self.gpr[r]
} else if first {
first = false;
addr
} else {
addr + (rlist.count_ones() - 1) * 4
};
self.store_32(addr, v, access);
access = Seq;
addr += 4;
}
self.gpr[RB] = addr + align_preserve;
}
}
} else {
// From gbatek.htm: Empty Rlist: R15 loaded/stored (ARMv4 only), and Rb=Rb+40h (ARMv4-v5).
if LOAD {
let val = self.load_32(addr, NonSeq);
self.pc = val & !1;
result = CpuAction::PipelineFlushed;
self.reload_pipeline16();
} else {
self.store_32(addr, self.pc + 2, NonSeq);
}
addr += 0x40;
self.gpr[RB] = addr + align_preserve;
}
result
}
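// Empty-rlist example of the branch above: with rlist == 0 an LDM loads a
// single word into PC (flushing the pipeline) and an STM stores pc + 2, and in
// both cases the base register advances by 0x40, as per the GBATEK note.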
/// Format 16 conditional branch
/// Execution Time:
/// 2S+1N if condition true (jump executed)
/// 1S if condition false
pub(in super::super) fn exec_thumb_branch_with_cond<const COND: u8>(
&mut self,
insn: u16,
) -> CpuAction {
let cond = ArmCond::from_u8(COND).expect("bad cond");
if !self.check_arm_cond(cond) {
CpuAction::AdvancePC(Seq)
} else {
let offset = insn.bcond_offset();
self.pc = (self.pc as i32).wrapping_add(offset) as u32;
self.reload_pipeline16();
CpuAction::PipelineFlushed
}
}
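// Worked offset example: a branch-to-self such as `beq .` (0xd0fe) has offset
// byte 0xfe, and bcond_offset() sign-extends and doubles it to -4, which
// exactly cancels the 4-byte THUMB prefetch so pc ends up back at the branch.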
/// Format 17 software interrupt
/// Execution Time: 2S+1N
pub(in super::super) fn exec_thumb_swi(&mut self, _insn: u16) -> CpuAction {
self.exception(Exception::SoftwareInterrupt, self.pc - 2); // implies pipeline reload
CpuAction::PipelineFlushed
}
/// Format 18 unconditional branch
/// Execution Time: 2S+1N
pub(in super::super) fn exec_thumb_branch(&mut self, insn: u16) -> CpuAction {
let offset = (insn.offset11() << 21) >> 20;
self.pc = (self.pc as i32).wrapping_add(offset) as u32;
self.reload_pipeline16(); // 2S + 1N
CpuAction::PipelineFlushed
}
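// Worked offset example: `b .` (0xe7fe) has offset11 = 0x7fe; the shift pair
// (0x7fe << 21) >> 20 sign-extends it and scales it to bytes, giving -4, so the
// branch lands back on its own address once the prefetch is subtracted out.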
/// Format 19 long branch with link
/// Execution Time: 3S+1N (first opcode 1S, second opcode 2S+1N).
pub(in super::super) fn exec_thumb_branch_long_with_link<const FLAG_LOW_OFFSET: bool>(
&mut self,
insn: u16,
) -> CpuAction {
let mut off = insn.offset11();
if FLAG_LOW_OFFSET {
off <<= 1;
let next_pc = (self.pc - 2) | 1;
self.pc = ((self.gpr[REG_LR] & !1) as i32).wrapping_add(off) as u32;
self.gpr[REG_LR] = next_pc;
self.reload_pipeline16(); // implies 2S + 1N
CpuAction::PipelineFlushed
} else {
off = (off << 21) >> 9;
self.gpr[REG_LR] = (self.pc as i32).wrapping_add(off) as u32;
CpuAction::AdvancePC(Seq) // 1S
}
}
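// Worked example (assuming the usual encoding where bit 11 selects the
// low-offset half): for the pair 0xf000, 0xf820 at address A, the first half
// sets LR = A + 4 (high offset 0), the second adds 0x020 << 1 = 0x40 to it,
// branching to A + 0x44 and leaving LR = (A + 4) | 1 as the THUMB return
// address.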
pub fn thumb_undefined(&mut self, insn: u16) -> CpuAction {
panic!(
"executing undefind thumb instruction {:04x} at @{:08x}",
insn,
self.pc_thumb()
)
}
}


@@ -1,485 +0,0 @@
use super::alu::*;
use super::arm::*;
use super::memory::Addr;
use super::InstructionDecoder;
use bit::BitIndex;
use byteorder::{LittleEndian, ReadBytesExt};
use num::FromPrimitive;
#[cfg(feature = "debugger")]
pub mod disass;
pub mod exec;
#[derive(Serialize, Deserialize, Debug, Copy, Clone, PartialEq, Eq)]
pub enum ThumbFormat {
/// Format 1
MoveShiftedReg,
/// Format 2
AddSub,
/// Format 3
DataProcessImm,
/// Format 4
AluOps,
/// Format 5
HiRegOpOrBranchExchange,
/// Format 6
LdrPc,
/// Format 7
LdrStrRegOffset,
/// Format 8
LdrStrSHB,
/// Format 9
LdrStrImmOffset,
/// Format 10
LdrStrHalfWord,
/// Format 11
LdrStrSp,
/// Format 12
LoadAddress,
/// Format 13
AddSp,
/// Format 14
PushPop,
/// Format 15
LdmStm,
/// Format 16
BranchConditional,
/// Format 17
Swi,
/// Format 18
Branch,
/// Format 19
BranchLongWithLink,
/// Not an actual thumb format
Undefined,
}
impl From<u16> for ThumbFormat {
fn from(raw: u16) -> ThumbFormat {
use ThumbFormat::*;
if raw & 0xf800 == 0x1800 {
AddSub
} else if raw & 0xe000 == 0x0000 {
MoveShiftedReg
} else if raw & 0xe000 == 0x2000 {
DataProcessImm
} else if raw & 0xfc00 == 0x4000 {
AluOps
} else if raw & 0xfc00 == 0x4400 {
HiRegOpOrBranchExchange
} else if raw & 0xf800 == 0x4800 {
LdrPc
} else if raw & 0xf200 == 0x5000 {
LdrStrRegOffset
} else if raw & 0xf200 == 0x5200 {
LdrStrSHB
} else if raw & 0xe000 == 0x6000 {
LdrStrImmOffset
} else if raw & 0xf000 == 0x8000 {
LdrStrHalfWord
} else if raw & 0xf000 == 0x9000 {
LdrStrSp
} else if raw & 0xf000 == 0xa000 {
LoadAddress
} else if raw & 0xff00 == 0xb000 {
AddSp
} else if raw & 0xf600 == 0xb400 {
PushPop
} else if raw & 0xf000 == 0xc000 {
LdmStm
} else if raw & 0xff00 == 0xdf00 {
Swi
} else if raw & 0xf000 == 0xd000 {
BranchConditional
} else if raw & 0xf800 == 0xe000 {
Branch
} else if raw & 0xf000 == 0xf000 {
BranchLongWithLink
} else {
Undefined
}
}
}
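// The order of these tests matters because the masks overlap: 0x1800..=0x1fff
// satisfies both `raw & 0xf800 == 0x1800` (AddSub) and `raw & 0xe000 == 0x0000`
// (MoveShiftedReg), and 0xdf00..=0xdfff satisfies both the Swi and
// BranchConditional masks, so the more specific patterns are checked first.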
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
pub struct ThumbInstruction {
pub fmt: ThumbFormat,
pub raw: u16,
pub pc: Addr,
}
impl ThumbInstruction {
pub fn new(raw: u16, pc: Addr, fmt: ThumbFormat) -> ThumbInstruction {
ThumbInstruction { fmt, raw, pc }
}
}
impl InstructionDecoder for ThumbInstruction {
type IntType = u16;
fn decode(raw: u16, addr: Addr) -> Self {
let fmt = ThumbFormat::from(raw);
ThumbInstruction::new(raw, addr, fmt)
}
fn decode_from_bytes(bytes: &[u8], addr: Addr) -> Self {
let mut rdr = std::io::Cursor::new(bytes);
let raw = rdr.read_u16::<LittleEndian>().unwrap();
Self::decode(raw, addr)
}
fn get_raw(&self) -> u16 {
self.raw
}
}
#[derive(Debug, Primitive, PartialEq, Eq)]
pub enum OpFormat3 {
MOV = 0,
CMP = 1,
ADD = 2,
SUB = 3,
}
impl From<OpFormat3> for AluOpCode {
fn from(op: OpFormat3) -> AluOpCode {
match op {
OpFormat3::MOV => AluOpCode::MOV,
OpFormat3::CMP => AluOpCode::CMP,
OpFormat3::ADD => AluOpCode::ADD,
OpFormat3::SUB => AluOpCode::SUB,
}
}
}
#[derive(Debug, Primitive, PartialEq, Eq)]
pub enum OpFormat5 {
ADD = 0,
CMP = 1,
MOV = 2,
BX = 3,
}
#[derive(Debug, Primitive, PartialEq, Eq)]
pub enum ThumbAluOps {
AND = 0b0000,
EOR = 0b0001,
LSL = 0b0010,
LSR = 0b0011,
ASR = 0b0100,
ADC = 0b0101,
SBC = 0b0110,
ROR = 0b0111,
TST = 0b1000,
NEG = 0b1001,
CMP = 0b1010,
CMN = 0b1011,
ORR = 0b1100,
MUL = 0b1101,
BIC = 0b1110,
MVN = 0b1111,
}
impl ThumbAluOps {
pub fn is_setting_flags(&self) -> bool {
use ThumbAluOps::*;
matches!(self, TST | CMP | CMN)
}
pub fn is_arithmetic(&self) -> bool {
use ThumbAluOps::*;
matches!(self, ADC | SBC | NEG | CMP | CMN)
}
}
impl From<OpFormat5> for AluOpCode {
fn from(op: OpFormat5) -> AluOpCode {
match op {
OpFormat5::ADD => AluOpCode::ADD,
OpFormat5::CMP => AluOpCode::CMP,
OpFormat5::MOV => AluOpCode::MOV,
OpFormat5::BX => panic!("this should not be called if op = BX"),
}
}
}
/// A trait which provides methods to extract thumb instruction fields
pub trait ThumbDecodeHelper {
// Consts
// Methods
fn rs(&self) -> usize;
fn rb(&self) -> usize;
fn ro(&self) -> usize;
fn rn(&self) -> usize;
fn format1_op(&self) -> BarrelShiftOpCode;
fn format3_op(&self) -> OpFormat3;
fn format5_op(&self) -> OpFormat5;
fn format4_alu_op(&self) -> ThumbAluOps;
fn offset5(&self) -> u8;
fn bcond_offset(&self) -> i32;
fn offset11(&self) -> i32;
fn word8(&self) -> u16;
fn is_load(&self) -> bool;
fn is_subtract(&self) -> bool;
fn is_immediate_operand(&self) -> bool;
fn cond(&self) -> ArmCond;
fn flag(self, bit: usize) -> bool;
fn register_list(&self) -> u8;
fn sword7(&self) -> i32;
}
macro_rules! thumb_decode_helper_impl {
($($t:ty),*) => {$(
impl ThumbDecodeHelper for $t {
#[inline]
fn rs(&self) -> usize {
self.bit_range(3..6) as usize
}
#[inline]
/// Note: not true for LdmStm
fn rb(&self) -> usize {
self.bit_range(3..6) as usize
}
#[inline]
fn ro(&self) -> usize {
self.bit_range(6..9) as usize
}
#[inline]
fn rn(&self) -> usize {
self.bit_range(6..9) as usize
}
#[inline]
fn format1_op(&self) -> BarrelShiftOpCode {
BarrelShiftOpCode::from_u8(self.bit_range(11..13) as u8).unwrap()
}
#[inline]
fn format3_op(&self) -> OpFormat3 {
OpFormat3::from_u8(self.bit_range(11..13) as u8).unwrap()
}
#[inline]
fn format5_op(&self) -> OpFormat5 {
OpFormat5::from_u8(self.bit_range(8..10) as u8).unwrap()
}
#[inline]
fn format4_alu_op(&self) -> ThumbAluOps {
ThumbAluOps::from_u16(self.bit_range(6..10)).unwrap()
}
#[inline]
fn offset5(&self) -> u8 {
self.bit_range(6..11) as u8
}
#[inline]
fn bcond_offset(&self) -> i32 {
((((*self & 0xff) as u32) << 24) as i32) >> 23
}
#[inline]
fn offset11(&self) -> i32 {
(*self & 0x7FF) as i32
}
#[inline]
fn word8(&self) -> u16 {
(*self & 0xff) << 2
}
#[inline]
fn is_load(&self) -> bool {
self.bit(11)
}
#[inline]
fn is_subtract(&self) -> bool {
self.bit(9)
}
#[inline]
fn is_immediate_operand(&self) -> bool {
self.bit(10)
}
#[inline]
fn cond(&self) -> ArmCond {
ArmCond::from_u8(self.bit_range(8..12) as u8).expect("bad condition")
}
#[inline]
fn flag(self, bit: usize) -> bool {
self.bit(bit)
}
#[inline]
fn register_list(&self) -> u8 {
(*self & 0xff) as u8
}
#[inline]
fn sword7(&self) -> i32 {
let imm7 = *self & 0x7f;
if self.bit(7) {
-((imm7 << 2) as i32)
} else {
(imm7 << 2) as i32
}
}
}
)*}
}
thumb_decode_helper_impl!(u16);
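// A few worked values for the helpers above (illustrative only): for 0x4801
// (`ldr r0, [pc, #4]`, see the commented-out tests below) word8() == 4; for
// 0xb510 (`push {r4, lr}`) register_list() == 0x10; and for 0xb086
// (`sub sp, #0x18`) sword7() == -24.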
// #[cfg(test)]
// /// All instruction constants were generated using an ARM assembler.
// mod tests {
// use super::super::Core;
// use super::*;
// use crate::sysbus::BoxedMemory;
// use crate::Bus;
// #[test]
// fn mov_low_reg() {
// let bytes = vec![];
// let mut mem = BoxedMemory::new(bytes.into_boxed_slice(), 0xffff_ffff);
// let mut core = Core::new();
// core.set_reg(0, 0);
// // movs r0, #0x27
// let insn = ThumbInstruction::decode(0x2027, 0).unwrap();
// assert_eq!(format!("{}", insn), "mov\tr0, #0x27");
// core.exec_thumb(&mut mem, insn).unwrap();
// assert_eq!(core.get_reg(0), 0x27);
// }
// // #[test]
// // fn decode_add_sub() {
// // let insn = ThumbInstruction::decode(0xac19, 0).unwrap();
// // assert!(format!("add\tr4, r4"))
// // }
// #[test]
// fn ldr_pc() {
// // ldr r0, [pc, #4]
// let insn = ThumbInstruction::decode(0x4801, 0x6).unwrap();
// #[rustfmt::skip]
// let bytes = vec![
// /* 0: */ 0x00, 0x00,
// /* 2: */ 0x00, 0x00,
// /* 4: */ 0x00, 0x00,
// /* 6: <pc> */ 0x00, 0x00,
// /* 8: */ 0x00, 0x00, 0x00, 0x00,
// /* c: */ 0x78, 0x56, 0x34, 0x12,
// ];
// let mut mem = BoxedMemory::new(bytes.into_boxed_slice(), 0xffff_ffff);
// let mut core = Core::new();
// core.set_reg(0, 0);
// assert_eq!(format!("{}", insn), "ldr\tr0, [pc, #0x4] ; = #0xc");
// core.exec_thumb(&mut mem, insn).unwrap();
// assert_eq!(core.get_reg(0), 0x12345678);
// }
// #[test]
// fn ldr_str_reg_offset() {
// // str r0, [r4, r1]
// let str_insn = ThumbInstruction::decode(0x5060, 0x6).unwrap();
// // ldrb r2, [r4, r1]
// let ldr_insn = ThumbInstruction::decode(0x5c62, 0x6).unwrap();
// let mut core = Core::new();
// core.set_reg(0, 0x12345678);
// core.set_reg(2, 0);
// core.set_reg(1, 0x4);
// core.set_reg(4, 0xc);
// #[rustfmt::skip]
// let bytes = vec![
// /* 00h: */ 0xaa, 0xbb, 0xcc, 0xdd,
// /* 04h: */ 0xaa, 0xbb, 0xcc, 0xdd,
// /* 08h: */ 0xaa, 0xbb, 0xcc, 0xdd,
// /* 0ch: */ 0xaa, 0xbb, 0xcc, 0xdd,
// /* 10h: */ 0xaa, 0xbb, 0xcc, 0xdd,
// ];
// let mut mem = BoxedMemory::new(bytes.into_boxed_slice(), 0xffff_ffff);
// assert_eq!(format!("{}", str_insn), "str\tr0, [r4, r1]");
// assert_eq!(format!("{}", ldr_insn), "ldrb\tr2, [r4, r1]");
// core.exec_thumb(&mut mem, str_insn).unwrap();
// assert_eq!(mem.read_32(0x10), 0x12345678);
// core.exec_thumb(&mut mem, ldr_insn).unwrap();
// assert_eq!(core.get_reg(2), 0x78);
// }
// #[allow(overflowing_literals)]
// #[test]
// fn format8() {
// let mut core = Core::new();
// #[rustfmt::skip]
// let bytes = vec![
// /* 00h: */ 0xaa, 0xbb, 0xcc, 0xdd, 0xaa, 0xbb, 0xcc, 0xdd,
// /* 08h: */ 0xaa, 0xbb, 0xcc, 0xdd, 0xaa, 0xbb, 0xcc, 0xdd,
// /* 10h: */ 0xaa, 0xbb, 0xcc, 0xdd, 0xaa, 0xbb, 0xcc, 0xdd,
// ];
// let mut mem = BoxedMemory::new(bytes.into_boxed_slice(), 0xffff_ffff);
// core.gpr[4] = 0x12345678;
// core.gpr[3] = 0x2;
// core.gpr[0] = 0x4;
// // strh r4, [r3, r0]
// let decoded = ThumbInstruction::decode(0x521c, 0).unwrap();
// assert_eq!(format!("{}", decoded), "strh\tr4, [r3, r0]");
// core.exec_thumb(&mut mem, decoded).unwrap();
// assert_eq!(&mem.get_bytes(0x6)[..4], [0x78, 0x56, 0xaa, 0xbb]);
// // ldsb r2, [r7, r1]
// core.gpr[2] = 0;
// core.gpr[7] = 0x10;
// core.gpr[1] = 0x5;
// let decoded = ThumbInstruction::decode(0x567a, 0).unwrap();
// assert_eq!(format!("{}", decoded), "ldsb\tr2, [r7, r1]");
// core.exec_thumb(&mut mem, decoded).unwrap();
// assert_eq!(core.gpr[2], mem.read_8(0x15) as i8 as u32);
// // ldsh r3, [r4, r2]
// core.gpr[3] = 0x0;
// core.gpr[4] = 0x0;
// core.gpr[2] = 0x6;
// let decoded = ThumbInstruction::decode(0x5ea3, 0).unwrap();
// assert_eq!(format!("{}", decoded), "ldsh\tr3, [r4, r2]");
// core.exec_thumb(&mut mem, decoded).unwrap();
// assert_eq!(core.gpr[3], 0x5678);
// }
// }