2015-03-05 23:31:03 +01:00
// Copyright (c) 2012- PPSSPP Project.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, version 2.0 or later versions.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License 2.0 for more details.
// A copy of the GPL 2.0 should have been included with the program.
// If not, see http://www.gnu.org/licenses/
// Official git repository and contact information can be found at
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.
2016-10-12 17:32:52 +02:00
# include "ppsspp_config.h"
# if PPSSPP_ARCH(ARM64)
2015-03-05 23:31:03 +01:00
# include "Core/MemMap.h"
# include "Core/Config.h"
# include "Core/MIPS/MIPS.h"
# include "Core/MIPS/MIPSAnalyst.h"
# include "Core/MIPS/MIPSCodeUtils.h"
# include "Core/MIPS/ARM64/Arm64Jit.h"
# include "Core/MIPS/ARM64/Arm64RegCache.h"
# define _RS MIPS_GET_RS(op)
# define _RT MIPS_GET_RT(op)
# define _RD MIPS_GET_RD(op)
# define _FS MIPS_GET_FS(op)
# define _FT MIPS_GET_FT(op)
# define _FD MIPS_GET_FD(op)
# define _SA MIPS_GET_SA(op)
# define _POS ((op>> 6) & 0x1F)
# define _SIZE ((op>>11) & 0x1F)
# define _IMM16 (signed short)(op & 0xFFFF)
# define _IMM26 (op & 0x03FFFFFF)
// All functions should have CONDITIONAL_DISABLE, so we can narrow things down to a file quickly.
// Currently known non working ones should have DISABLE.
2019-04-15 12:07:57 +02:00
//#define CONDITIONAL_DISABLE(flag) { Comp_Generic(op); return; }
2019-02-03 14:01:51 -08:00
# define CONDITIONAL_DISABLE(flag) if (jo.Disabled(JitDisable::flag)) { Comp_Generic(op); return; }
2015-03-05 23:31:03 +01:00
# define DISABLE { Comp_Generic(op); return; }
2015-07-02 20:30:40 -07:00
namespace MIPSComp {
2015-03-05 23:31:03 +01:00
using namespace Arm64Gen ;
using namespace Arm64JitConstants ;
2015-03-16 00:29:20 +01:00
// Destroys SCRATCH2
void Arm64Jit : : SetScratch1ToEffectiveAddress ( MIPSGPReg rs , s16 offset ) {
if ( offset ) {
ADDI2R ( SCRATCH1 , gpr . R ( rs ) , offset , SCRATCH2 ) ;
} else {
MOV ( SCRATCH1 , gpr . R ( rs ) ) ;
}
2019-04-15 12:07:57 +02:00
# ifdef MASKED_PSP_MEMORY
ANDI2R ( SCRATCH1 , SCRATCH1 , 0x3FFFFFFF ) ;
# endif
2015-03-16 00:29:20 +01:00
}
2015-06-26 23:47:37 -07:00
// Emits code that leaves the effective address rs + offset in SCRATCH1, and emits
// range checks against the PSP memory map (scratchpad, user RAM, VRAM).
// Returns the not-taken fixup branches for all invalid-address gaps; the caller must
// SetJumpTarget() each of them past the actual memory access. Destroys SCRATCH2
// (via SetScratch1ToEffectiveAddress) and tempReg.
std::vector<FixupBranch> Arm64Jit::SetScratch1ForSafeAddress(MIPSGPReg rs, s16 offset, ARM64Reg tempReg) {
	std::vector<FixupBranch> skips;

	SetScratch1ToEffectiveAddress(rs, offset);

	// We can do this a little smarter by shifting out the lower 8 bits, since blocks are 0x100 aligned.
	// PSP_GetUserMemoryEnd() is dynamic, but the others encode to imms just fine.
	// So we only need to safety check the one value.
	// This is because ARM64 immediates for many instructions like CMP can only encode
	// immediates up to 12 bits, shifted by 12 or not.
	if ((PSP_GetUserMemoryEnd() & 0x000FFFFF) == 0) {
		// In other words, shift right 8, and kill off the top 4 bits as we don't want them involved in the compares.
		UBFX(tempReg, SCRATCH1, 8, 24 - 4);
		// Now check if we're higher than that.
		CMPI2R(tempReg, PSP_GetUserMemoryEnd() >> 8);
	} else {
		// Compare first using the tempReg (need it because we have a full 28-bit value), then shift into it.
		ANDI2R(SCRATCH1, SCRATCH1, 0x0FFFFFFF);
		CMPI2R(SCRATCH1, PSP_GetUserMemoryEnd(), tempReg);
		UBFX(tempReg, SCRATCH1, 8, 24);
	}
	// Above the end of user RAM: invalid, skip.
	skips.push_back(B(CC_HS));
	// If it's higher than memory start and we didn't skip yet, it must be good. Hurray.
	CMPI2R(tempReg, PSP_GetKernelMemoryBase() >> 8);
	FixupBranch inRAM = B(CC_HS);
	// If we got here and it's higher, then it's between VRAM and RAM - skip.
	CMPI2R(tempReg, PSP_GetVidMemEnd() >> 8);
	skips.push_back(B(CC_HS));
	// And if it's higher than VRAM and we're still here again, it's in VRAM.
	CMPI2R(tempReg, PSP_GetVidMemBase() >> 8);
	FixupBranch inVRAM = B(CC_HS);
	// Last gap, this is between SRAM and VRAM. Skip it.
	CMPI2R(tempReg, PSP_GetScratchpadMemoryEnd() >> 8);
	skips.push_back(B(CC_HS));
	// And for lower than SRAM, we just skip again.
	CMPI2R(tempReg, PSP_GetScratchpadMemoryBase() >> 8);
	skips.push_back(B(CC_LO));
	// At this point, we're either in SRAM (above) or in RAM/VRAM.
	SetJumpTarget(inRAM);
	SetJumpTarget(inVRAM);
	return skips;
}
2015-03-05 23:31:03 +01:00
// Compiles the MIPS unaligned word access instructions lwl/lwr (loads) and swl/swr
// (stores), o = 34/38/42/46. Each accesses the aligned word containing rs + offset
// and merges a byte-shifted portion of it with rt, selected by the address's low
// two bits. Adjacent lwl/lwr (or swl/swr) pairs covering one word are fused into a
// single aligned lw/sw when the unaligned-fusion optimization is enabled.
void Arm64Jit::Comp_ITypeMemLR(MIPSOpcode op, bool load) {
	CONDITIONAL_DISABLE(LSU);
	CheckMemoryBreakpoint();
	int offset = SignExtend16ToS32(op & 0xFFFF);
	MIPSGPReg rt = _RT;
	MIPSGPReg rs = _RS;
	int o = op >> 26;

	if (!js.inDelaySlot && !jo.Disabled(JitDisable::LSU_UNALIGNED)) {
		// Optimisation: Combine to single unaligned load/store
		bool isLeft = (o == 34 || o == 42);
		CheckMemoryBreakpoint(1);
		MIPSOpcode nextOp = GetOffsetInstruction(1);
		// Find a matching shift in opposite direction with opposite offset.
		// A matching pair differs by 4 in the opcode field and by 3 in the offset.
		if (nextOp == (isLeft ? (op.encoding + (4 << 26) - 3) : (op.encoding - (4 << 26) + 3))) {
			EatInstruction(nextOp);
			// Rebuild as a plain aligned access, keeping the "right" half's rs/rt/offset.
			nextOp = MIPSOpcode(((load ? 35 : 43) << 26) | ((isLeft ? nextOp : op) & 0x03FFFFFF)); //lw, sw
			Comp_ITypeMem(nextOp);
			return;
		}
	}

	u32 iaddr = gpr.IsImm(rs) ? offset + gpr.GetImm(rs) : 0xFFFFFFFF;
	std::vector<FixupBranch> skips;

	if (gpr.IsImm(rs) && Memory::IsValidAddress(iaddr)) {
		// Constant-address path: the byte shift is known at compile time,
		// so the masks and shifted merges can be emitted as immediates.
#ifdef MASKED_PSP_MEMORY
		u32 addr = iaddr & 0x3FFFFFFF;
#else
		u32 addr = iaddr;
#endif
		// Need to initialize since this only loads part of the register.
		// But rs no longer matters (even if rs == rt) since we have the address.
		gpr.MapReg(rt, load ? MAP_DIRTY : 0);
		gpr.SetRegImm(SCRATCH1, addr & ~3);

		u8 shift = (addr & 3) * 8;

		switch (o) {
		case 34: // lwl
			LDR(SCRATCH1, MEMBASEREG, SCRATCH1);
			ANDI2R(gpr.R(rt), gpr.R(rt), 0x00ffffff >> shift, INVALID_REG);
			ORR(gpr.R(rt), gpr.R(rt), SCRATCH1, ArithOption(gpr.R(rt), ST_LSL, 24 - shift));
			break;
		case 38: // lwr
			LDR(SCRATCH1, MEMBASEREG, SCRATCH1);
			ANDI2R(gpr.R(rt), gpr.R(rt), 0xffffff00 << (24 - shift), INVALID_REG);
			ORR(gpr.R(rt), gpr.R(rt), SCRATCH1, ArithOption(gpr.R(rt), ST_LSR, shift));
			break;
		case 42: // swl
			// Read-modify-write the target word in SCRATCH2.
			LDR(SCRATCH2, MEMBASEREG, SCRATCH1);
			ANDI2R(SCRATCH2, SCRATCH2, 0xffffff00 << shift, INVALID_REG);
			ORR(SCRATCH2, SCRATCH2, gpr.R(rt), ArithOption(gpr.R(rt), ST_LSR, 24 - shift));
			STR(SCRATCH2, MEMBASEREG, SCRATCH1);
			break;
		case 46: // swr
			LDR(SCRATCH2, MEMBASEREG, SCRATCH1);
			ANDI2R(SCRATCH2, SCRATCH2, 0x00ffffff >> (24 - shift), INVALID_REG);
			ORR(SCRATCH2, SCRATCH2, gpr.R(rt), ArithOption(gpr.R(rt), ST_LSL, shift));
			STR(SCRATCH2, MEMBASEREG, SCRATCH1);
			break;
		}
		return;
	}

	_dbg_assert_msg_(!gpr.IsImm(rs), "Invalid immediate address %08x? CPU bug?", iaddr);

	if (load) {
		// false: rt is only partially overwritten, so don't avoid loading it.
		gpr.MapDirtyIn(rt, rs, false);
	} else {
		gpr.MapInIn(rt, rs);
	}
	gpr.SpillLock(rt);
	gpr.SpillLock(rs);
	// Need to get temps before skipping safe mem.
	ARM64Reg LR_SCRATCH3 = gpr.GetAndLockTempR();
	// Stores (swl/swr) need a second temp for the read-modify-write.
	ARM64Reg LR_SCRATCH4 = o == 42 || o == 46 ? gpr.GetAndLockTempR() : INVALID_REG;

	if (!g_Config.bFastMemory && rs != MIPS_REG_SP) {
		skips = SetScratch1ForSafeAddress(rs, offset, SCRATCH2);
	} else {
		SetScratch1ToEffectiveAddress(rs, offset);
	}

	// Here's our shift amount: SCRATCH2 = (address & 3) * 8.
	ANDI2R(SCRATCH2, SCRATCH1, 3);
	LSL(SCRATCH2, SCRATCH2, 3);

	// Now align the address for the actual read.
	ANDI2R(SCRATCH1, SCRATCH1, ~3U);

	switch (o) {
	case 34: // lwl
		// rt = (rt & (0x00ffffff >> shift)) | (mem << (24 - shift))
		MOVI2R(LR_SCRATCH3, 0x00ffffff);
		LDR(SCRATCH1, MEMBASEREG, ArithOption(SCRATCH1));
		LSRV(LR_SCRATCH3, LR_SCRATCH3, SCRATCH2);
		AND(gpr.R(rt), gpr.R(rt), LR_SCRATCH3);
		// SCRATCH2 = 24 - shift
		NEG(SCRATCH2, SCRATCH2);
		ADDI2R(SCRATCH2, SCRATCH2, 24);
		LSLV(SCRATCH1, SCRATCH1, SCRATCH2);
		ORR(gpr.R(rt), gpr.R(rt), SCRATCH1);
		break;
	case 38: // lwr
		// rt = (rt & (0xffffff00 << (24 - shift))) | (mem >> shift)
		MOVI2R(LR_SCRATCH3, 0xffffff00);
		LDR(SCRATCH1, MEMBASEREG, ArithOption(SCRATCH1));
		LSRV(SCRATCH1, SCRATCH1, SCRATCH2);
		NEG(SCRATCH2, SCRATCH2);
		ADDI2R(SCRATCH2, SCRATCH2, 24);
		LSLV(LR_SCRATCH3, LR_SCRATCH3, SCRATCH2);
		AND(gpr.R(rt), gpr.R(rt), LR_SCRATCH3);
		ORR(gpr.R(rt), gpr.R(rt), SCRATCH1);
		break;
	case 42: // swl
		// mem = (mem & (0xffffff00 << shift)) | (rt >> (24 - shift))
		MOVI2R(LR_SCRATCH3, 0xffffff00);
		LDR(LR_SCRATCH4, MEMBASEREG, ArithOption(SCRATCH1));
		LSLV(LR_SCRATCH3, LR_SCRATCH3, SCRATCH2);
		AND(LR_SCRATCH4, LR_SCRATCH4, LR_SCRATCH3);
		NEG(SCRATCH2, SCRATCH2);
		ADDI2R(SCRATCH2, SCRATCH2, 24);

		LSRV(LR_SCRATCH3, gpr.R(rt), SCRATCH2);
		ORR(LR_SCRATCH4, LR_SCRATCH4, LR_SCRATCH3);

		STR(LR_SCRATCH4, MEMBASEREG, ArithOption(SCRATCH1));
		break;
	case 46: // swr
		// mem = (mem & (0x00ffffff >> (24 - shift))) | (rt << shift)
		MOVI2R(LR_SCRATCH3, 0x00ffffff);
		LDR(LR_SCRATCH4, MEMBASEREG, ArithOption(SCRATCH1));
		NEG(SCRATCH2, SCRATCH2);
		ADDI2R(SCRATCH2, SCRATCH2, 24);
		LSRV(LR_SCRATCH3, LR_SCRATCH3, SCRATCH2);
		AND(LR_SCRATCH4, LR_SCRATCH4, LR_SCRATCH3);
		// Undo the negation: SCRATCH2 back to the original shift amount.
		NEG(SCRATCH2, SCRATCH2);
		ADDI2R(SCRATCH2, SCRATCH2, 24);
		LSLV(LR_SCRATCH3, gpr.R(rt), SCRATCH2);
		ORR(LR_SCRATCH4, LR_SCRATCH4, LR_SCRATCH3);

		STR(LR_SCRATCH4, MEMBASEREG, ArithOption(SCRATCH1));
		break;
	}

	// Land the bad-address skips here, past the access.
	for (auto skip : skips) {
		SetJumpTarget(skip);
	}

	gpr.ReleaseSpillLocksAndDiscardTemps();
}
// Compiles the aligned MIPS I-type loads and stores (lb/lh/lw/lbu/lhu, sb/sh/sw)
// plus dispatches the unaligned forms (lwl/lwr/swl/swr) to Comp_ITypeMemLR.
// Picks the cheapest addressing strategy available: a pointerified rs with an
// immediate offset, a known constant address, or a runtime-computed (and, when
// fastmem is off, range-checked) address in SCRATCH1.
void Arm64Jit::Comp_ITypeMem(MIPSOpcode op) {
	CONDITIONAL_DISABLE(LSU);
	CheckMemoryBreakpoint();

	int offset = SignExtend16ToS32(op & 0xFFFF);
	bool load = false;
	MIPSGPReg rt = _RT;
	MIPSGPReg rs = _RS;
	int o = op >> 26;
	if (((op >> 29) & 1) == 0 && rt == MIPS_REG_ZERO) {
		// Don't load anything into $zr
		return;
	}

	u32 iaddr = gpr.IsImm(rs) ? offset + gpr.GetImm(rs) : 0xFFFFFFFF;
	std::vector<FixupBranch> skips;
	ARM64Reg targetReg = INVALID_REG;
	ARM64Reg addrReg = INVALID_REG;

	// Access width in bytes, used for the alignment check below.
	int dataSize = 4;
	switch (o) {
	case 37:
	case 33:
		dataSize = 2;
		break;
	case 36:
	case 32:
		dataSize = 1;
		break;
	// Store
	case 41:
		dataSize = 2;
		break;
	case 40:
		dataSize = 1;
		break;
	}

	switch (o) {
	case 32: //lb
	case 33: //lh
	case 35: //lw
	case 36: //lbu
	case 37: //lhu
		load = true;
		// fall through to the store cases - the paths below handle both.
	case 40: //sb
	case 41: //sh
	case 43: //sw
#ifndef MASKED_PSP_MEMORY
		if (jo.cachePointers && g_Config.bFastMemory) {
			// ARM has smaller load/store immediate displacements than MIPS, 12 bits - and some memory ops only have 8 bits.
			int offsetRange = 0x3ff;
			if (o == 41 || o == 33 || o == 37 || o == 32)
				offsetRange = 0xff; // 8 bit offset only
			if (!gpr.IsImm(rs) && rs != rt && (offset <= offsetRange) && offset >= 0 &&
				  (dataSize == 1 || (offset & (dataSize - 1)) == 0)) {  // Check that the offset is aligned to the access size as that's required for INDEX_UNSIGNED encodings. we can get here through fallback from lwl/lwr
				gpr.SpillLock(rs, rt);
				// Map rs directly as a host pointer so no address math is needed.
				gpr.MapRegAsPointer(rs);

				// For a store, try to avoid mapping a reg if not needed.
				targetReg = load ? INVALID_REG : gpr.TryMapTempImm(rt);
				if (targetReg == INVALID_REG) {
					gpr.MapReg(rt, load ? MAP_NOINIT : 0);
					targetReg = gpr.R(rt);
				}

				switch (o) {
				case 35: LDR(INDEX_UNSIGNED, targetReg, gpr.RPtr(rs), offset); break;
				case 37: LDRH(INDEX_UNSIGNED, targetReg, gpr.RPtr(rs), offset); break;
				case 33: LDRSH(INDEX_UNSIGNED, targetReg, gpr.RPtr(rs), offset); break;
				case 36: LDRB(INDEX_UNSIGNED, targetReg, gpr.RPtr(rs), offset); break;
				case 32: LDRSB(INDEX_UNSIGNED, targetReg, gpr.RPtr(rs), offset); break;
				// Store
				case 43: STR(INDEX_UNSIGNED, targetReg, gpr.RPtr(rs), offset); break;
				case 41: STRH(INDEX_UNSIGNED, targetReg, gpr.RPtr(rs), offset); break;
				case 40: STRB(INDEX_UNSIGNED, targetReg, gpr.RPtr(rs), offset); break;
				}
				gpr.ReleaseSpillLocksAndDiscardTemps();
				break;
			}
		}
#endif

		if (!load && gpr.IsImm(rt) && gpr.TryMapTempImm(rt) != INVALID_REG) {
			// We're storing an immediate value, let's see if we can optimize rt.
			if (!gpr.IsImm(rs) || !Memory::IsValidAddress(iaddr) || offset == 0) {
				// In this case, we're always going to need rs mapped, which may flush the temp imm.
				// We handle that in the cases below since targetReg is INVALID_REG.
				gpr.MapIn(rs);
			}
			targetReg = gpr.TryMapTempImm(rt);
		}

		if (gpr.IsImm(rs) && Memory::IsValidAddress(iaddr)) {
#ifdef MASKED_PSP_MEMORY
			u32 addr = iaddr & 0x3FFFFFFF;
#else
			u32 addr = iaddr;
#endif
			if (addr == iaddr && offset == 0) {
				// It was already safe. Let's shove it into a reg and use it directly.
				if (targetReg == INVALID_REG) {
					load ? gpr.MapDirtyIn(rt, rs) : gpr.MapInIn(rt, rs);
					targetReg = gpr.R(rt);
				}
				addrReg = gpr.R(rs);
			} else {
				// In this case, only map rt. rs+offset will be in SCRATCH1.
				if (targetReg == INVALID_REG) {
					gpr.MapReg(rt, load ? MAP_NOINIT : 0);
					targetReg = gpr.R(rt);
				}
				gpr.SetRegImm(SCRATCH1, addr);
				addrReg = SCRATCH1;
			}
		} else {
			// This gets hit in a few games, as a result of never-taken delay slots (some branch types
			// conditionally execute the delay slot instructions). Ignore in those cases.
			if (!js.inDelaySlot) {
				_dbg_assert_msg_(!gpr.IsImm(rs), "Invalid immediate address %08x? CPU bug?", iaddr);
			}

			// If we already have a targetReg, we optimized an imm, and rs is already mapped.
			if (targetReg == INVALID_REG) {
				if (load) {
					gpr.MapDirtyIn(rt, rs);
				} else {
					gpr.MapInIn(rt, rs);
				}
				targetReg = gpr.R(rt);
			}
			if (!g_Config.bFastMemory && rs != MIPS_REG_SP) {
				// Slow/safe path: range-check the address, collecting skips for bad ranges.
				skips = SetScratch1ForSafeAddress(rs, offset, SCRATCH2);
			} else {
				SetScratch1ToEffectiveAddress(rs, offset);
			}
			addrReg = SCRATCH1;
		}

		switch (o) {
		// Load
		case 35: LDR(targetReg, MEMBASEREG, addrReg); break;
		case 37: LDRH(targetReg, MEMBASEREG, addrReg); break;
		case 33: LDRSH(targetReg, MEMBASEREG, addrReg); break;
		case 36: LDRB(targetReg, MEMBASEREG, addrReg); break;
		case 32: LDRSB(targetReg, MEMBASEREG, addrReg); break;
		// Store
		case 43: STR(targetReg, MEMBASEREG, addrReg); break;
		case 41: STRH(targetReg, MEMBASEREG, addrReg); break;
		case 40: STRB(targetReg, MEMBASEREG, addrReg); break;
		}
		for (auto skip : skips) {
			SetJumpTarget(skip);
			// TODO: Could clear to zero here on load, if skipping this for good reads.
		}
		break;
	case 34: //lwl
	case 38: //lwr
		load = true;
		// fall through - Comp_ITypeMemLR handles both loads and stores.
	case 42: //swl
	case 46: //swr
		Comp_ITypeMemLR(op, load);
		break;
	default:
		Comp_Generic(op);
		return;
	}
}
2023-07-29 17:51:16 -07:00
void Arm64Jit : : Comp_StoreSync ( MIPSOpcode op ) {
CONDITIONAL_DISABLE ( LSU ) ;
DISABLE ;
}
2015-03-05 23:31:03 +01:00
void Arm64Jit : : Comp_Cache ( MIPSOpcode op ) {
2020-10-16 09:13:41 +02:00
CONDITIONAL_DISABLE ( LSU ) ;
2015-07-11 16:25:22 +02:00
int func = ( op > > 16 ) & 0x1F ;
2020-10-16 09:13:41 +02:00
// See Int_Cache for the definitions.
2015-07-11 16:25:22 +02:00
switch ( func ) {
2020-10-16 09:13:41 +02:00
case 24 : break ;
case 25 : break ;
case 27 : break ;
case 30 : break ;
2015-07-11 16:25:22 +02:00
default :
2020-10-16 09:13:41 +02:00
// Fall back to the interpreter.
2015-07-11 16:25:22 +02:00
DISABLE ;
}
2015-03-05 23:31:03 +01:00
}
}
2016-10-12 17:32:52 +02:00
# endif // PPSSPP_ARCH(ARM64)