Mirror of https://github.com/yuzu-mirror/yuzu (synced 2024-11-24 01:03:03 +00:00)

x64: Refactor to remove fake interfaces and general cleanups.

parent cfb354f11f
commit bd7e691f78

16 changed files with 52 additions and 666 deletions
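The change that repeats throughout this diff is the move from compiler-specific macros (_M_X86_64) to a single build-system define, ARCHITECTURE_X64, together with the removal of the fake (no-op) emitter fallback. A minimal sketch of the resulting guard pattern, assuming ARCHITECTURE_X64 is supplied as a compile definition by the build system (the exact CMake wiring is not shown in this diff):

#include <cstdio>

// Sketch only: ARCHITECTURE_X64 is assumed to be a compile definition from the
// build system, replacing checks on compiler macros such as _M_X86_64.
static const char* ShaderBackend() {
#ifdef ARCHITECTURE_X64
    return "x64 JIT";     // x64-only sources (common/x64/*, shader_jit_x64.cpp) are built
#else
    return "interpreter"; // no fake emitter any more; non-x64 builds interpret
#endif
}

int main() {
    std::printf("shader backend: %s\n", ShaderBackend());
    return 0;
}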
@@ -2,8 +2,8 @@
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/scm_rev.cpp.in" "${CMAKE_CURRENT_SOURCE_DIR}/scm_rev.cpp" @ONLY)

set(SRCS
    abi.cpp
    break_points.cpp
    cpu_detect.cpp
    emu_window.cpp
    file_util.cpp
    hash.cpp

@@ -22,7 +22,6 @@ set(SRCS
    )

set(HEADERS
    abi.h
    assert.h
    bit_field.h
    break_points.h

@@ -61,19 +60,14 @@ set(HEADERS
    vector_math.h
    )

if(_M_X86_64)
if(ARCHITECTURE_X64)
    set(SRCS ${SRCS}
        cpu_detect_x86.cpp
        x64_emitter.cpp)
        x64/abi.cpp
        x64/emitter.cpp)

    set(HEADERS ${HEADERS}
        x64_emitter.h)
else()
    set(SRCS ${SRCS}
        cpu_detect_generic.cpp)

    set(HEADERS ${HEADERS}
        fake_emitter.h)
        x64/abi.h
        x64/emitter.h)
endif()

create_directory_groups(${SRCS} ${HEADERS})

@@ -35,7 +35,7 @@
#ifndef _MSC_VER

#if defined(__x86_64__) || defined(_M_X86_64)
#if defined(__x86_64__) || defined(ARCHITECTURE_X64)
#define Crash() __asm__ __volatile__("int $3")
#elif defined(_M_ARM)
#define Crash() __asm__ __volatile__("trap")

@@ -62,7 +62,7 @@ CPUInfo::CPUInfo() {
// Detects the various CPU features
void CPUInfo::Detect() {
    memset(this, 0, sizeof(*this));
#ifdef _M_X86_64
#ifdef ARCHITECTURE_X64
    Mode64bit = true;
    OS64bit = true;
#endif

@@ -1,19 +0,0 @@
// Copyright 2014 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.

#include "cpu_detect.h"
#include "hash.h"

namespace Common {

CPUInfo cpu_info;

CPUInfo::CPUInfo() {
}

std::string CPUInfo::Summarize() {
    return "Generic";
}

} // namespace Common

@@ -1,465 +0,0 @@
// Copyright (C) 2003 Dolphin Project.

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, version 2.0.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License 2.0 for more details.

// A copy of the GPL 2.0 should have been included with the program.
// If not, see http://www.gnu.org/licenses/

// Official SVN repository and contact information can be found at
// http://code.google.com/p/dolphin-emu/

// WARNING - THIS LIBRARY IS NOT THREAD SAFE!!!

#pragma once

#include <vector>
#include <stdint.h>

#include "assert.h"
#include "common_types.h"

// TODO: Check if Pandora still needs signal.h/kill here. Symbian doesn't.

// VCVT flags
#define TO_FLOAT 0
#define TO_INT 1 << 0
#define IS_SIGNED 1 << 1
#define ROUND_TO_ZERO 1 << 2

namespace FakeGen
{
enum FakeReg
{
    // GPRs
    R0 = 0, R1, R2, R3, R4, R5,
    R6, R7, R8, R9, R10, R11,

    // SPRs
    // R13 - R15 are SP, LR, and PC.
    // Almost always referred to by name instead of register number
    R12 = 12, R13 = 13, R14 = 14, R15 = 15,
    R_IP = 12, R_SP = 13, R_LR = 14, R_PC = 15,

    // VFP single precision registers
    S0, S1, S2, S3, S4, S5, S6,
    S7, S8, S9, S10, S11, S12, S13,
    S14, S15, S16, S17, S18, S19, S20,
    S21, S22, S23, S24, S25, S26, S27,
    S28, S29, S30, S31,

    // VFP Double Precision registers
    D0, D1, D2, D3, D4, D5, D6, D7,
    D8, D9, D10, D11, D12, D13, D14, D15,
    D16, D17, D18, D19, D20, D21, D22, D23,
    D24, D25, D26, D27, D28, D29, D30, D31,

    // ASIMD Quad-Word registers
    Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7,
    Q8, Q9, Q10, Q11, Q12, Q13, Q14, Q15,

    // for NEON VLD/VST instructions
    REG_UPDATE = R13,
    INVALID_REG = 0xFFFFFFFF
};

enum CCFlags
{
    CC_EQ = 0, // Equal
    CC_NEQ, // Not equal
    CC_CS, // Carry Set
    CC_CC, // Carry Clear
    CC_MI, // Minus (Negative)
    CC_PL, // Plus
    CC_VS, // Overflow
    CC_VC, // No Overflow
    CC_HI, // Unsigned higher
    CC_LS, // Unsigned lower or same
    CC_GE, // Signed greater than or equal
    CC_LT, // Signed less than
    CC_GT, // Signed greater than
    CC_LE, // Signed less than or equal
    CC_AL, // Always (unconditional) 14
    CC_HS = CC_CS, // Alias of CC_CS Unsigned higher or same
    CC_LO = CC_CC, // Alias of CC_CC Unsigned lower
};
const u32 NO_COND = 0xE0000000;

enum ShiftType
{
    ST_LSL = 0,
    ST_ASL = 0,
    ST_LSR = 1,
    ST_ASR = 2,
    ST_ROR = 3,
    ST_RRX = 4
};
enum IntegerSize
{
    I_I8 = 0,
    I_I16,
    I_I32,
    I_I64
};

enum
{
    NUMGPRs = 13,
};

class FakeXEmitter;

enum OpType
{
    TYPE_IMM = 0,
    TYPE_REG,
    TYPE_IMMSREG,
    TYPE_RSR,
    TYPE_MEM
};

// This is no longer a proper operand2 class. Need to split up.
class Operand2
{
    friend class FakeXEmitter;
protected:
    u32 Value;

private:
    OpType Type;

    // IMM types
    u8 Rotation; // Only for u8 values

    // Register types
    u8 IndexOrShift;
    ShiftType Shift;
public:
    OpType GetType()
    {
        return Type;
    }
    Operand2() {}
    Operand2(u32 imm, OpType type = TYPE_IMM)
    {
        Type = type;
        Value = imm;
        Rotation = 0;
    }

    Operand2(FakeReg Reg)
    {
        Type = TYPE_REG;
        Value = Reg;
        Rotation = 0;
    }
    Operand2(u8 imm, u8 rotation)
    {
        Type = TYPE_IMM;
        Value = imm;
        Rotation = rotation;
    }
    Operand2(FakeReg base, ShiftType type, FakeReg shift) // RSR
    {
        Type = TYPE_RSR;
        ASSERT_MSG(type != ST_RRX, "Invalid Operand2: RRX does not take a register shift amount");
        IndexOrShift = shift;
        Shift = type;
        Value = base;
    }

    Operand2(FakeReg base, ShiftType type, u8 shift)// For IMM shifted register
    {
        if(shift == 32) shift = 0;
        switch (type)
        {
        case ST_LSL:
            ASSERT_MSG(shift < 32, "Invalid Operand2: LSL %u", shift);
            break;
        case ST_LSR:
            ASSERT_MSG(shift <= 32, "Invalid Operand2: LSR %u", shift);
            if (!shift)
                type = ST_LSL;
            if (shift == 32)
                shift = 0;
            break;
        case ST_ASR:
            ASSERT_MSG(shift < 32, "Invalid Operand2: ASR %u", shift);
            if (!shift)
                type = ST_LSL;
            if (shift == 32)
                shift = 0;
            break;
        case ST_ROR:
            ASSERT_MSG(shift < 32, "Invalid Operand2: ROR %u", shift);
            if (!shift)
                type = ST_LSL;
            break;
        case ST_RRX:
            ASSERT_MSG(shift == 0, "Invalid Operand2: RRX does not take an immediate shift amount");
            type = ST_ROR;
            break;
        }
        IndexOrShift = shift;
        Shift = type;
        Value = base;
        Type = TYPE_IMMSREG;
    }
    u32 GetData()
    {
        switch(Type)
        {
        case TYPE_IMM:
            return Imm12Mod(); // This'll need to be changed later
        case TYPE_REG:
            return Rm();
        case TYPE_IMMSREG:
            return IMMSR();
        case TYPE_RSR:
            return RSR();
        default:
            ASSERT_MSG(false, "GetData with Invalid Type");
            return 0;
        }
    }
    u32 IMMSR() // IMM shifted register
    {
        ASSERT_MSG(Type == TYPE_IMMSREG, "IMMSR must be imm shifted register");
        return ((IndexOrShift & 0x1f) << 7 | (Shift << 5) | Value);
    }
    u32 RSR() // Register shifted register
    {
        ASSERT_MSG(Type == TYPE_RSR, "RSR must be RSR Of Course");
        return (IndexOrShift << 8) | (Shift << 5) | 0x10 | Value;
    }
    u32 Rm()
    {
        ASSERT_MSG(Type == TYPE_REG, "Rm must be with Reg");
        return Value;
    }

    u32 Imm5()
    {
        ASSERT_MSG((Type == TYPE_IMM), "Imm5 not IMM value");
        return ((Value & 0x0000001F) << 7);
    }
    u32 Imm8()
    {
        ASSERT_MSG((Type == TYPE_IMM), "Imm8Rot not IMM value");
        return Value & 0xFF;
    }
    u32 Imm8Rot() // IMM8 with Rotation
    {
        ASSERT_MSG((Type == TYPE_IMM), "Imm8Rot not IMM value");
        ASSERT_MSG((Rotation & 0xE1) != 0, "Invalid Operand2: immediate rotation %u", Rotation);
        return (1 << 25) | (Rotation << 7) | (Value & 0x000000FF);
    }
    u32 Imm12()
    {
        ASSERT_MSG((Type == TYPE_IMM), "Imm12 not IMM");
        return (Value & 0x00000FFF);
    }

    u32 Imm12Mod()
    {
        // This is an IMM12 with the top four bits being rotation and the
        // bottom eight being an IMM. This is for instructions that need to
        // expand a 8bit IMM to a 32bit value and gives you some rotation as
        // well.
        // Each rotation rotates to the right by 2 bits
        ASSERT_MSG((Type == TYPE_IMM), "Imm12Mod not IMM");
        return ((Rotation & 0xF) << 8) | (Value & 0xFF);
    }
    u32 Imm16()
    {
        ASSERT_MSG((Type == TYPE_IMM), "Imm16 not IMM");
        return ( (Value & 0xF000) << 4) | (Value & 0x0FFF);
    }
    u32 Imm16Low()
    {
        return Imm16();
    }
    u32 Imm16High() // Returns high 16bits
    {
        ASSERT_MSG((Type == TYPE_IMM), "Imm16 not IMM");
        return ( ((Value >> 16) & 0xF000) << 4) | ((Value >> 16) & 0x0FFF);
    }
    u32 Imm24()
    {
        ASSERT_MSG((Type == TYPE_IMM), "Imm16 not IMM");
        return (Value & 0x0FFFFFFF);
    }
};

// Use these when you don't know if an imm can be represented as an operand2.
// This lets you generate both an optimal and a fallback solution by checking
// the return value, which will be false if these fail to find a Operand2 that
// represents your 32-bit imm value.
bool TryMakeOperand2(u32 imm, Operand2 &op2);
bool TryMakeOperand2_AllowInverse(u32 imm, Operand2 &op2, bool *inverse);
bool TryMakeOperand2_AllowNegation(s32 imm, Operand2 &op2, bool *negated);

// Use this only when you know imm can be made into an Operand2.
Operand2 AssumeMakeOperand2(u32 imm);

inline Operand2 R(FakeReg Reg) { return Operand2(Reg, TYPE_REG); }
inline Operand2 IMM(u32 Imm) { return Operand2(Imm, TYPE_IMM); }
inline Operand2 Mem(void *ptr) { return Operand2((u32)(uintptr_t)ptr, TYPE_IMM); }
//usage: struct {int e;} s; STRUCT_OFFSET(s,e)
#define STRUCT_OFF(str,elem) ((u32)((u32)&(str).elem-(u32)&(str)))

struct FixupBranch
{
    u8 *ptr;
    u32 condition; // Remembers our codition at the time
    int type; //0 = B 1 = BL
};

typedef const u8* JumpTarget;

// XXX: Stop polluting the global namespace
const u32 I_8 = (1 << 0);
const u32 I_16 = (1 << 1);
const u32 I_32 = (1 << 2);
const u32 I_64 = (1 << 3);
const u32 I_SIGNED = (1 << 4);
const u32 I_UNSIGNED = (1 << 5);
const u32 F_32 = (1 << 6);
const u32 I_POLYNOMIAL = (1 << 7); // Only used in VMUL/VMULL

u32 EncodeVd(FakeReg Vd);
u32 EncodeVn(FakeReg Vn);
u32 EncodeVm(FakeReg Vm);

u32 encodedSize(u32 value);

// Subtracts the base from the register to give us the real one
FakeReg SubBase(FakeReg Reg);

// See A.7.1 in the Fakev7-A
// VMUL F32 scalars can only be up to D15[0], D15[1] - higher scalars cannot be individually addressed
FakeReg DScalar(FakeReg dreg, int subScalar);
FakeReg QScalar(FakeReg qreg, int subScalar);

enum NEONAlignment {
    ALIGN_NONE = 0,
    ALIGN_64 = 1,
    ALIGN_128 = 2,
    ALIGN_256 = 3
};

class NEONXEmitter;

class FakeXEmitter
{
    friend struct OpArg; // for Write8 etc
private:
    u8 *code, *startcode;
    u8 *lastCacheFlushEnd;
    u32 condition;

protected:
    inline void Write32(u32 value) {*(u32*)code = value; code+=4;}

public:
    FakeXEmitter() : code(0), startcode(0), lastCacheFlushEnd(0) {
        condition = CC_AL << 28;
    }
    FakeXEmitter(u8 *code_ptr) {
        code = code_ptr;
        lastCacheFlushEnd = code_ptr;
        startcode = code_ptr;
        condition = CC_AL << 28;
    }
    virtual ~FakeXEmitter() {}

    void SetCodePtr(u8 *ptr) {}
    void ReserveCodeSpace(u32 bytes) {}
    const u8 *AlignCode16() { return nullptr; }
    const u8 *AlignCodePage() { return nullptr; }
    const u8 *GetCodePtr() const { return nullptr; }
    void FlushIcache() {}
    void FlushIcacheSection(u8 *start, u8 *end) {}
    u8 *GetWritableCodePtr() { return nullptr; }

    CCFlags GetCC() { return CCFlags(condition >> 28); }
    void SetCC(CCFlags cond = CC_AL) {}

    // Special purpose instructions

    // Do nothing
    void NOP(int count = 1) {} //nop padding - TODO: fast nop slides, for amd and intel (check their manuals)

#ifdef CALL
#undef CALL
#endif

    void QuickCallFunction(FakeReg scratchreg, const void *func);
    template <typename T> void QuickCallFunction(FakeReg scratchreg, T func) {
        QuickCallFunction(scratchreg, (const void *)func);
    }
}; // class FakeXEmitter

// Everything that needs to generate machine code should inherit from this.
// You get memory management for free, plus, you can use all the MOV etc functions without
// having to prefix them with gen-> or something similar.
class FakeXCodeBlock : public FakeXEmitter
{
protected:
    u8 *region;
    size_t region_size;

public:
    FakeXCodeBlock() : region(NULL), region_size(0) {}
    virtual ~FakeXCodeBlock() { if (region) FreeCodeSpace(); }

    // Call this before you generate any code.
    void AllocCodeSpace(int size) { }

    // Always clear code space with breakpoints, so that if someone accidentally executes
    // uninitialized, it just breaks into the debugger.
    void ClearCodeSpace() { }

    // Call this when shutting down. Don't rely on the destructor, even though it'll do the job.
    void FreeCodeSpace() { }

    bool IsInSpace(const u8 *ptr) const
    {
        return ptr >= region && ptr < region + region_size;
    }

    // Cannot currently be undone. Will write protect the entire code region.
    // Start over if you need to change the code (call FreeCodeSpace(), AllocCodeSpace()).
    void WriteProtect() { }
    void UnWriteProtect() { }

    void ResetCodePtr()
    {
        SetCodePtr(region);
    }

    size_t GetSpaceLeft() const
    {
        return region_size - (GetCodePtr() - region);
    }

    u8 *GetBasePtr() {
        return region;
    }

    size_t GetOffset(const u8 *ptr) const {
        return ptr - region;
    }
};

} // namespace
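
The Imm12Mod() comment in the deleted header above describes the ARM-style "modified immediate": a 4-bit rotation plus an 8-bit value, where each rotation step rotates the value right by two bits. A small worked example of that scheme (a sketch written for this page, not code from the repository):

#include <cstdint>
#include <cstdio>

// Decode an (imm8, rotation) pair: the represented value is imm8 rotated
// right by 2 * rotation bits, as described in the Imm12Mod() comment.
static std::uint32_t DecodeModifiedImm(std::uint8_t imm8, std::uint8_t rot4) {
    const unsigned r = (rot4 & 0xF) * 2;
    const std::uint32_t v = imm8;
    return r ? ((v >> r) | (v << (32 - r))) : v;
}

// Pack the same pair into the 12-bit field layout returned by Imm12Mod().
static std::uint32_t EncodeImm12Field(std::uint8_t imm8, std::uint8_t rot4) {
    return (std::uint32_t(rot4 & 0xF) << 8) | imm8;
}

int main() {
    // 0xFF with rotation 12 (rotate right by 24) represents 0x0000FF00.
    std::printf("value = 0x%08X, imm12 field = 0x%03X\n",
                DecodeModifiedImm(0xFF, 12), EncodeImm12Field(0xFF, 12));
    return 0;
}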
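The TryMakeOperand2 comment above also describes the intended usage pattern: try the compact rotated-immediate encoding first and, if the constant does not fit, emit a fallback sequence. A self-contained sketch of that decision (the fitting test here is a stand-in written for illustration, not the removed library routine):

#include <cstdint>
#include <cstdio>
#include <optional>

// Try to express a 32-bit constant as an 8-bit value plus a 4-bit rotation
// (value = imm8 rotated right by 2 * rotation). Returns the packed field on
// success, or nothing when a fallback is needed.
static std::optional<std::uint32_t> TryEncode(std::uint32_t imm) {
    for (unsigned rot = 0; rot < 16; ++rot) {
        const unsigned r = rot * 2;
        const std::uint32_t rotated = r ? ((imm << r) | (imm >> (32 - r))) : imm; // undo the ROR
        if (rotated <= 0xFF)
            return (rot << 8) | rotated; // fits: 4-bit rotation + 8-bit value
    }
    return std::nullopt;
}

int main() {
    for (std::uint32_t imm : {0x000000FFu, 0x0000FF00u, 0x12345678u}) {
        if (auto field = TryEncode(imm))
            std::printf("0x%08X -> single instruction, imm12 field 0x%03X\n", imm, *field);
        else
            std::printf("0x%08X -> needs a fallback (e.g. load from a literal pool)\n", imm);
    }
    return 0;
}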
@@ -27,7 +27,7 @@
////////////////////////////////////////////////////////////////////////////////////////////////////
// Platform detection

#if defined(__x86_64__) || defined(_M_X86_64) || defined(__aarch64__)
#if defined(__x86_64__) || defined(ARCHITECTURE_X64) || defined(__aarch64__)
#define EMU_ARCH_BITS 64
#elif defined(__i386) || defined(_M_IX86) || defined(__arm__) || defined(_M_ARM)
#define EMU_ARCH_BITS 32

@@ -15,8 +15,8 @@
// Official SVN repository and contact information can be found at
// http://code.google.com/p/dolphin-emu/

#include "x64_emitter.h"
#include "abi.h"
#include "emitter.h"

using namespace Gen;

@@ -27,7 +27,7 @@ void XEmitter::ABI_EmitPrologue(int maxCallParams)
{
#ifdef _M_IX86
    // Don't really need to do anything
#elif defined(_M_X86_64)
#elif defined(ARCHITECTURE_X64)
#if _WIN32
    int stacksize = ((maxCallParams + 1) & ~1) * 8 + 8;
    // Set up a stack frame so that we can call functions
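
The Win64 branch above reserves ((maxCallParams + 1) & ~1) * 8 + 8 bytes: the parameter area is rounded up to an even number of 8-byte slots, then 8 extra bytes are added, presumably to preserve the 16-byte stack alignment required by the Windows x64 ABI. A quick numeric check of that formula (standalone sketch, not part of the emitter):

#include <cstdio>

// Mirrors the stack-size expression used in ABI_EmitPrologue/ABI_EmitEpilogue;
// illustration only.
static int StackSize(int maxCallParams) {
    const int slots = (maxCallParams + 1) & ~1; // round up to an even slot count
    return slots * 8 + 8;                       // 8 bytes per slot, plus 8 for alignment
}

int main() {
    for (int params = 0; params <= 5; ++params)
        std::printf("maxCallParams = %d -> stacksize = %d bytes\n", params, StackSize(params));
    return 0;
}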
@@ -43,7 +43,7 @@ void XEmitter::ABI_EmitEpilogue(int maxCallParams)
{
#ifdef _M_IX86
    RET();
#elif defined(_M_X86_64)
#elif defined(ARCHITECTURE_X64)
#ifdef _WIN32
    int stacksize = ((maxCallParams+1)&~1)*8 + 8;
    ADD(64, R(RSP), Imm8(stacksize));

@@ -17,7 +17,7 @@

#pragma once

#include "common_types.h"
#include "common/common_types.h"

// x86/x64 ABI:s, and helpers to help follow them when JIT-ing code.
// All convensions return values in EAX (+ possibly EDX).

@@ -55,7 +55,7 @@
// 32-bit bog standard cdecl, shared between linux and windows
// MacOSX 32-bit is same as System V with a few exceptions that we probably don't care much about.

#elif _M_X86_64 // 64 bit calling convention
#elif ARCHITECTURE_X64 // 64 bit calling convention

#ifdef _WIN32 // 64-bit Windows - the really exotic calling convention

@@ -17,13 +17,13 @@

#include <cstring>

#include "logging/log.h"
#include "common/assert.h"
#include "common/cpu_detect.h"
#include "common/logging/log.h"
#include "common/memory_util.h"

#include "assert.h"
#include "x64_emitter.h"
#include "abi.h"
#include "cpu_detect.h"
#include "memory_util.h"
#include "emitter.h"

#define PRIx64 "llx"

@@ -164,7 +164,7 @@ void XEmitter::WriteSIB(int scale, int index, int base)
void OpArg::WriteRex(XEmitter *emit, int opBits, int bits, int customOp) const
{
    if (customOp == -1) customOp = operandReg;
#ifdef _M_X86_64
#ifdef ARCHITECTURE_X64
    u8 op = 0x40;
    // REX.W (whether operation is a 64-bit operation)
    if (opBits == 64) op |= 8;

@@ -236,7 +236,7 @@ void OpArg::WriteRest(XEmitter *emit, int extraBytes, X64Reg _operandReg,
    _offsetOrBaseReg = 5;
    emit->WriteModRM(0, _operandReg, _offsetOrBaseReg);
    //TODO : add some checks
#ifdef _M_X86_64
#ifdef ARCHITECTURE_X64
    u64 ripAddr = (u64)emit->GetCodePtr() + 4 + extraBytes;
    s64 distance = (s64)offset - (s64)ripAddr;
    ASSERT_MSG(

@@ -1463,7 +1463,7 @@ void XEmitter::MOVD_xmm(const OpArg &arg, X64Reg src) {WriteSSEOp(0x66, 0x7E, sr

void XEmitter::MOVQ_xmm(X64Reg dest, OpArg arg)
{
#ifdef _M_X86_64
#ifdef ARCHITECTURE_X64
    // Alternate encoding
    // This does not display correctly in MSVC's debugger, it thinks it's a MOVD
    arg.operandReg = dest;

@@ -17,11 +17,11 @@

#pragma once

#include "assert.h"
#include "common_types.h"
#include "code_block.h"
#include "common/assert.h"
#include "common/common_types.h"
#include "common/code_block.h"

#if defined(_M_X86_64) && !defined(_ARCH_64)
#if defined(ARCHITECTURE_X64) && !defined(_ARCH_64)
#define _ARCH_64
#endif

@@ -13,7 +13,6 @@ set(SRCS
    rasterizer.cpp
    shader/shader.cpp
    shader/shader_interpreter.cpp
    shader/shader_jit.cpp
    utils.cpp
    video_core.cpp
    )

@@ -39,17 +38,16 @@ set(HEADERS
    renderer_base.h
    shader/shader.h
    shader/shader_interpreter.h
    shader/shader_jit.h
    utils.h
    video_core.h
    )

if(_M_X86_64)
if(ARCHITECTURE_X64)
    set(SRCS ${SRCS}
        shader/shader_jit_x64.cpp)
else()
    set(SRCS ${SRCS}
        shader/shader_jit_fake.cpp)

    set(HEADERS ${HEADERS}
        shader/shader_jit_x64.h)
endif()

create_directory_groups(${SRCS} ${HEADERS})

@@ -15,7 +15,10 @@

#include "shader.h"
#include "shader_interpreter.h"
#include "shader_jit.h"

#ifdef ARCHITECTURE_X64
#include "shader_jit_x64.h"
#endif // ARCHITECTURE_X64

namespace Pica {

@@ -43,7 +46,7 @@ void Setup(UnitState& state) {
            jit_shader = jit.Compile();
            shader_map.emplace(cache_key, jit_shader);
        }
    }
#endif // ARCHITECTURE_X64
}

void Shutdown() {

@@ -92,7 +95,7 @@ OutputVertex Run(UnitState& state, const InputVertex& input, int num_attributes)
        RunInterpreter(state);
#else
    RunInterpreter(state);
#endif
#endif // ARCHITECTURE_X64

#if PICA_DUMP_SHADERS
    DebugUtils::DumpShader(setup.program_code.data(), state.debug.max_offset, setup.swizzle_data.data(),
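
The Setup()/Run() hunks above show the shader dispatch pattern after this commit: on x64 builds a compiled shader is looked up (or compiled once and cached) per program, and everything else falls back to the interpreter. A minimal sketch of that cache-then-dispatch idea, using simplified stand-in names (UnitState, CompileCurrentProgram, RunInterpreter) rather than the real Citra declarations:

#include <cstdint>
#include <cstdio>
#include <unordered_map>

// Stand-ins for the real types; sketch only.
struct UnitState { int dummy = 0; };
using CompiledShader = void (*)(UnitState*);

static void JittedStub(UnitState*) { std::puts("running cached jitted shader"); }
static CompiledShader CompileCurrentProgram() { return &JittedStub; } // pretend JIT
static void RunInterpreter(UnitState&) { std::puts("running interpreter"); }

static std::unordered_map<std::uint64_t, CompiledShader> shader_map;

static void RunShader(UnitState& state, std::uint64_t cache_key) {
#ifdef ARCHITECTURE_X64
    auto it = shader_map.find(cache_key);
    if (it == shader_map.end())
        it = shader_map.emplace(cache_key, CompileCurrentProgram()).first; // compile once
    if (it->second) {
        it->second(&state);
        return;
    }
#endif
    RunInterpreter(state); // non-x64 builds fall back to the interpreter
}

int main() {
    UnitState state;
    RunShader(state, 0x1234);
    RunShader(state, 0x1234); // second call hits the cache on x64 builds
    return 0;
}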
@@ -1,36 +0,0 @@
// Copyright 2015 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "video_core/pica.h"

#include "shader.h"
#include "shader_jit.h"

namespace Pica {

namespace Shader {

JitShader::JitShader() : jitted(nullptr) {
}

void JitShader::DoJit(JitCompiler& jit) {
    jitted = jit.Compile();
}

void JitShader::Run(UnitState& state) {
    if (jitted)
        jitted(&state);
}

JitCompiler::JitCompiler() {
    AllocCodeSpace(1024 * 1024 * 4);
}

void JitCompiler::Clear() {
    ClearCodeSpace();
}

} // namespace Shader

} // namespace Pica

@@ -1,91 +0,0 @@
// Copyright 2015 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/fake_emitter.h"

#include "video_core/shader/shader.h"
#include "video_core/shader/shader_jit.h"

namespace Pica {

namespace Shader {

using namespace FakeGen;

void Jit::Comp_ADD(Instruction instr) {
}

void Jit::Comp_DP3(Instruction instr) {
}

void Jit::Comp_DP4(Instruction instr) {
}

void Jit::Comp_MUL(Instruction instr) {
}

void Jit::Comp_FLR(Instruction instr) {
}

void Jit::Comp_MAX(Instruction instr) {
}

void Jit::Comp_MIN(Instruction instr) {
}

void Jit::Comp_MOVA(Instruction instr) {
}

void Jit::Comp_MOV(Instruction instr) {
}

void Jit::Comp_SLTI(Instruction instr) {
}

void Jit::Comp_RCP(Instruction instr) {
}

void Jit::Comp_RSQ(Instruction instr) {
}

void Jit::Comp_NOP(Instruction instr) {
}

void Jit::Comp_END(Instruction instr) {
}

void Jit::Comp_CALL(Instruction instr) {
}

void Jit::Comp_CALLC(Instruction instr) {
}

void Jit::Comp_CALLU(Instruction instr) {
}

void Jit::Comp_CMP(Instruction instr) {
}

void Jit::Comp_MAD(Instruction instr) {
}

void Jit::Comp_IF(Instruction instr) {
}

void Jit::Comp_LOOP(Instruction instr) {
}

void Jit::Comp_JMP(Instruction instr) {
}

void Jit::Comp_NextInstr(unsigned* offset) {
}

CompiledShader Jit::Compile() {
    return nullptr;
}

} // namespace Shader

} // namespace Pica

@@ -4,12 +4,13 @@

#include <smmintrin.h>

#include "common/abi.h"
#include "common/cpu_detect.h"
#include "common/x64_emitter.h"

#include "common/x64/abi.h"
#include "common/x64/emitter.h"

#include "shader.h"
#include "shader_jit.h"
#include "shader_jit_x64.h"

namespace Pica {

@@ -134,7 +135,7 @@ static const u8 NO_DEST_REG_MASK = 0xf;
*/
void JitCompiler::Compile_SwizzleSrc(Instruction instr, unsigned src_num, SourceRegister src_reg, X64Reg dest) {
    X64Reg src_ptr;
    std::size_t src_offset;
    int src_offset;

    if (src_reg.GetRegisterType() == RegisterType::FloatUniform) {
        src_ptr = UNIFORMS;

@@ -451,7 +452,6 @@ void JitCompiler::Compile_NOP(Instruction instr) {
void JitCompiler::Compile_END(Instruction instr) {
    ABI_PopAllCalleeSavedRegsAndAdjustStack();
    RET();
    done = true;
}

void JitCompiler::Compile_CALL(Instruction instr) {

@@ -655,7 +655,7 @@ CompiledShader* JitCompiler::Compile() {
    MOVAPS(NEGBIT, MDisp(RAX, 0));

    looping = false;
    done = false;

    while (offset < g_state.vs.program_code.size()) {
        Compile_NextInstr(&offset);
    }

@@ -663,6 +663,14 @@ CompiledShader* JitCompiler::Compile() {
    return (CompiledShader*)start;
}

JitCompiler::JitCompiler() {
    AllocCodeSpace(1024 * 1024 * 4);
}

void JitCompiler::Clear() {
    ClearCodeSpace();
}

} // namespace Shader

} // namespace Pica

|
@ -6,11 +6,7 @@
|
|||
|
||||
#include <nihstro/shader_bytecode.h>
|
||||
|
||||
#if defined(_M_X86_64)
|
||||
#include "common/x64_emitter.h"
|
||||
#else
|
||||
#include "common/fake_emitter.h"
|
||||
#endif
|
||||
#include "common/x64/emitter.h"
|
||||
|
||||
#include "video_core/pica.h"
|
||||
|
||||
|
@ -65,18 +61,16 @@ private:
|
|||
void Compile_Block(unsigned stop);
|
||||
void Compile_NextInstr(unsigned* offset);
|
||||
|
||||
#if defined(_M_X86_64)
|
||||
void Compile_SwizzleSrc(Instruction instr, unsigned src_num, SourceRegister src_reg, Gen::X64Reg dest);
|
||||
void Compile_DestEnable(Instruction instr, Gen::X64Reg dest);
|
||||
|
||||
void Compile_EvaluateCondition(Instruction instr);
|
||||
void Compile_UniformCondition(Instruction instr);
|
||||
#endif
|
||||
|
||||
/// Pointer to the variable that stores the current Pica code offset. Used to handle nested code blocks.
|
||||
unsigned* offset_ptr = nullptr;
|
||||
|
||||
bool done = false;
|
||||
/// Set to true if currently in a loop, used to check for the existence of nested loops
|
||||
bool looping = false;
|
||||
};
|
||||
|