Dataset schema (column | type | range or distinct values):
id | int64 | 0 - 755k
file_name | string | length 3 - 109
file_path | string | length 13 - 185
content | string | length 31 - 9.38M
size | int64 | 31 - 9.38M
language | string | 1 value
extension | string | 11 values
total_lines | int64 | 1 - 340k
avg_line_length | float64 | 2.18 - 149k
max_line_length | int64 | 7 - 2.22M
alphanum_fraction | float64 | 0 - 1
repo_name | string | length 6 - 65
repo_stars | int64 | 100 - 47.3k
repo_forks | int64 | 0 - 12k
repo_open_issues | int64 | 0 - 3.4k
repo_license | string | 9 values
repo_extraction_date | string | 92 values
exact_duplicates_redpajama | bool | 2 classes
near_duplicates_redpajama | bool | 2 classes
exact_duplicates_githubcode | bool | 2 classes
exact_duplicates_stackv2 | bool | 1 class
exact_duplicates_stackv1 | bool | 2 classes
near_duplicates_githubcode | bool | 2 classes
near_duplicates_stackv1 | bool | 2 classes
near_duplicates_stackv2 | bool | 1 class
id: 5,918 | file_name: cellSysutilAvc2.h | file_path: RPCS3_rpcs3/rpcs3/Emu/Cell/Modules/cellSysutilAvc2.h | content:
#pragma once
#include "sceNp2.h"
#include "Emu/Memory/vm_ptr.h"
// Error codes
enum CellSysutilAvc2Error : u32
{
CELL_AVC2_ERROR_UNKNOWN = 0x8002b701,
CELL_AVC2_ERROR_NOT_SUPPORTED = 0x8002b702,
CELL_AVC2_ERROR_NOT_INITIALIZED = 0x8002b703,
CELL_AVC2_ERROR_ALREADY_INITIALIZED = 0x8002b704,
CELL_AVC2_ERROR_INVALID_ARGUMENT = 0x8002b705,
CELL_AVC2_ERROR_OUT_OF_MEMORY = 0x8002b706,
CELL_AVC2_ERROR_ERROR_BAD_ID = 0x8002b707,
CELL_AVC2_ERROR_INVALID_STATUS = 0x8002b70a,
CELL_AVC2_ERROR_TIMEOUT = 0x8002b70b,
CELL_AVC2_ERROR_NO_SESSION = 0x8002b70d,
CELL_AVC2_ERROR_WINDOW_ALREADY_EXISTS = 0x8002b70f,
CELL_AVC2_ERROR_TOO_MANY_WINDOWS = 0x8002b710,
CELL_AVC2_ERROR_TOO_MANY_PEER_WINDOWS = 0x8002b711,
CELL_AVC2_ERROR_WINDOW_NOT_FOUND = 0x8002b712,
};
enum CellSysutilAvc2MediaType : u32
{
CELL_SYSUTIL_AVC2_VOICE_CHAT = 0x00000001,
CELL_SYSUTIL_AVC2_VIDEO_CHAT = 0x00000010,
};
enum CellSysutilAvc2VoiceQuality : u32
{
CELL_SYSUTIL_AVC2_VOICE_QUALITY_NORMAL = 0x00000001,
};
enum CellSysutilAvc2VideoQuality : u32
{
CELL_SYSUTIL_AVC2_VIDEO_QUALITY_NORMAL = 0x00000001,
};
enum CellSysutilAvc2FrameMode : u32
{
CELL_SYSUTIL_AVC2_FRAME_MODE_NORMAL = 0x00000001,
CELL_SYSUTIL_AVC2_FRAME_MODE_INTRA_ONLY = 0x00000002,
};
enum CellSysutilAvc2CoordinatesForm : u32
{
CELL_SYSUTIL_AVC2_VIRTUAL_COORDINATES = 0x00000001,
CELL_SYSUTIL_AVC2_ABSOLUTE_COORDINATES = 0x00000002,
};
enum CellSysutilAvc2VideoResolution : u32
{
CELL_SYSUTIL_AVC2_VIDEO_RESOLUTION_QQVGA = 0x00000001,
CELL_SYSUTIL_AVC2_VIDEO_RESOLUTION_QVGA = 0x00000002,
};
enum CellSysutilAvc2ChatTargetMode : u32
{
CELL_SYSUTIL_AVC2_CHAT_TARGET_MODE_ROOM = 0x00000100,
CELL_SYSUTIL_AVC2_CHAT_TARGET_MODE_TEAM = 0x00000200,
CELL_SYSUTIL_AVC2_CHAT_TARGET_MODE_PRIVATE = 0x00000300,
CELL_SYSUTIL_AVC2_CHAT_TARGET_MODE_DIRECT = 0x00001000,
};
enum CellSysutilAvc2AttributeId : u32
{
CELL_SYSUTIL_AVC2_ATTRIBUTE_VOICE_DETECT_EVENT_TYPE = 0x00001001,
CELL_SYSUTIL_AVC2_ATTRIBUTE_VOICE_DETECT_INTERVAL_TIME = 0x00001002,
CELL_SYSUTIL_AVC2_ATTRIBUTE_VOICE_DETECT_SIGNAL_LEVEL = 0x00001003,
CELL_SYSUTIL_AVC2_ATTRIBUTE_VOICE_MAX_BITRATE = 0x00001004,
CELL_SYSUTIL_AVC2_ATTRIBUTE_VOICE_DATA_FEC = 0x00001005,
CELL_SYSUTIL_AVC2_ATTRIBUTE_VOICE_PACKET_CONTENTION = 0x00001006,
CELL_SYSUTIL_AVC2_ATTRIBUTE_VOICE_DTX_MODE = 0x00001007,
CELL_SYSUTIL_AVC2_ATTRIBUTE_MIC_STATUS_DETECTION = 0x00001008,
CELL_SYSUTIL_AVC2_ATTRIBUTE_MIC_SETTING_NOTIFICATION = 0x00001009,
CELL_SYSUTIL_AVC2_ATTRIBUTE_VOICE_MUTING_NOTIFICATION = 0x0000100A,
CELL_SYSUTIL_AVC2_ATTRIBUTE_CAMERA_STATUS_DETECTION = 0x0000100B,
};
enum CellSysutilAvc2WindowAttributeId : u32
{
CELL_SYSUTIL_AVC2_WINDOW_ATTRIBUTE_ALPHA = 0x00002001,
CELL_SYSUTIL_AVC2_WINDOW_ATTRIBUTE_TRANSITION_TYPE = 0x00002002,
CELL_SYSUTIL_AVC2_WINDOW_ATTRIBUTE_TRANSITION_DURATION = 0x00002003,
CELL_SYSUTIL_AVC2_WINDOW_ATTRIBUTE_STRING_VISIBLE = 0x00002004,
CELL_SYSUTIL_AVC2_WINDOW_ATTRIBUTE_ROTATION = 0x00002005,
CELL_SYSUTIL_AVC2_WINDOW_ATTRIBUTE_ZORDER = 0x00002006,
CELL_SYSUTIL_AVC2_WINDOW_ATTRIBUTE_SURFACE = 0x00002007,
};
enum CellSysutilAvc2TransitionType : u32
{
CELL_SYSUTIL_AVC2_TRANSITION_NONE = 0xffffffff,
CELL_SYSUTIL_AVC2_TRANSITION_LINEAR = 0x00000000,
CELL_SYSUTIL_AVC2_TRANSITION_SLOWDOWN = 0x00000001,
CELL_SYSUTIL_AVC2_TRANSITION_FASTUP = 0x00000002,
CELL_SYSUTIL_AVC2_TRANSITION_ANGULAR = 0x00000003,
CELL_SYSUTIL_AVC2_TRANSITION_EXPONENT = 0x00000004,
};
enum CellSysutilAvc2WindowZorderMode : u32
{
CELL_SYSUTIL_AVC2_ZORDER_FORWARD_MOST = 0x00000001,
CELL_SYSUTIL_AVC2_ZORDER_BEHIND_MOST = 0x00000002,
};
enum
{
CELL_AVC2_CAMERA_STATUS_DETACHED = 0,
CELL_AVC2_CAMERA_STATUS_ATTACHED_OFF = 1,
CELL_AVC2_CAMERA_STATUS_ATTACHED_ON = 2,
CELL_AVC2_CAMERA_STATUS_UNKNOWN = 3,
};
enum
{
CELL_AVC2_MIC_STATUS_DETACHED = 0,
CELL_AVC2_MIC_STATUS_ATTACHED_OFF = 1,
CELL_AVC2_MIC_STATUS_ATTACHED_ON = 2,
CELL_AVC2_MIC_STATUS_UNKNOWN = 3,
};
enum
{
CELL_SYSUTIL_AVC2_STREAMING_MODE_NORMAL = 0,
CELL_SYSUTIL_AVC2_STREAMING_MODE_DIRECT_WAN = 1,
CELL_SYSUTIL_AVC2_STREAMING_MODE_DIRECT_LAN = 2,
};
enum
{
CELL_SYSUTIL_AVC2_VIDEO_SHARING_MODE_DISABLE = 0,
CELL_SYSUTIL_AVC2_VIDEO_SHARING_MODE_1 = 1,
CELL_SYSUTIL_AVC2_VIDEO_SHARING_MODE_2 = 2,
CELL_SYSUTIL_AVC2_VIDEO_SHARING_MODE_3 = 3,
};
enum
{
CELL_AVC2_EVENT_LOAD_SUCCEEDED = 0x00000001,
CELL_AVC2_EVENT_LOAD_FAILED = 0x00000002,
CELL_AVC2_EVENT_UNLOAD_SUCCEEDED = 0x00000003,
CELL_AVC2_EVENT_UNLOAD_FAILED = 0x00000004,
CELL_AVC2_EVENT_JOIN_SUCCEEDED = 0x00000005,
CELL_AVC2_EVENT_JOIN_FAILED = 0x00000006,
CELL_AVC2_EVENT_LEAVE_SUCCEEDED = 0x00000007,
CELL_AVC2_EVENT_LEAVE_FAILED = 0x00000008,
};
enum
{
CELL_AVC2_EVENT_SYSTEM_NEW_MEMBER_JOINED = 0x10000001,
CELL_AVC2_EVENT_SYSTEM_MEMBER_LEFT = 0x10000002,
CELL_AVC2_EVENT_SYSTEM_SESSION_ESTABLISHED = 0x10000003,
CELL_AVC2_EVENT_SYSTEM_SESSION_CANNOT_ESTABLISHED = 0x10000004,
CELL_AVC2_EVENT_SYSTEM_SESSION_DISCONNECTED = 0x10000005,
CELL_AVC2_EVENT_SYSTEM_VOICE_DETECTED = 0x10000006,
CELL_AVC2_EVENT_SYSTEM_MIC_DETECTED = 0x10000007,
CELL_AVC2_EVENT_SYSTEM_CAMERA_DETECTED = 0x10000008,
};
enum
{
CELL_AVC2_EVENT_PARAM_ERROR_UNKNOWN = 0x0000000000000001,
CELL_AVC2_EVENT_PARAM_ERROR_NOT_SUPPORTED = 0x0000000000000002,
CELL_AVC2_EVENT_PARAM_ERROR_INVALID_ARGUMENT = 0x0000000000000003,
CELL_AVC2_EVENT_PARAM_ERROR_OUT_OF_MEMORY = 0x0000000000000004,
CELL_AVC2_EVENT_PARAM_ERROR_INVALID_STATUS = 0x0000000000000005,
CELL_AVC2_EVENT_PARAM_ERROR_CONTEXT_DOES_NOT_EXIST = 0x0000000000000006,
CELL_AVC2_EVENT_PARAM_ERROR_ROOM_DOES_NOT_EXIST = 0x0000000000000007,
CELL_AVC2_EVENT_PARAM_ERROR_NETWORK_ERROR = 0x0000000000000008,
};
enum
{
CELL_AVC2_REQUEST_ID_SYSTEM_EVENT = 0x00000000
};
enum
{
CELL_SYSUTIL_AVC2_INIT_PARAM_VERSION = 140 // Older versions may be 100, 110, 120, 130
};
enum
{
AVC2_SPECIAL_ROOM_MEMBER_ID_CUSTOM_VIDEO_WINDOW = 0xfff0
};
typedef u32 CellSysutilAvc2EventId;
typedef u64 CellSysutilAvc2EventParam;
using CellSysutilAvc2Callback = void(CellSysutilAvc2EventId event_id, CellSysutilAvc2EventParam event_param, vm::ptr<void> userdata);
struct CellSysutilAvc2VoiceInitParam
{
be_t<CellSysutilAvc2VoiceQuality> voice_quality;
be_t<u16> max_speakers;
u8 mic_out_stream_sharing;
u8 reserved[25];
};
struct CellSysutilAvc2VideoInitParam
{
be_t<u32> video_quality;
be_t<u32> frame_mode;
be_t<u32> max_video_resolution;
be_t<u16> max_video_windows;
be_t<u16> max_video_framerate;
be_t<u32> max_video_bitrate;
be_t<u32> coordinates_form;
u8 video_stream_sharing;
u8 no_use_camera_device;
u8 reserved[6];
};
struct CellSysutilAvc2StreamingModeParam
{
be_t<u16> mode;
be_t<u16> port;
u8 reserved[10];
};
struct CellSysutilAvc2InitParam
{
be_t<u16> avc_init_param_version;
be_t<u16> max_players;
be_t<u16> spu_load_average;
union
{
be_t<u16> direct_streaming_mode;
CellSysutilAvc2StreamingModeParam streaming_mode;
};
u8 reserved[18];
be_t<u32> media_type;
CellSysutilAvc2VoiceInitParam voice_param;
CellSysutilAvc2VideoInitParam video_param;
u8 reserved2[22];
};
struct CellSysutilAvc2RoomMemberList
{
vm::bptr<SceNpMatching2RoomMemberId> member_id;
u8 member_num;
};
struct CellSysutilAvc2MemberIpAndPortList
{
vm::bptr<SceNpMatching2RoomMemberId> member_id;
vm::bptr<u32> dst_addr; // in_addr
vm::bptr<u16> dst_port; // in_port_t
be_t<SceNpMatching2RoomMemberId> my_member_id;
u8 member_num;
};
union CellSysutilAvc2AttributeParam
{
be_t<u64> int_param;
be_t<f32> float_param;
vm::bptr<void> ptr_param;
};
struct CellSysutilAvc2Attribute
{
be_t<CellSysutilAvc2AttributeId> attr_id;
CellSysutilAvc2AttributeParam attr_param;
};
union CellSysutilAvc2WindowAttributeParam
{
be_t<s32> int_vector[4];
be_t<f32> float_vector[4];
vm::bptr<void> ptr_vector[4];
};
struct CellSysutilAvc2WindowAttribute
{
be_t<CellSysutilAvc2WindowAttributeId> attr_id;
CellSysutilAvc2WindowAttributeParam attr_param;
};
struct CellSysutilAvc2PlayerInfo
{
be_t<SceNpMatching2RoomMemberId> member_id;
u8 joined;
u8 connected;
u8 mic_attached;
u8 reserved[11];
};
struct CellSysutilAvc2StreamingTarget
{
be_t<CellSysutilAvc2ChatTargetMode> target_mode;
union
{
CellSysutilAvc2RoomMemberList room_member_list;
SceNpMatching2TeamId team_id;
CellSysutilAvc2MemberIpAndPortList ip_and_port_list;
};
};
size: 8,709 | language: C++ | extension: .h | total_lines: 264 | avg_line_length: 31.238636 | max_line_length: 133 | alphanum_fraction: 0.764482 | repo_name: RPCS3/rpcs3 | repo_stars: 15,204 | repo_forks: 1,895 | repo_open_issues: 1,021 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | exact/near duplicate flags (RedPajama, GitHub Code, Stack v1, Stack v2): all false

id: 5,919 | file_name: sys_dbg.h | file_path: RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_dbg.h | content:
#pragma once
#include "Emu/Memory/vm_ptr.h"
#include "Emu/Cell/ErrorCodes.h"
// Syscalls
error_code sys_dbg_read_process_memory(s32 pid, u32 address, u32 size, vm::ptr<void> data);
error_code sys_dbg_write_process_memory(s32 pid, u32 address, u32 size, vm::cptr<void> data);
size: 278 | language: C++ | extension: .h | total_lines: 6 | avg_line_length: 44.833333 | max_line_length: 93 | alphanum_fraction: 0.754647 | repo_name: RPCS3/rpcs3 | repo_stars: 15,204 | repo_forks: 1,895 | repo_open_issues: 1,021 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | exact/near duplicate flags (RedPajama, GitHub Code, Stack v1, Stack v2): all false

id: 5,920 | file_name: sys_ppu_thread.h | file_path: RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_ppu_thread.h | content:
#pragma once
#include "Emu/Memory/vm_ptr.h"
#include "Emu/Cell/ErrorCodes.h"
class ppu_thread;
enum : s32
{
SYS_PPU_THREAD_ONCE_INIT = 0,
SYS_PPU_THREAD_DONE_INIT = 1,
};
// PPU Thread Flags
enum : u64
{
SYS_PPU_THREAD_CREATE_JOINABLE = 0x1,
SYS_PPU_THREAD_CREATE_INTERRUPT = 0x2,
};
struct sys_ppu_thread_stack_t
{
be_t<u32> pst_addr;
be_t<u32> pst_size;
};
struct ppu_thread_param_t
{
vm::bptr<void(u64)> entry;
be_t<u32> tls; // vm::bptr<void>
};
struct sys_ppu_thread_icontext_t
{
be_t<u64> gpr[32];
be_t<u32> cr;
be_t<u32> rsv1;
be_t<u64> xer;
be_t<u64> lr;
be_t<u64> ctr;
be_t<u64> pc;
};
// Syscalls
void _sys_ppu_thread_exit(ppu_thread& ppu, u64 errorcode);
s32 sys_ppu_thread_yield(ppu_thread& ppu); // Return value is ignored by the library
error_code sys_ppu_thread_join(ppu_thread& ppu, u32 thread_id, vm::ptr<u64> vptr);
error_code sys_ppu_thread_detach(ppu_thread& ppu, u32 thread_id);
error_code sys_ppu_thread_get_join_state(ppu_thread& ppu, vm::ptr<s32> isjoinable); // Error code is ignored by the library
error_code sys_ppu_thread_set_priority(ppu_thread& ppu, u32 thread_id, s32 prio);
error_code sys_ppu_thread_get_priority(ppu_thread& ppu, u32 thread_id, vm::ptr<s32> priop);
error_code sys_ppu_thread_get_stack_information(ppu_thread& ppu, vm::ptr<sys_ppu_thread_stack_t> sp);
error_code sys_ppu_thread_stop(ppu_thread& ppu, u32 thread_id);
error_code sys_ppu_thread_restart(ppu_thread& ppu);
error_code _sys_ppu_thread_create(ppu_thread& ppu, vm::ptr<u64> thread_id, vm::ptr<ppu_thread_param_t> param, u64 arg, u64 arg4, s32 prio, u32 stacksize, u64 flags, vm::cptr<char> threadname);
error_code sys_ppu_thread_start(ppu_thread& ppu, u32 thread_id);
error_code sys_ppu_thread_rename(ppu_thread& ppu, u32 thread_id, vm::cptr<char> name);
error_code sys_ppu_thread_recover_page_fault(ppu_thread& ppu, u32 thread_id);
error_code sys_ppu_thread_get_page_fault_context(ppu_thread& ppu, u32 thread_id, vm::ptr<sys_ppu_thread_icontext_t> ctxt);
size: 1,986 | language: C++ | extension: .h | total_lines: 51 | avg_line_length: 37.470588 | max_line_length: 192 | alphanum_fraction: 0.736241 | repo_name: RPCS3/rpcs3 | repo_stars: 15,204 | repo_forks: 1,895 | repo_open_issues: 1,021 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | exact/near duplicate flags (RedPajama, GitHub Code, Stack v1, Stack v2): all false

id: 5,921 | file_name: sys_lwcond.h | file_path: RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_lwcond.h | content:
#pragma once
#include "sys_sync.h"
#include "Emu/Memory/vm_ptr.h"
struct sys_lwmutex_t;
struct sys_lwcond_attribute_t
{
union
{
nse_t<u64, 1> name_u64;
char name[sizeof(u64)];
};
};
struct sys_lwcond_t
{
vm::bptr<sys_lwmutex_t> lwmutex;
be_t<u32> lwcond_queue; // lwcond pseudo-id
};
struct lv2_lwcond final : lv2_obj
{
static const u32 id_base = 0x97000000;
const be_t<u64> name;
const u32 lwid;
const lv2_protocol protocol;
vm::ptr<sys_lwcond_t> control;
shared_mutex mutex;
ppu_thread* sq{};
atomic_t<s32> lwmutex_waiters = 0;
lv2_lwcond(u64 name, u32 lwid, u32 protocol, vm::ptr<sys_lwcond_t> control) noexcept
: name(std::bit_cast<be_t<u64>>(name))
, lwid(lwid)
, protocol{static_cast<u8>(protocol)}
, control(control)
{
}
lv2_lwcond(utils::serial& ar);
void save(utils::serial& ar);
};
// Aux
class ppu_thread;
// Syscalls
error_code _sys_lwcond_create(ppu_thread& ppu, vm::ptr<u32> lwcond_id, u32 lwmutex_id, vm::ptr<sys_lwcond_t> control, u64 name);
error_code _sys_lwcond_destroy(ppu_thread& ppu, u32 lwcond_id);
error_code _sys_lwcond_signal(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u64 ppu_thread_id, u32 mode);
error_code _sys_lwcond_signal_all(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u32 mode);
error_code _sys_lwcond_queue_wait(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u64 timeout);
size: 1,363 | language: C++ | extension: .h | total_lines: 45 | avg_line_length: 28.311111 | max_line_length: 128 | alphanum_fraction: 0.72546 | repo_name: RPCS3/rpcs3 | repo_stars: 15,204 | repo_forks: 1,895 | repo_open_issues: 1,021 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | exact/near duplicate flags (RedPajama, GitHub Code, Stack v1, Stack v2): all false

id: 5,922 | file_name: sys_vm.h | file_path: RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_vm.h | content:
#pragma once
#include "Emu/Memory/vm_ptr.h"
#include "sys_memory.h"
#include <array>
enum : u64
{
SYS_VM_STATE_INVALID = 0ull,
SYS_VM_STATE_UNUSED = 1ull,
SYS_VM_STATE_ON_MEMORY = 2ull,
SYS_VM_STATE_STORED = 4ull,
SYS_VM_POLICY_AUTO_RECOMMENDED = 1ull,
};
struct sys_vm_statistics_t
{
be_t<u64> page_fault_ppu; // Number of bad virtual memory accesses from a PPU thread.
be_t<u64> page_fault_spu; // Number of bad virtual memory accesses from a SPU thread.
be_t<u64> page_in; // Number of virtual memory backup reading operations.
be_t<u64> page_out; // Number of virtual memory backup writing operations.
be_t<u32> pmem_total; // Total physical memory allocated for the virtual memory area.
be_t<u32> pmem_used; // Physical memory in use by the virtual memory area.
be_t<u64> timestamp;
};
// Block info
struct sys_vm_t
{
static const u32 id_base = 0x1;
static const u32 id_step = 0x1;
static const u32 id_count = 16;
lv2_memory_container* const ct;
const u32 addr;
const u32 size;
atomic_t<u32> psize;
sys_vm_t(u32 addr, u32 vsize, lv2_memory_container* ct, u32 psize);
~sys_vm_t();
SAVESTATE_INIT_POS(10);
sys_vm_t(utils::serial& ar);
void save(utils::serial& ar);
static std::array<atomic_t<u32>, id_count> g_ids;
static u32 find_id(u32 addr)
{
return g_ids[addr >> 28].load();
}
};
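// Illustrative note (an inference from the code above, not from the original header):
// find_id() indexes g_ids by the top 4 bits of the 32-bit guest address, i.e. one slot
// per 256 MiB region, which implies at most one sys_vm block per such region. For a
// hypothetical block mapped at 0x30000000, the lookup would be g_ids[0x30000000 >> 28] == g_ids[3].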
// Aux
class ppu_thread;
// SysCalls
error_code sys_vm_memory_map(ppu_thread& ppu, u64 vsize, u64 psize, u32 cid, u64 flag, u64 policy, vm::ptr<u32> addr);
error_code sys_vm_memory_map_different(ppu_thread& ppu, u64 vsize, u64 psize, u32 cid, u64 flag, u64 policy, vm::ptr<u32> addr);
error_code sys_vm_unmap(ppu_thread& ppu, u32 addr);
error_code sys_vm_append_memory(ppu_thread& ppu, u32 addr, u64 size);
error_code sys_vm_return_memory(ppu_thread& ppu, u32 addr, u64 size);
error_code sys_vm_lock(ppu_thread& ppu, u32 addr, u32 size);
error_code sys_vm_unlock(ppu_thread& ppu, u32 addr, u32 size);
error_code sys_vm_touch(ppu_thread& ppu, u32 addr, u32 size);
error_code sys_vm_flush(ppu_thread& ppu, u32 addr, u32 size);
error_code sys_vm_invalidate(ppu_thread& ppu, u32 addr, u32 size);
error_code sys_vm_store(ppu_thread& ppu, u32 addr, u32 size);
error_code sys_vm_sync(ppu_thread& ppu, u32 addr, u32 size);
error_code sys_vm_test(ppu_thread& ppu, u32 addr, u32 size, vm::ptr<u64> result);
error_code sys_vm_get_statistics(ppu_thread& ppu, u32 addr, vm::ptr<sys_vm_statistics_t> stat);
size: 2,454 | language: C++ | extension: .h | total_lines: 60 | avg_line_length: 39.166667 | max_line_length: 128 | alphanum_fraction: 0.721849 | repo_name: RPCS3/rpcs3 | repo_stars: 15,204 | repo_forks: 1,895 | repo_open_issues: 1,021 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | exact/near duplicate flags (RedPajama, GitHub Code, Stack v1, Stack v2): all false

id: 5,923 | file_name: sys_net.h | file_path: RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_net.h | content:
#pragma once
#include "Utilities/bit_set.h"
#include "Utilities/mutex.h"
#include "Emu/Memory/vm_ptr.h"
#include "Emu/Cell/ErrorCodes.h"
#include <vector>
#include <utility>
#include <functional>
#include <queue>
// Error codes
enum sys_net_error : s32
{
SYS_NET_ENOENT = 2,
SYS_NET_EINTR = 4,
SYS_NET_EBADF = 9,
SYS_NET_ENOMEM = 12,
SYS_NET_EACCES = 13,
SYS_NET_EFAULT = 14,
SYS_NET_EBUSY = 16,
SYS_NET_EINVAL = 22,
SYS_NET_EMFILE = 24,
SYS_NET_ENOSPC = 28,
SYS_NET_EPIPE = 32,
SYS_NET_EAGAIN = 35,
SYS_NET_EWOULDBLOCK = SYS_NET_EAGAIN,
SYS_NET_EINPROGRESS = 36,
SYS_NET_EALREADY = 37,
SYS_NET_EDESTADDRREQ = 39,
SYS_NET_EMSGSIZE = 40,
SYS_NET_EPROTOTYPE = 41,
SYS_NET_ENOPROTOOPT = 42,
SYS_NET_EPROTONOSUPPORT = 43,
SYS_NET_EOPNOTSUPP = 45,
SYS_NET_EPFNOSUPPORT = 46,
SYS_NET_EAFNOSUPPORT = 47,
SYS_NET_EADDRINUSE = 48,
SYS_NET_EADDRNOTAVAIL = 49,
SYS_NET_ENETDOWN = 50,
SYS_NET_ENETUNREACH = 51,
SYS_NET_ECONNABORTED = 53,
SYS_NET_ECONNRESET = 54,
SYS_NET_ENOBUFS = 55,
SYS_NET_EISCONN = 56,
SYS_NET_ENOTCONN = 57,
SYS_NET_ESHUTDOWN = 58,
SYS_NET_ETOOMANYREFS = 59,
SYS_NET_ETIMEDOUT = 60,
SYS_NET_ECONNREFUSED = 61,
SYS_NET_EHOSTDOWN = 64,
SYS_NET_EHOSTUNREACH = 65,
};
static constexpr sys_net_error operator-(sys_net_error v)
{
return sys_net_error{-+v};
}
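// Example (illustrative, not from the original header): -SYS_NET_EINVAL evaluates to
// sys_net_error{-22}; the negated form is presumably how failing network syscalls
// report these codes back to the guest.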
// Socket types (prefixed with SYS_NET_)
enum lv2_socket_type : s32
{
SYS_NET_SOCK_STREAM = 1,
SYS_NET_SOCK_DGRAM = 2,
SYS_NET_SOCK_RAW = 3,
SYS_NET_SOCK_DGRAM_P2P = 6,
SYS_NET_SOCK_STREAM_P2P = 10,
};
// Socket options (prefixed with SYS_NET_)
enum lv2_socket_option : s32
{
SYS_NET_SO_SNDBUF = 0x1001,
SYS_NET_SO_RCVBUF = 0x1002,
SYS_NET_SO_SNDLOWAT = 0x1003,
SYS_NET_SO_RCVLOWAT = 0x1004,
SYS_NET_SO_SNDTIMEO = 0x1005,
SYS_NET_SO_RCVTIMEO = 0x1006,
SYS_NET_SO_ERROR = 0x1007,
SYS_NET_SO_TYPE = 0x1008,
SYS_NET_SO_NBIO = 0x1100, // Non-blocking IO
SYS_NET_SO_TPPOLICY = 0x1101,
SYS_NET_SO_REUSEADDR = 0x0004,
SYS_NET_SO_KEEPALIVE = 0x0008,
SYS_NET_SO_BROADCAST = 0x0020,
SYS_NET_SO_LINGER = 0x0080,
SYS_NET_SO_OOBINLINE = 0x0100,
SYS_NET_SO_REUSEPORT = 0x0200,
SYS_NET_SO_ONESBCAST = 0x0800,
SYS_NET_SO_USECRYPTO = 0x1000,
SYS_NET_SO_USESIGNATURE = 0x2000,
SYS_NET_SOL_SOCKET = 0xffff,
};
// IP options (prefixed with SYS_NET_)
enum lv2_ip_option : s32
{
SYS_NET_IP_HDRINCL = 2,
SYS_NET_IP_TOS = 3,
SYS_NET_IP_TTL = 4,
SYS_NET_IP_MULTICAST_IF = 9,
SYS_NET_IP_MULTICAST_TTL = 10,
SYS_NET_IP_MULTICAST_LOOP = 11,
SYS_NET_IP_ADD_MEMBERSHIP = 12,
SYS_NET_IP_DROP_MEMBERSHIP = 13,
SYS_NET_IP_TTLCHK = 23,
SYS_NET_IP_MAXTTL = 24,
SYS_NET_IP_DONTFRAG = 26
};
// Family (prefixed with SYS_NET_)
enum lv2_socket_family : s32
{
SYS_NET_AF_UNSPEC = 0,
SYS_NET_AF_LOCAL = 1,
SYS_NET_AF_UNIX = SYS_NET_AF_LOCAL,
SYS_NET_AF_INET = 2,
SYS_NET_AF_INET6 = 24,
};
// Flags (prefixed with SYS_NET_)
enum
{
SYS_NET_MSG_OOB = 0x1,
SYS_NET_MSG_PEEK = 0x2,
SYS_NET_MSG_DONTROUTE = 0x4,
SYS_NET_MSG_EOR = 0x8,
SYS_NET_MSG_TRUNC = 0x10,
SYS_NET_MSG_CTRUNC = 0x20,
SYS_NET_MSG_WAITALL = 0x40,
SYS_NET_MSG_DONTWAIT = 0x80,
SYS_NET_MSG_BCAST = 0x100,
SYS_NET_MSG_MCAST = 0x200,
SYS_NET_MSG_USECRYPTO = 0x400,
SYS_NET_MSG_USESIGNATURE = 0x800,
};
// Shutdown types (prefixed with SYS_NET_)
enum
{
SYS_NET_SHUT_RD = 0,
SYS_NET_SHUT_WR = 1,
SYS_NET_SHUT_RDWR = 2,
};
// TCP options (prefixed with SYS_NET_)
enum lv2_tcp_option : s32
{
SYS_NET_TCP_NODELAY = 1,
SYS_NET_TCP_MAXSEG = 2,
SYS_NET_TCP_MSS_TO_ADVERTISE = 3,
};
// IP protocols (prefixed with SYS_NET_)
enum lv2_ip_protocol : s32
{
SYS_NET_IPPROTO_IP = 0,
SYS_NET_IPPROTO_ICMP = 1,
SYS_NET_IPPROTO_IGMP = 2,
SYS_NET_IPPROTO_TCP = 6,
SYS_NET_IPPROTO_UDP = 17,
SYS_NET_IPPROTO_ICMPV6 = 58,
};
// Poll events (prefixed with SYS_NET_)
enum
{
SYS_NET_POLLIN = 0x0001,
SYS_NET_POLLPRI = 0x0002,
SYS_NET_POLLOUT = 0x0004,
SYS_NET_POLLERR = 0x0008, /* revent only */
SYS_NET_POLLHUP = 0x0010, /* revent only */
SYS_NET_POLLNVAL = 0x0020, /* revent only */
SYS_NET_POLLRDNORM = 0x0040,
SYS_NET_POLLWRNORM = SYS_NET_POLLOUT,
SYS_NET_POLLRDBAND = 0x0080,
SYS_NET_POLLWRBAND = 0x0100,
};
enum lv2_socket_abort_flags : s32
{
SYS_NET_ABORT_STRICT_CHECK = 1,
};
// in_addr_t type prefixed with sys_net_
using sys_net_in_addr_t = u32;
// in_port_t type prefixed with sys_net_
using sys_net_in_port_t = u16;
// sa_family_t type prefixed with sys_net_
using sys_net_sa_family_t = u8;
// socklen_t type prefixed with sys_net_
using sys_net_socklen_t = u32;
// fd_set prefixed with sys_net_
struct sys_net_fd_set
{
be_t<u32> fds_bits[32];
u32 bit(s32 s) const
{
return (fds_bits[(s >> 5) & 31] >> (s & 31)) & 1u;
}
void set(s32 s)
{
fds_bits[(s >> 5) & 31] |= (1u << (s & 31));
}
};
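// Worked example (illustrative, not part of the original header): fds_bits is a 32-word
// bitmap, so a hypothetical descriptor s = 37 maps to word (37 >> 5) & 31 = 1 and bit
// 37 & 31 = 5; set(37) performs fds_bits[1] |= (1u << 5), after which bit(37) reads back 1.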
// hostent prefixed with sys_net_
struct sys_net_hostent
{
vm::bptr<char> h_name;
vm::bpptr<char> h_aliases;
be_t<s32> h_addrtype;
be_t<s32> h_length;
vm::bpptr<char> h_addr_list;
};
// in_addr prefixed with sys_net_
struct sys_net_in_addr
{
be_t<u32> _s_addr;
};
// iovec prefixed with sys_net_
struct sys_net_iovec
{
be_t<s32> zero1;
vm::bptr<void> iov_base;
be_t<s32> zero2;
be_t<u32> iov_len;
};
// ip_mreq prefixed with sys_net_
struct sys_net_ip_mreq
{
be_t<u32> imr_multiaddr;
be_t<u32> imr_interface;
};
// msghdr prefixed with sys_net_
struct sys_net_msghdr
{
be_t<s32> zero1;
vm::bptr<void> msg_name;
be_t<u32> msg_namelen;
be_t<s32> pad1;
be_t<s32> zero2;
vm::bptr<sys_net_iovec> msg_iov;
be_t<s32> msg_iovlen;
be_t<s32> pad2;
be_t<s32> zero3;
vm::bptr<void> msg_control;
be_t<u32> msg_controllen;
be_t<s32> msg_flags;
};
// pollfd prefixed with sys_net_
struct sys_net_pollfd
{
be_t<s32> fd;
be_t<s16> events;
be_t<s16> revents;
};
// sockaddr prefixed with sys_net_
struct sys_net_sockaddr
{
ENABLE_BITWISE_SERIALIZATION;
u8 sa_len;
u8 sa_family;
char sa_data[14];
};
// sockaddr_dl prefixed with sys_net_
struct sys_net_sockaddr_dl
{
ENABLE_BITWISE_SERIALIZATION;
u8 sdl_len;
u8 sdl_family;
be_t<u16> sdl_index;
u8 sdl_type;
u8 sdl_nlen;
u8 sdl_alen;
u8 sdl_slen;
char sdl_data[12];
};
// sockaddr_in prefixed with sys_net_
struct sys_net_sockaddr_in
{
ENABLE_BITWISE_SERIALIZATION;
u8 sin_len;
u8 sin_family;
be_t<u16> sin_port;
be_t<u32> sin_addr;
be_t<u64> sin_zero;
};
// sockaddr_in_p2p prefixed with sys_net_
struct sys_net_sockaddr_in_p2p
{
ENABLE_BITWISE_SERIALIZATION;
u8 sin_len;
u8 sin_family;
be_t<u16> sin_port;
be_t<u32> sin_addr;
be_t<u16> sin_vport;
char sin_zero[6];
};
// timeval prefixed with sys_net_
struct sys_net_timeval
{
be_t<s64> tv_sec;
be_t<s64> tv_usec;
};
// linger prefixed with sys_net_
struct sys_net_linger
{
be_t<s32> l_onoff;
be_t<s32> l_linger;
};
class ppu_thread;
// Syscalls
error_code sys_net_bnet_accept(ppu_thread&, s32 s, vm::ptr<sys_net_sockaddr> addr, vm::ptr<u32> paddrlen);
error_code sys_net_bnet_bind(ppu_thread&, s32 s, vm::cptr<sys_net_sockaddr> addr, u32 addrlen);
error_code sys_net_bnet_connect(ppu_thread&, s32 s, vm::ptr<sys_net_sockaddr> addr, u32 addrlen);
error_code sys_net_bnet_getpeername(ppu_thread&, s32 s, vm::ptr<sys_net_sockaddr> addr, vm::ptr<u32> paddrlen);
error_code sys_net_bnet_getsockname(ppu_thread&, s32 s, vm::ptr<sys_net_sockaddr> addr, vm::ptr<u32> paddrlen);
error_code sys_net_bnet_getsockopt(ppu_thread&, s32 s, s32 level, s32 optname, vm::ptr<void> optval, vm::ptr<u32> optlen);
error_code sys_net_bnet_listen(ppu_thread&, s32 s, s32 backlog);
error_code sys_net_bnet_recvfrom(ppu_thread&, s32 s, vm::ptr<void> buf, u32 len, s32 flags, vm::ptr<sys_net_sockaddr> addr, vm::ptr<u32> paddrlen);
error_code sys_net_bnet_recvmsg(ppu_thread&, s32 s, vm::ptr<sys_net_msghdr> msg, s32 flags);
error_code sys_net_bnet_sendmsg(ppu_thread&, s32 s, vm::cptr<sys_net_msghdr> msg, s32 flags);
error_code sys_net_bnet_sendto(ppu_thread&, s32 s, vm::cptr<void> buf, u32 len, s32 flags, vm::cptr<sys_net_sockaddr> addr, u32 addrlen);
error_code sys_net_bnet_setsockopt(ppu_thread&, s32 s, s32 level, s32 optname, vm::cptr<void> optval, u32 optlen);
error_code sys_net_bnet_shutdown(ppu_thread&, s32 s, s32 how);
error_code sys_net_bnet_socket(ppu_thread&, lv2_socket_family family, lv2_socket_type type, lv2_ip_protocol protocol);
error_code sys_net_bnet_close(ppu_thread&, s32 s);
error_code sys_net_bnet_poll(ppu_thread&, vm::ptr<sys_net_pollfd> fds, s32 nfds, s32 ms);
error_code sys_net_bnet_select(ppu_thread&, s32 nfds, vm::ptr<sys_net_fd_set> readfds, vm::ptr<sys_net_fd_set> writefds, vm::ptr<sys_net_fd_set> exceptfds, vm::ptr<sys_net_timeval> timeout);
error_code _sys_net_open_dump(ppu_thread&, s32 len, s32 flags);
error_code _sys_net_read_dump(ppu_thread&, s32 id, vm::ptr<void> buf, s32 len, vm::ptr<s32> pflags);
error_code _sys_net_close_dump(ppu_thread&, s32 id, vm::ptr<s32> pflags);
error_code _sys_net_write_dump(ppu_thread&, s32 id, vm::cptr<void> buf, s32 len, u32 unknown);
error_code sys_net_abort(ppu_thread&, s32 type, u64 arg, s32 flags);
error_code sys_net_infoctl(ppu_thread&, s32 cmd, vm::ptr<void> arg);
error_code sys_net_control(ppu_thread&, u32 arg1, s32 arg2, vm::ptr<void> arg3, s32 arg4);
error_code sys_net_bnet_ioctl(ppu_thread&, s32 arg1, u32 arg2, u32 arg3);
error_code sys_net_bnet_sysctl(ppu_thread&, u32 arg1, u32 arg2, u32 arg3, vm::ptr<void> arg4, u32 arg5, u32 arg6);
error_code sys_net_eurus_post_command(ppu_thread&, s32 arg1, u32 arg2, u32 arg3);
size: 9,976 | language: C++ | extension: .h | total_lines: 325 | avg_line_length: 29 | max_line_length: 190 | alphanum_fraction: 0.667048 | repo_name: RPCS3/rpcs3 | repo_stars: 15,204 | repo_forks: 1,895 | repo_open_issues: 1,021 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | exact/near duplicate flags (RedPajama, GitHub Code, Stack v1, Stack v2): all false

id: 5,924 | file_name: sys_ss.h | file_path: RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_ss.h | content:
#pragma once
#include "Emu/Memory/vm_ptr.h"
#include "Emu/Cell/ErrorCodes.h"
// Unofficial error code names
enum sys_ss_rng_error : u32
{
SYS_SS_RNG_ERROR_INVALID_PKG = 0x80010500,
SYS_SS_RNG_ERROR_ENOMEM = 0x80010501,
SYS_SS_RNG_ERROR_EAGAIN = 0x80010503,
SYS_SS_RNG_ERROR_EFAULT = 0x80010509,
SYS_SS_RTC_ERROR_UNK = 0x8001050f,
};
struct CellSsOpenPSID
{
be_t<u64> high;
be_t<u64> low;
};
error_code sys_ss_random_number_generator(u64 pkg_id, vm::ptr<void> buf, u64 size);
error_code sys_ss_access_control_engine(u64 pkg_id, u64 a2, u64 a3);
error_code sys_ss_get_console_id(vm::ptr<u8> buf);
error_code sys_ss_get_open_psid(vm::ptr<CellSsOpenPSID> psid);
error_code sys_ss_appliance_info_manager(u32 code, vm::ptr<u8> buffer);
error_code sys_ss_get_cache_of_product_mode(vm::ptr<u8> ptr);
error_code sys_ss_secure_rtc(u64 cmd, u64 a2, u64 a3, u64 a4);
error_code sys_ss_get_cache_of_flash_ext_flag(vm::ptr<u64> flag);
error_code sys_ss_get_boot_device(vm::ptr<u64> dev);
error_code sys_ss_update_manager(u64 pkg_id, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5, u64 a6);
error_code sys_ss_virtual_trm_manager(u64 pkg_id, u64 a1, u64 a2, u64 a3, u64 a4);
error_code sys_ss_individual_info_manager(u64 pkg_id, u64 a2, vm::ptr<u64> out_size, u64 a4, u64 a5, u64 a6);
size: 1,272 | language: C++ | extension: .h | total_lines: 29 | avg_line_length: 42.482759 | max_line_length: 109 | alphanum_fraction: 0.737692 | repo_name: RPCS3/rpcs3 | repo_stars: 15,204 | repo_forks: 1,895 | repo_open_issues: 1,021 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | exact/near duplicate flags (RedPajama, GitHub Code, Stack v1, Stack v2): all false

id: 5,925 | file_name: sys_event_flag.h | file_path: RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_event_flag.h | content:
#pragma once
#include "sys_sync.h"
#include "Emu/Memory/vm_ptr.h"
enum
{
SYS_SYNC_WAITER_SINGLE = 0x10000,
SYS_SYNC_WAITER_MULTIPLE = 0x20000,
SYS_EVENT_FLAG_WAIT_AND = 0x01,
SYS_EVENT_FLAG_WAIT_OR = 0x02,
SYS_EVENT_FLAG_WAIT_CLEAR = 0x10,
SYS_EVENT_FLAG_WAIT_CLEAR_ALL = 0x20,
};
struct sys_event_flag_attribute_t
{
be_t<u32> protocol;
be_t<u32> pshared;
be_t<u64> ipc_key;
be_t<s32> flags;
be_t<s32> type;
union
{
nse_t<u64, 1> name_u64;
char name[sizeof(u64)];
};
};
struct lv2_event_flag final : lv2_obj
{
static const u32 id_base = 0x98000000;
const lv2_protocol protocol;
const u64 key;
const s32 type;
const u64 name;
shared_mutex mutex;
atomic_t<u64> pattern;
ppu_thread* sq{};
lv2_event_flag(u32 protocol, u64 key, s32 type, u64 name, u64 pattern) noexcept
: protocol{static_cast<u8>(protocol)}
, key(key)
, type(type)
, name(name)
, pattern(pattern)
{
}
lv2_event_flag(utils::serial& ar);
static std::shared_ptr<void> load(utils::serial& ar);
void save(utils::serial& ar);
// Check mode arg
static bool check_mode(u32 mode)
{
switch (mode & 0xf)
{
case SYS_EVENT_FLAG_WAIT_AND: break;
case SYS_EVENT_FLAG_WAIT_OR: break;
default: return false;
}
switch (mode & ~0xf)
{
case 0: break;
case SYS_EVENT_FLAG_WAIT_CLEAR: break;
case SYS_EVENT_FLAG_WAIT_CLEAR_ALL: break;
default: return false;
}
return true;
}
// Check and clear pattern (must be atomic op)
static bool check_pattern(u64& pattern, u64 bitptn, u64 mode, u64* result)
{
// Write pattern
if (result)
{
*result = pattern;
}
// Check pattern
if (((mode & 0xf) == SYS_EVENT_FLAG_WAIT_AND && (pattern & bitptn) != bitptn) ||
((mode & 0xf) == SYS_EVENT_FLAG_WAIT_OR && (pattern & bitptn) == 0))
{
return false;
}
// Clear pattern if necessary
if ((mode & ~0xf) == SYS_EVENT_FLAG_WAIT_CLEAR)
{
pattern &= ~bitptn;
}
else if ((mode & ~0xf) == SYS_EVENT_FLAG_WAIT_CLEAR_ALL)
{
pattern = 0;
}
return true;
}
};
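// Worked example for check_pattern() (illustrative, not from the original header), assuming
// a hypothetical flag state: with pattern = 0b0110, a waiter passing bitptn = 0b0010 and
// mode = SYS_EVENT_FLAG_WAIT_AND | SYS_EVENT_FLAG_WAIT_CLEAR succeeds (0b0110 & 0b0010 == 0b0010),
// *result receives the pre-clear value 0b0110, and the stored pattern becomes 0b0100; with
// SYS_EVENT_FLAG_WAIT_CLEAR_ALL instead, the pattern would be reset to 0.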
// Aux
class ppu_thread;
// Syscalls
error_code sys_event_flag_create(ppu_thread& ppu, vm::ptr<u32> id, vm::ptr<sys_event_flag_attribute_t> attr, u64 init);
error_code sys_event_flag_destroy(ppu_thread& ppu, u32 id);
error_code sys_event_flag_wait(ppu_thread& ppu, u32 id, u64 bitptn, u32 mode, vm::ptr<u64> result, u64 timeout);
error_code sys_event_flag_trywait(ppu_thread& ppu, u32 id, u64 bitptn, u32 mode, vm::ptr<u64> result);
error_code sys_event_flag_set(cpu_thread& cpu, u32 id, u64 bitptn);
error_code sys_event_flag_clear(ppu_thread& ppu, u32 id, u64 bitptn);
error_code sys_event_flag_cancel(ppu_thread& ppu, u32 id, vm::ptr<u32> num);
error_code sys_event_flag_get(ppu_thread& ppu, u32 id, vm::ptr<u64> flags);
size: 2,742 | language: C++ | extension: .h | total_lines: 101 | avg_line_length: 24.693069 | max_line_length: 119 | alphanum_fraction: 0.693776 | repo_name: RPCS3/rpcs3 | repo_stars: 15,204 | repo_forks: 1,895 | repo_open_issues: 1,021 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | exact/near duplicate flags (RedPajama, GitHub Code, Stack v1, Stack v2): all false

id: 5,926 | file_name: sys_hid.h | file_path: RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_hid.h | content:
#pragma once
#include "Emu/Memory/vm_ptr.h"
#include "Emu/Cell/ErrorCodes.h"
// set sensor mode? also getinfo?
struct sys_hid_info_5
{
le_t<u16> vid;
le_t<u16> pid;
u8 status;
// todo: more in this, not sure what tho
};
struct sys_hid_info_2
{
be_t<u16> vid;
be_t<u16> pid;
u8 unk[17];
};
struct sys_hid_ioctl_68
{
u8 unk;
u8 unk2;
};
// unk
struct sys_hid_manager_514_pkg_d
{
be_t<u32> unk1;
u8 unk2;
};
// SysCalls
error_code sys_hid_manager_open(ppu_thread& ppu, u64 device_type, u64 port_no, vm::ptr<u32> handle);
error_code sys_hid_manager_ioctl(u32 hid_handle, u32 pkg_id, vm::ptr<void> buf, u64 buf_size);
error_code sys_hid_manager_add_hot_key_observer(u32 event_queue, vm::ptr<u32> unk);
error_code sys_hid_manager_check_focus();
error_code sys_hid_manager_is_process_permission_root(u32 pid);
error_code sys_hid_manager_513(u64 a1, u64 a2, vm::ptr<void> buf, u64 buf_size);
error_code sys_hid_manager_514(u32 pkg_id, vm::ptr<void> buf, u64 buf_size);
error_code sys_hid_manager_read(u32 handle, u32 pkg_id, vm::ptr<void> buf, u64 buf_size);
size: 1,069 | language: C++ | extension: .h | total_lines: 37 | avg_line_length: 27.405405 | max_line_length: 100 | alphanum_fraction: 0.724878 | repo_name: RPCS3/rpcs3 | repo_stars: 15,204 | repo_forks: 1,895 | repo_open_issues: 1,021 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | exact/near duplicate flags (RedPajama, GitHub Code, Stack v1, Stack v2): all false

id: 5,927 | file_name: sys_crypto_engine.h | file_path: RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_crypto_engine.h | content:
#pragma once
#include "Emu/Memory/vm_ptr.h"
#include "Emu/Cell/ErrorCodes.h"
// SysCalls
error_code sys_crypto_engine_create(vm::ptr<u32> id);
error_code sys_crypto_engine_destroy(u32 id);
error_code sys_crypto_engine_random_generate(vm::ptr<void> buffer, u64 buffer_size);
size: 277 | language: C++ | extension: .h | total_lines: 7 | avg_line_length: 38.142857 | max_line_length: 84 | alphanum_fraction: 0.775281 | repo_name: RPCS3/rpcs3 | repo_stars: 15,204 | repo_forks: 1,895 | repo_open_issues: 1,021 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | exact/near duplicate flags (RedPajama, GitHub Code, Stack v1, Stack v2): all false

id: 5,928 | file_name: sys_lwmutex.h | file_path: RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_lwmutex.h | content:
#pragma once
#include "sys_sync.h"
#include "Emu/Memory/vm_ptr.h"
struct sys_lwmutex_attribute_t
{
be_t<u32> protocol;
be_t<u32> recursive;
union
{
nse_t<u64, 1> name_u64;
char name[sizeof(u64)];
};
};
enum : u32
{
lwmutex_free = 0xffffffffu,
lwmutex_dead = 0xfffffffeu,
lwmutex_reserved = 0xfffffffdu,
};
struct sys_lwmutex_t
{
struct alignas(8) sync_var_t
{
be_t<u32> owner;
be_t<u32> waiter;
};
union
{
atomic_t<sync_var_t> lock_var;
struct
{
atomic_be_t<u32> owner;
atomic_be_t<u32> waiter;
}
vars;
atomic_be_t<u64> all_info;
};
be_t<u32> attribute;
be_t<u32> recursive_count;
be_t<u32> sleep_queue; // lwmutex pseudo-id
be_t<u32> pad;
};
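// Layout note (descriptive, not from the original header): the anonymous union exposes the
// owner/waiter pair three ways over the same 8 bytes - as a single atomic struct (lock_var),
// as two independent 32-bit atomics (vars), or as one 64-bit value (all_info) suitable for
// whole-word compare-and-swap.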
struct lv2_lwmutex final : lv2_obj
{
static const u32 id_base = 0x95000000;
const lv2_protocol protocol;
const vm::ptr<sys_lwmutex_t> control;
const be_t<u64> name;
shared_mutex mutex;
atomic_t<s32> lwcond_waiters{0};
struct alignas(16) control_data_t
{
s32 signaled{0};
u32 reserved{};
ppu_thread* sq{};
};
atomic_t<control_data_t> lv2_control{};
lv2_lwmutex(u32 protocol, vm::ptr<sys_lwmutex_t> control, u64 name) noexcept
: protocol{static_cast<u8>(protocol)}
, control(control)
, name(std::bit_cast<be_t<u64>>(name))
{
}
lv2_lwmutex(utils::serial& ar);
void save(utils::serial& ar);
ppu_thread* load_sq() const
{
return atomic_storage<ppu_thread*>::load(lv2_control.raw().sq);
}
template <typename T>
s32 try_own(T* cpu, bool wait_only = false)
{
const s32 signal = lv2_control.fetch_op([&](control_data_t& data)
{
if (!data.signaled)
{
cpu->prio.atomic_op([tag = ++g_priority_order_tag](std::common_type_t<decltype(T::prio)>& prio)
{
prio.order = tag;
});
cpu->next_cpu = data.sq;
data.sq = cpu;
}
else
{
ensure(!wait_only);
data.signaled = 0;
}
}).signaled;
if (signal)
{
cpu->next_cpu = nullptr;
}
else
{
const bool notify = lwcond_waiters.fetch_op([](s32& val)
{
if (val + 0u <= 1u << 31)
{
// Value was either positive or INT32_MIN
return false;
}
// lwmutex was set to be destroyed, but there are lwcond waiters
// Turn off the "lwcond_waiters notification" bit as we are adding an lwmutex waiter
val &= 0x7fff'ffff;
return true;
}).second;
if (notify)
{
// Notify lwmutex destroyer (may cause EBUSY to be returned for it)
lwcond_waiters.notify_all();
}
}
return signal;
}
bool try_unlock(bool unlock2)
{
if (!load_sq())
{
control_data_t old{};
old.signaled = atomic_storage<s32>::load(lv2_control.raw().signaled);
control_data_t store = old;
store.signaled |= (unlock2 ? s32{smin} : 1);
if (lv2_control.compare_exchange(old, store))
{
return true;
}
}
return false;
}
template <typename T>
T* reown(bool unlock2 = false)
{
T* res = nullptr;
lv2_control.fetch_op([&](control_data_t& data)
{
res = nullptr;
if (auto sq = static_cast<T*>(data.sq))
{
res = schedule<T>(data.sq, protocol, false);
if (sq == data.sq)
{
return false;
}
return true;
}
else
{
data.signaled |= (unlock2 ? s32{smin} : 1);
return true;
}
});
if (res && cpu_flag::again - res->state)
{
// Detach manually (fetch_op can fail, so avoid side-effects on the first node in this case)
res->next_cpu = nullptr;
}
return res;
}
};
// Aux
class ppu_thread;
// Syscalls
error_code _sys_lwmutex_create(ppu_thread& ppu, vm::ptr<u32> lwmutex_id, u32 protocol, vm::ptr<sys_lwmutex_t> control, s32 has_name, u64 name);
error_code _sys_lwmutex_destroy(ppu_thread& ppu, u32 lwmutex_id);
error_code _sys_lwmutex_lock(ppu_thread& ppu, u32 lwmutex_id, u64 timeout);
error_code _sys_lwmutex_trylock(ppu_thread& ppu, u32 lwmutex_id);
error_code _sys_lwmutex_unlock(ppu_thread& ppu, u32 lwmutex_id);
error_code _sys_lwmutex_unlock2(ppu_thread& ppu, u32 lwmutex_id);
size: 3,947 | language: C++ | extension: .h | total_lines: 169 | avg_line_length: 20.159763 | max_line_length: 143 | alphanum_fraction: 0.66239 | repo_name: RPCS3/rpcs3 | repo_stars: 15,204 | repo_forks: 1,895 | repo_open_issues: 1,021 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | exact/near duplicate flags (RedPajama, GitHub Code, Stack v1, Stack v2): all false

id: 5,929 | file_name: sys_interrupt.h | file_path: RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_interrupt.h | content:
#pragma once
#include "sys_sync.h"
#include "Emu/Memory/vm_ptr.h"
class ppu_thread;
struct lv2_int_tag final : public lv2_obj
{
static const u32 id_base = 0x0a000000;
const u32 id;
std::shared_ptr<struct lv2_int_serv> handler;
lv2_int_tag() noexcept;
lv2_int_tag(utils::serial& ar) noexcept;
void save(utils::serial& ar);
};
struct lv2_int_serv final : public lv2_obj
{
static const u32 id_base = 0x0b000000;
const u32 id;
const std::shared_ptr<named_thread<ppu_thread>> thread;
const u64 arg1;
const u64 arg2;
lv2_int_serv(const std::shared_ptr<named_thread<ppu_thread>>& thread, u64 arg1, u64 arg2) noexcept;
lv2_int_serv(utils::serial& ar) noexcept;
void save(utils::serial& ar);
void exec() const;
void join() const;
};
// Syscalls
error_code sys_interrupt_tag_destroy(ppu_thread& ppu, u32 intrtag);
error_code _sys_interrupt_thread_establish(ppu_thread& ppu, vm::ptr<u32> ih, u32 intrtag, u32 intrthread, u64 arg1, u64 arg2);
error_code _sys_interrupt_thread_disestablish(ppu_thread& ppu, u32 ih, vm::ptr<u64> r13);
void sys_interrupt_thread_eoi(ppu_thread& ppu);
size: 1,097 | language: C++ | extension: .h | total_lines: 31 | avg_line_length: 33.483871 | max_line_length: 126 | alphanum_fraction: 0.748577 | repo_name: RPCS3/rpcs3 | repo_stars: 15,204 | repo_forks: 1,895 | repo_open_issues: 1,021 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | exact/near duplicate flags (RedPajama, GitHub Code, Stack v1, Stack v2): all false

id: 5,930 | file_name: sys_mutex.h | file_path: RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_mutex.h | content:
#pragma once
#include "sys_sync.h"
#include "Emu/Memory/vm_ptr.h"
#include "Emu/Cell/PPUThread.h"
struct sys_mutex_attribute_t
{
be_t<u32> protocol; // SYS_SYNC_FIFO, SYS_SYNC_PRIORITY or SYS_SYNC_PRIORITY_INHERIT
be_t<u32> recursive; // SYS_SYNC_RECURSIVE or SYS_SYNC_NOT_RECURSIVE
be_t<u32> pshared;
be_t<u32> adaptive;
be_t<u64> ipc_key;
be_t<s32> flags;
be_t<u32> pad;
union
{
nse_t<u64, 1> name_u64;
char name[sizeof(u64)];
};
};
class ppu_thread;
struct lv2_mutex final : lv2_obj
{
static const u32 id_base = 0x85000000;
const lv2_protocol protocol;
const u32 recursive;
const u32 adaptive;
const u64 key;
const u64 name;
u32 cond_count = 0; // Condition Variables
shared_mutex mutex;
atomic_t<u32> lock_count{0}; // Recursive Locks
struct alignas(16) control_data_t
{
u32 owner{};
u32 reserved{};
ppu_thread* sq{};
};
atomic_t<control_data_t> control{};
lv2_mutex(u32 protocol, u32 recursive, u32 adaptive, u64 key, u64 name) noexcept
: protocol{static_cast<u8>(protocol)}
, recursive(recursive)
, adaptive(adaptive)
, key(key)
, name(name)
{
}
lv2_mutex(utils::serial& ar);
static std::shared_ptr<void> load(utils::serial& ar);
void save(utils::serial& ar);
template <typename T>
CellError try_lock(T& cpu)
{
auto it = control.load();
if (!it.owner)
{
auto store = it;
store.owner = cpu.id;
if (!control.compare_and_swap_test(it, store))
{
return CELL_EBUSY;
}
return {};
}
if (it.owner == cpu.id)
{
// Recursive locking
if (recursive == SYS_SYNC_RECURSIVE)
{
if (lock_count == 0xffffffffu)
{
return CELL_EKRESOURCE;
}
lock_count++;
return {};
}
return CELL_EDEADLK;
}
return CELL_EBUSY;
}
template <typename T>
bool try_own(T& cpu)
{
if (control.atomic_op([&](control_data_t& data)
{
if (data.owner)
{
cpu.prio.atomic_op([tag = ++g_priority_order_tag](std::common_type_t<decltype(T::prio)>& prio)
{
prio.order = tag;
});
cpu.next_cpu = data.sq;
data.sq = &cpu;
return false;
}
else
{
data.owner = cpu.id;
return true;
}
}))
{
cpu.next_cpu = nullptr;
return true;
}
return false;
}
template <typename T>
CellError try_unlock(T& cpu)
{
auto it = control.load();
if (it.owner != cpu.id)
{
return CELL_EPERM;
}
if (lock_count)
{
lock_count--;
return {};
}
if (!it.sq)
{
auto store = it;
store.owner = 0;
if (control.compare_and_swap_test(it, store))
{
return {};
}
}
return CELL_EBUSY;
}
template <typename T>
T* reown()
{
T* res{};
control.fetch_op([&](control_data_t& data)
{
res = nullptr;
if (auto sq = static_cast<T*>(data.sq))
{
res = schedule<T>(data.sq, protocol, false);
if (sq == data.sq)
{
atomic_storage<u32>::release(control.raw().owner, res->id);
return false;
}
data.owner = res->id;
return true;
}
else
{
data.owner = 0;
return true;
}
});
if (res && cpu_flag::again - res->state)
{
// Detach manually (fetch_op can fail, so avoid side-effects on the first node in this case)
res->next_cpu = nullptr;
}
return res;
}
};
// Syscalls
error_code sys_mutex_create(ppu_thread& ppu, vm::ptr<u32> mutex_id, vm::ptr<sys_mutex_attribute_t> attr);
error_code sys_mutex_destroy(ppu_thread& ppu, u32 mutex_id);
error_code sys_mutex_lock(ppu_thread& ppu, u32 mutex_id, u64 timeout);
error_code sys_mutex_trylock(ppu_thread& ppu, u32 mutex_id);
error_code sys_mutex_unlock(ppu_thread& ppu, u32 mutex_id);
size: 3,602 | language: C++ | extension: .h | total_lines: 168 | avg_line_length: 18.119048 | max_line_length: 105 | alphanum_fraction: 0.650957 | repo_name: RPCS3/rpcs3 | repo_stars: 15,204 | repo_forks: 1,895 | repo_open_issues: 1,021 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | exact/near duplicate flags (RedPajama, GitHub Code, Stack v1, Stack v2): all false

id: 5,931 | file_name: sys_bdemu.h | file_path: RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_bdemu.h | content:
#pragma once
#include "Emu/Memory/vm_ptr.h"
#include "Emu/Cell/ErrorCodes.h"
// SysCalls
error_code sys_bdemu_send_command(u64 cmd, u64 a2, u64 a3, vm::ptr<void> buf, u64 buf_len);
size: 184 | language: C++ | extension: .h | total_lines: 5 | avg_line_length: 35.2 | max_line_length: 91 | alphanum_fraction: 0.738636 | repo_name: RPCS3/rpcs3 | repo_stars: 15,204 | repo_forks: 1,895 | repo_open_issues: 1,021 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | exact/near duplicate flags (RedPajama, GitHub Code, Stack v1, Stack v2): all false

id: 5,932 | file_name: sys_overlay.h | file_path: RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_overlay.h | content:
#pragma once
#include "Emu/Cell/PPUAnalyser.h"
#include "Emu/Memory/vm_ptr.h"
#include "sys_sync.h"
#include <vector>
struct lv2_overlay final : lv2_obj, ppu_module
{
static const u32 id_base = 0x25000000;
u32 entry{};
u32 seg0_code_end{};
std::vector<u32> applied_patches;
lv2_overlay() = default;
lv2_overlay(utils::serial&){}
static std::shared_ptr<void> load(utils::serial& ar);
void save(utils::serial& ar);
};
error_code sys_overlay_load_module(vm::ptr<u32> ovlmid, vm::cptr<char> path, u64 flags, vm::ptr<u32> entry);
error_code sys_overlay_load_module_by_fd(vm::ptr<u32> ovlmid, u32 fd, u64 offset, u64 flags, vm::ptr<u32> entry);
error_code sys_overlay_unload_module(u32 ovlmid);
//error_code sys_overlay_get_module_list(sys_pid_t pid, usz ovlmids_num, sys_overlay_t * ovlmids, usz * num_of_modules);
//error_code sys_overlay_get_module_info(sys_pid_t pid, sys_overlay_t ovlmid, sys_overlay_module_info_t * info);
//error_code sys_overlay_get_module_info2(sys_pid_t pid, sys_overlay_t ovlmid, sys_overlay_module_info2_t * info);//
//error_code sys_overlay_get_sdk_version(); //2 params
//error_code sys_overlay_get_module_dbg_info(); //3 params?
//error_code _sys_prx_load_module(vm::ps3::cptr<char> path, u64 flags, vm::ps3::ptr<sys_prx_load_module_option_t> pOpt);
size: 1,290 | language: C++ | extension: .h | total_lines: 25 | avg_line_length: 50.04 | max_line_length: 120 | alphanum_fraction: 0.732327 | repo_name: RPCS3/rpcs3 | repo_stars: 15,204 | repo_forks: 1,895 | repo_open_issues: 1,021 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | exact/near duplicate flags (RedPajama, GitHub Code, Stack v1, Stack v2): all false

id: 5,933 | file_name: sys_io.h | file_path: RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_io.h | content:
#pragma once
#include "Emu/Memory/vm_ptr.h"
struct lv2_io_buf
{
static const u32 id_base = 0x44000000;
static const u32 id_step = 1;
static const u32 id_count = 2048;
SAVESTATE_INIT_POS(41);
const u32 block_count;
const u32 block_size;
const u32 blocks;
const u32 unk1;
lv2_io_buf(u32 block_count, u32 block_size, u32 blocks, u32 unk1)
: block_count(block_count)
, block_size(block_size)
, blocks(blocks)
, unk1(unk1)
{
}
};
// SysCalls
error_code sys_io_buffer_create(u32 block_count, u32 block_size, u32 blocks, u32 unk1, vm::ptr<u32> handle);
error_code sys_io_buffer_destroy(u32 handle);
error_code sys_io_buffer_allocate(u32 handle, vm::ptr<u32> block);
error_code sys_io_buffer_free(u32 handle, u32 block);
size: 738 | language: C++ | extension: .h | total_lines: 25 | avg_line_length: 27.52 | max_line_length: 108 | alphanum_fraction: 0.74116 | repo_name: RPCS3/rpcs3 | repo_stars: 15,204 | repo_forks: 1,895 | repo_open_issues: 1,021 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | exact/near duplicate flags (RedPajama, GitHub Code, Stack v1, Stack v2): all false

id: 5,934 | file_name: sys_tty.h | file_path: RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_tty.h | content:
#pragma once
#include "Emu/Memory/vm_ptr.h"
#include "Emu/Cell/ErrorCodes.h"
// TTY channels
enum
{
SYS_TTYP_PPU_STDIN = 0,
SYS_TTYP_PPU_STDOUT = 0,
SYS_TTYP_PPU_STDERR = 1,
SYS_TTYP_SPU_STDOUT = 2,
SYS_TTYP_USER1 = 3,
SYS_TTYP_USER2 = 4,
SYS_TTYP_USER3 = 5,
SYS_TTYP_USER4 = 6,
SYS_TTYP_USER5 = 7,
SYS_TTYP_USER6 = 8,
SYS_TTYP_USER7 = 9,
SYS_TTYP_USER8 = 10,
SYS_TTYP_USER9 = 11,
SYS_TTYP_USER10 = 12,
SYS_TTYP_USER11 = 13,
SYS_TTYP_USER12 = 14,
SYS_TTYP_USER13 = 15,
};
class ppu_thread;
// SysCalls
error_code sys_tty_read(s32 ch, vm::ptr<char> buf, u32 len, vm::ptr<u32> preadlen);
error_code sys_tty_write(ppu_thread& ppu, s32 ch, vm::cptr<char> buf, u32 len, vm::ptr<u32> pwritelen);
size: 790 | language: C++ | extension: .h | total_lines: 28 | avg_line_length: 26.464286 | max_line_length: 103 | alphanum_fraction: 0.621372 | repo_name: RPCS3/rpcs3 | repo_stars: 15,204 | repo_forks: 1,895 | repo_open_issues: 1,021 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | exact/near duplicate flags (RedPajama, GitHub Code, Stack v1, Stack v2): all false

id: 5,935 | file_name: sys_config.h | file_path: RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_config.h | content:
#pragma once
#include <map>
#include <list>
/*
* sys_config is a "subscription-based data storage API"
*
* It has the concept of services and listeners. Services provide data, listeners subscribe to registration/unregistration events from specific services.
*
* Services are divided into two classes: LV2 services (positive service IDs) and User services (negative service IDs).
 * LV2 services seem to be implicitly "available", probably constructed on-demand with internal LV2 code generating the data. An example is PadManager (service ID 0x11).
* User services may be registered through a syscall, and have negative IDs. An example is libPad (service ID 0x8000'0000'0000'0001).
* Note that user-mode *cannot* register positive service IDs.
*
* To start with, you have to get a sys_config handle by calling sys_config_open and providing an event queue.
* This event queue will be used for sys_config notifications if a subscribed config event is registered.
*
* With a sys_config handle, listeners can be added to specific services using sys_config_add_service_listener.
* This syscall returns a service listener handle, which can be used to close the listener and stop further notifications.
* Once subscribed, any matching past service registrations will be automatically sent to the supplied queue (thus the "data storage").
*
* Services exist "implicitly", and data may be registered *onto* a service by calling sys_config_register_service.
* You can remove config events by calling sys_config_unregister_service and providing the handle returned when registering a service.
*
* If a service is registered (or unregistered) and matches any active listener, that listener will get an event sent to the event queue provided in the call to sys_config_open.
*
* This event will contain the type of config event ("service event" or "IO event", in event.source),
* the corresponding sys_config handle (event.data1), the config event ID (event.data2 & 0xffff'ffff),
* whether the service was registered or unregistered ('data2 >> 32'), and what buffer size will be needed to read the corresponding service event (event.data3).
*
* NOTE: if multiple listeners exist, each gets a separate event ID even though all events are the same!
*
* After receiving such an event from the event queue, the user should allocate enough buffer and call sys_config_get_service_event
* (or sys_config_io_event) with the given event ID, in order to obtain a sys_config_service_event_t (or sys_config_io_event_t) structure
* with the contents of the service that was (un)registered.
*/
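/*
 * Guest-side flow sketch (illustrative only, not part of the original header; the variable
 * names and the choice of the padmanager service are assumptions made for the example):
 *
 *   vm::var<u32> config_hdl, listener_hdl;
 *   sys_config_open(equeue_hdl, config_hdl);            // bind a sys_config handle to an existing event queue
 *   sys_config_add_service_listener(*config_hdl, SYS_CONFIG_SERVICE_PADMANAGER, 0,
 *       vm::null, 0, SYS_CONFIG_SERVICE_LISTENER_REPEATING, listener_hdl);
 *   // ... wait on the event queue; for a service event, data2 & 0xffff'ffff is the
 *   // config event ID and data3 the buffer size required to read it ...
 *   sys_config_get_service_event(*config_hdl, event_id, dst, size);
 *   sys_config_remove_service_listener(*config_hdl, *listener_hdl);
 *   sys_config_close(*config_hdl);
 */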
class lv2_config_handle;
class lv2_config_service;
class lv2_config_service_listener;
class lv2_config_service_event;
// Known sys_config service IDs
enum sys_config_service_id : s64 {
SYS_CONFIG_SERVICE_PADMANAGER = 0x11,
SYS_CONFIG_SERVICE_PADMANAGER2 = 0x12, // lv2 seems to send padmanager events to both 0x11 and 0x12
SYS_CONFIG_SERVICE_0x20 = 0x20,
SYS_CONFIG_SERVICE_0x30 = 0x30,
SYS_CONFIG_SERVICE_USER_BASE = static_cast<s64>(UINT64_C(0x8000'0000'0000'0000)),
SYS_CONFIG_SERVICE_USER_LIBPAD = SYS_CONFIG_SERVICE_USER_BASE + 1,
SYS_CONFIG_SERVICE_USER_LIBKB = SYS_CONFIG_SERVICE_USER_BASE + 2,
SYS_CONFIG_SERVICE_USER_LIBMOUSE = SYS_CONFIG_SERVICE_USER_BASE + 3,
SYS_CONFIG_SERVICE_USER_0x1000 = SYS_CONFIG_SERVICE_USER_BASE + 0x1000,
SYS_CONFIG_SERVICE_USER_0x1010 = SYS_CONFIG_SERVICE_USER_BASE + 0x1010,
SYS_CONFIG_SERVICE_USER_0x1011 = SYS_CONFIG_SERVICE_USER_BASE + 0x1011,
SYS_CONFIG_SERVICE_USER_0x1013 = SYS_CONFIG_SERVICE_USER_BASE + 0x1013,
SYS_CONFIG_SERVICE_USER_0x1020 = SYS_CONFIG_SERVICE_USER_BASE + 0x1020,
SYS_CONFIG_SERVICE_USER_0x1030 = SYS_CONFIG_SERVICE_USER_BASE + 0x1030,
};
enum sys_config_service_listener_type : u32 {
SYS_CONFIG_SERVICE_LISTENER_ONCE = 0,
SYS_CONFIG_SERVICE_LISTENER_REPEATING = 1
};
enum sys_config_event_source : u64 {
SYS_CONFIG_EVENT_SOURCE_SERVICE = 1,
SYS_CONFIG_EVENT_SOURCE_IO = 2
};
/*
* Dynamic-sized struct to describe a sys_config_service_event
* We never allocate it - the guest does it for us and provides a pointer
*/
struct sys_config_service_event_t {
// Handle to the service listener for whom this event is destined
be_t<u32> service_listener_handle;
// 1 if this service is currently registered, 0 if it has been unregistered
be_t<u32> registered;
// Service ID that triggered this event
be_t<u64> service_id;
// Custom ID provided by the user, used to uniquely identify service events (provided to sys_config_register_event)
// When a service is unregistered, this is the only value available to distinguish which service event was unregistered.
be_t<u64> user_id;
/* if added==0, the structure ends here */
// Verbosity of this service event (provided to sys_config_register_event)
be_t<u64> verbosity;
// Size of 'data'
be_t<u32> data_size;
// Ignored, seems to be simply 32-bits of padding
be_t<u32> padding;
// Buffer containing event data (copy of the buffer supplied to sys_config_register_service)
// NOTE: This buffer size is dynamic, according to 'data_size', and can be 0. Here it is set to 1 since zero-sized buffers are not standards-compliant
u8 data[1];
};
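// Sizing note (descriptive, not from the original header): the structure is variable-length,
// so a correctly sized destination buffer needs sizeof(sys_config_service_event_t) - 1 + data_size
// bytes; the "- 1" compensates for the one-byte data[1] placeholder (compare
// lv2_config_service::get_size() further below).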
/*
* Event data structure for SYS_CONFIG_SERVICE_PADMANAGER
* This is a guess
*/
struct sys_config_padmanager_data_t {
be_t<u16> unk[5]; // hid device type ?
be_t<u16> vid;
be_t<u16> pid;
be_t<u16> unk2[6]; // bluetooth address?
};
static_assert(sizeof(sys_config_padmanager_data_t) == 26);
/*
* Global sys_config state
*/
class lv2_config
{
atomic_t<u32> m_state = 0;
// LV2 Config mutex
shared_mutex m_mutex;
// Map of LV2 Service Events
std::unordered_map<u32, std::weak_ptr<lv2_config_service_event>> events;
public:
void initialize();
// Service Events
void add_service_event(const std::shared_ptr<lv2_config_service_event>& event);
void remove_service_event(u32 id);
std::shared_ptr<lv2_config_service_event> find_event(u32 id)
{
reader_lock lock(m_mutex);
const auto it = events.find(id);
if (it == events.cend())
return nullptr;
if (auto event = it->second.lock())
{
return event;
}
return nullptr;
}
};
/*
* LV2 Config Handle object, managed by IDM
*/
class lv2_config_handle
{
public:
static const u32 id_base = 0x41000000;
static const u32 id_step = 0x100;
static const u32 id_count = 2048;
SAVESTATE_INIT_POS(37);
private:
u32 idm_id;
// queue for service/io event notifications
const std::weak_ptr<lv2_event_queue> queue;
bool send_queue_event(u64 source, u64 d1, u64 d2, u64 d3) const
{
if (auto sptr = queue.lock())
{
return sptr->send(source, d1, d2, d3) == 0;
}
return false;
}
public:
// Constructors (should not be used directly)
lv2_config_handle(std::weak_ptr<lv2_event_queue>&& _queue)
: queue(std::move(_queue))
{}
// Factory
template <typename... Args>
static std::shared_ptr<lv2_config_handle> create(Args&&... args)
{
if (auto cfg = idm::make_ptr<lv2_config_handle>(std::forward<Args>(args)...))
{
cfg->idm_id = idm::last_id();
return cfg;
}
return nullptr;
}
// Notify event queue for this handle
bool notify(u64 source, u64 data2, u64 data3) const
{
return send_queue_event(source, idm_id, data2, data3);
}
};
/*
* LV2 Service object, managed by IDM
*/
class lv2_config_service
{
public:
static const u32 id_base = 0x43000000;
static const u32 id_step = 0x100;
static const u32 id_count = 2048;
SAVESTATE_INIT_POS(38);
private:
// IDM data
u32 idm_id;
std::weak_ptr<lv2_config_service> wkptr;
// Whether this service is currently registered or not
bool registered = true;
public:
const u64 timestamp;
const sys_config_service_id id;
const u64 user_id;
const u64 verbosity;
const u32 padding; // not used, but stored here just in case
const std::vector<u8> data;
// Constructors (should not be used directly)
lv2_config_service(sys_config_service_id _id, u64 _user_id, u64 _verbosity, u32 _padding, const u8 _data[], usz size)
: timestamp(get_system_time())
, id(_id)
, user_id(_user_id)
, verbosity(_verbosity)
, padding(_padding)
, data(&_data[0], &_data[size])
{}
// Factory
template <typename... Args>
static std::shared_ptr<lv2_config_service> create(Args&&... args)
{
if (auto service = idm::make_ptr<lv2_config_service>(std::forward<Args>(args)...))
{
service->wkptr = service;
service->idm_id = idm::last_id();
return service;
}
return nullptr;
}
// Registration
bool is_registered() const { return registered; }
void unregister();
// Notify listeners
void notify() const;
// Utilities
usz get_size() const { return sizeof(sys_config_service_event_t)-1 + data.size(); }
std::shared_ptr<lv2_config_service> get_shared_ptr () const { return wkptr.lock(); }
u32 get_id() const { return idm_id; }
};
/*
* LV2 Service Event Listener object, managed by IDM
*/
class lv2_config_service_listener
{
public:
static const u32 id_base = 0x42000000;
static const u32 id_step = 0x100;
static const u32 id_count = 2048;
SAVESTATE_INIT_POS(39);
private:
// IDM data
u32 idm_id;
std::weak_ptr<lv2_config_service_listener> wkptr;
// The service listener owns the service events - service events will not be freed as long as their corresponding listener exists
// This has been confirmed to be the case in realhw
std::vector<std::shared_ptr<lv2_config_service_event>> service_events;
std::weak_ptr<lv2_config_handle> handle;
bool notify(const std::shared_ptr<lv2_config_service_event>& event);
public:
const sys_config_service_id service_id;
const u64 min_verbosity;
const sys_config_service_listener_type type;
const std::vector<u8> data;
// Constructors (should not be used directly)
lv2_config_service_listener(std::shared_ptr<lv2_config_handle>& _handle, sys_config_service_id _service_id, u64 _min_verbosity, sys_config_service_listener_type _type, const u8 _data[], usz size)
: handle(_handle)
, service_id(_service_id)
, min_verbosity(_min_verbosity)
, type(_type)
, data(&_data[0], &_data[size])
{}
// Factory
template <typename... Args>
static std::shared_ptr<lv2_config_service_listener> create(Args&&... args)
{
if (auto listener = idm::make_ptr<lv2_config_service_listener>(std::forward<Args>(args)...))
{
listener->wkptr = listener;
listener->idm_id = idm::last_id();
return listener;
}
return nullptr;
}
// Check whether service matches
bool check_service(const lv2_config_service& service) const;
// Register new event, and notify queue
bool notify(const std::shared_ptr<lv2_config_service>& service);
// (Re-)notify about all still-registered past events
void notify_all();
// Utilities
u32 get_id() const { return idm_id; }
std::shared_ptr<lv2_config_service_listener> get_shared_ptr() const { return wkptr.lock(); }
};
/*
* LV2 Service Event object (*not* managed by IDM)
*/
class lv2_config_service_event
{
static u32 get_next_id()
{
struct service_event_id
{
atomic_t<u32> next_id = 0;
};
return g_fxo->get<service_event_id>().next_id++;
}
public:
const u32 id;
// Note: Events hold a shared_ptr to their corresponding service - services only get freed once there are no more pending service events
// This has been confirmed to be the case in realhw
const std::weak_ptr<lv2_config_handle> handle;
const std::shared_ptr<lv2_config_service> service;
const lv2_config_service_listener& listener;
// Constructors (should not be used directly)
lv2_config_service_event(const std::weak_ptr<lv2_config_handle>& _handle, const std::shared_ptr<lv2_config_service>& _service, const lv2_config_service_listener& _listener)
: id(get_next_id())
, handle(_handle)
, service(_service)
, listener(_listener)
{}
lv2_config_service_event(const std::weak_ptr<lv2_config_handle>&& _handle, const std::shared_ptr<lv2_config_service>&& _service, const lv2_config_service_listener& _listener)
: id(get_next_id())
, handle(std::move(_handle))
, service(std::move(_service))
, listener(_listener)
{}
// Factory
template <typename... Args>
static std::shared_ptr<lv2_config_service_event> create(Args&&... args)
{
auto ev = std::make_shared<lv2_config_service_event>(std::forward<Args>(args)...);
g_fxo->get<lv2_config>().add_service_event(ev);
return ev;
}
// Destructor
~lv2_config_service_event()
{
if (auto global = g_fxo->try_get<lv2_config>())
{
global->remove_service_event(id);
}
}
// Notify queue that this event exists
bool notify() const;
// Write event to buffer
void write(sys_config_service_event_t *dst) const;
// Check if the buffer can fit the current event, return false otherwise
bool check_buffer_size(usz size) const { return service->get_size() <= size; }
};
/*
* Syscalls
*/
/*516*/ error_code sys_config_open(u32 equeue_hdl, vm::ptr<u32> out_config_hdl);
/*517*/ error_code sys_config_close(u32 config_hdl);
/*518*/ error_code sys_config_get_service_event(u32 config_hdl, u32 event_id, vm::ptr<sys_config_service_event_t> dst, u64 size);
/*519*/ error_code sys_config_add_service_listener(u32 config_hdl, sys_config_service_id service_id, u64 min_verbosity, vm::ptr<void> in, u64 size, sys_config_service_listener_type type, vm::ptr<u32> out_listener_hdl);
/*520*/ error_code sys_config_remove_service_listener(u32 config_hdl, u32 listener_hdl);
/*521*/ error_code sys_config_register_service(u32 config_hdl, sys_config_service_id service_id, u64 user_id, u64 verbosity, vm::ptr<u8> data_buf, u64 size, vm::ptr<u32> out_service_hdl);
/*522*/ error_code sys_config_unregister_service(u32 config_hdl, u32 service_hdl);
// Following syscalls have not been REd yet
/*523*/ error_code sys_config_get_io_event(u32 config_hdl, u32 event_id /*?*/, vm::ptr<void> out_buf /*?*/, u64 size /*?*/);
/*524*/ error_code sys_config_register_io_error_listener(u32 config_hdl);
/*525*/ error_code sys_config_unregister_io_error_listener(u32 config_hdl);
| 13,968 | C++ | .h | 350 | 37.64 | 218 | 0.7358 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,936 | sys_rsxaudio.h | RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_rsxaudio.h |
#pragma once
#include "sys_sync.h"
#include "sys_event.h"
#include "Utilities/simple_ringbuf.h"
#include "Utilities/transactional_storage.h"
#include "Utilities/cond.h"
#include "Emu/system_config_types.h"
#include "Emu/Memory/vm_ptr.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Audio/AudioDumper.h"
#include "Emu/Audio/AudioBackend.h"
#include "Emu/Audio/audio_resampler.h"
#if defined(unix) || defined(__unix) || defined(__unix__)
// For BSD detection
#include <sys/param.h>
#endif
#ifdef _WIN32
#include <windows.h>
#elif defined(BSD) || defined(__APPLE__)
#include <sys/event.h>
#endif
enum : u32
{
SYS_RSXAUDIO_SERIAL_STREAM_CNT = 4,
SYS_RSXAUDIO_STREAM_DATA_BLK_CNT = 4,
SYS_RSXAUDIO_DATA_BLK_SIZE = 256,
SYS_RSXAUDIO_STREAM_SIZE = SYS_RSXAUDIO_DATA_BLK_SIZE * SYS_RSXAUDIO_STREAM_DATA_BLK_CNT,
SYS_RSXAUDIO_CH_PER_STREAM = 2,
SYS_RSXAUDIO_SERIAL_MAX_CH = 8,
SYS_RSXAUDIO_SPDIF_MAX_CH = 2,
SYS_RSXAUDIO_STREAM_SAMPLE_CNT = SYS_RSXAUDIO_STREAM_SIZE / SYS_RSXAUDIO_CH_PER_STREAM / sizeof(f32),
SYS_RSXAUDIO_RINGBUF_BLK_SZ_SERIAL = SYS_RSXAUDIO_STREAM_SIZE * SYS_RSXAUDIO_SERIAL_STREAM_CNT,
SYS_RSXAUDIO_RINGBUF_BLK_SZ_SPDIF = SYS_RSXAUDIO_STREAM_SIZE,
SYS_RSXAUDIO_RINGBUF_SZ = 16,
SYS_RSXAUDIO_AVPORT_CNT = 5,
SYS_RSXAUDIO_FREQ_BASE_384K = 384000,
SYS_RSXAUDIO_FREQ_BASE_352K = 352800,
SYS_RSXAUDIO_PORT_CNT = 3,
SYS_RSXAUDIO_SPDIF_CNT = 2,
};
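// Illustrative sanity checks (not part of the original header): the derived constants above follow
// from the block layout, e.g. one stream is 4 blocks of 256 bytes carrying 2 channels of f32 samples.
static_assert(SYS_RSXAUDIO_STREAM_SIZE == 1024);
static_assert(SYS_RSXAUDIO_STREAM_SAMPLE_CNT == 128); // 1024 bytes / 2 channels / sizeof(f32)
static_assert(SYS_RSXAUDIO_RINGBUF_BLK_SZ_SERIAL == SYS_RSXAUDIO_STREAM_SIZE * 4); // 4 serial streams per block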
enum class RsxaudioAvportIdx : u8
{
HDMI_0 = 0,
HDMI_1 = 1,
AVMULTI = 2,
SPDIF_0 = 3,
SPDIF_1 = 4,
};
enum class RsxaudioPort : u8
{
SERIAL = 0,
SPDIF_0 = 1,
SPDIF_1 = 2,
INVALID = 0xFF,
};
enum class RsxaudioSampleSize : u8
{
_16BIT = 2,
_32BIT = 4,
};
struct rsxaudio_shmem
{
struct ringbuf_t
{
struct entry_t
{
be_t<u32> valid{};
be_t<u32> unk1{};
be_t<u64> audio_blk_idx{};
be_t<u64> timestamp{};
be_t<u32> buf_addr{};
be_t<u32> dma_addr{};
};
be_t<u32> active{};
be_t<u32> unk2{};
be_t<s32> read_idx{};
be_t<u32> write_idx{};
be_t<s32> rw_max_idx{};
be_t<s32> queue_notify_idx{};
be_t<s32> queue_notify_step{};
be_t<u32> unk6{};
be_t<u32> dma_silence_addr{};
be_t<u32> unk7{};
be_t<u64> next_blk_idx{};
entry_t entries[16]{};
};
struct uf_event_t
{
be_t<u64> unk1{};
be_t<u32> uf_event_cnt{};
u8 unk2[244]{};
};
struct ctrl_t
{
ringbuf_t ringbuf[SYS_RSXAUDIO_PORT_CNT]{};
be_t<u32> unk1{};
be_t<u32> event_queue_1_id{};
u8 unk2[16]{};
be_t<u32> event_queue_2_id{};
be_t<u32> spdif_ch0_channel_data_lo{};
be_t<u32> spdif_ch0_channel_data_hi{};
be_t<u32> spdif_ch0_channel_data_tx_cycles{};
be_t<u32> unk3{};
be_t<u32> event_queue_3_id{};
be_t<u32> spdif_ch1_channel_data_lo{};
be_t<u32> spdif_ch1_channel_data_hi{};
be_t<u32> spdif_ch1_channel_data_tx_cycles{};
be_t<u32> unk4{};
be_t<u32> intr_thread_prio{};
be_t<u32> unk5{};
u8 unk6[248]{};
uf_event_t channel_uf[SYS_RSXAUDIO_PORT_CNT]{};
u8 pad[0x3530]{};
};
u8 dma_serial_region[0x10000]{};
u8 dma_spdif_0_region[0x4000]{};
u8 dma_spdif_1_region[0x4000]{};
u8 dma_silence_region[0x4000]{};
ctrl_t ctrl{};
};
static_assert(sizeof(rsxaudio_shmem::ringbuf_t) == 0x230U, "rsxAudioRingBufSizeTest");
static_assert(sizeof(rsxaudio_shmem::uf_event_t) == 0x100U, "rsxAudioUfEventTest");
static_assert(sizeof(rsxaudio_shmem::ctrl_t) == 0x4000U, "rsxAudioCtrlSizeTest");
static_assert(sizeof(rsxaudio_shmem) == 0x20000U, "rsxAudioShmemSizeTest");
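// Illustrative layout note (not part of the original header): given the member order and the size
// assertions above, the shared block regions are laid out back to back:
//   dma_serial_region  at 0x00000 (0x10000 bytes)
//   dma_spdif_0_region at 0x10000 (0x4000 bytes)
//   dma_spdif_1_region at 0x14000 (0x4000 bytes)
//   dma_silence_region at 0x18000 (0x4000 bytes)
//   ctrl               at 0x1C000 (0x4000 bytes) -> 0x20000 total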
enum rsxaudio_dma_flag : u32
{
IO_BASE = 0,
IO_ID = 1
};
struct lv2_rsxaudio final : lv2_obj
{
static constexpr u32 id_base = 0x60000000;
static constexpr u64 dma_io_id = 1;
static constexpr u32 dma_io_base = 0x30000000;
shared_mutex mutex{};
bool init = false;
vm::addr_t shmem{};
std::array<std::shared_ptr<lv2_event_queue>, SYS_RSXAUDIO_PORT_CNT> event_queue{};
// lv2 uses port memory addresses as their names
static constexpr std::array<u64, SYS_RSXAUDIO_PORT_CNT> event_port_name{ 0x8000000000400100, 0x8000000000400200, 0x8000000000400300 };
lv2_rsxaudio() noexcept = default;
lv2_rsxaudio(utils::serial& ar) noexcept;
void save(utils::serial& ar);
void page_lock()
{
ensure(shmem && vm::page_protect(shmem, sizeof(rsxaudio_shmem), 0, 0, vm::page_readable | vm::page_writable | vm::page_executable));
}
void page_unlock()
{
ensure(shmem && vm::page_protect(shmem, sizeof(rsxaudio_shmem), 0, vm::page_readable | vm::page_writable));
}
rsxaudio_shmem* get_rw_shared_page() const
{
return reinterpret_cast<rsxaudio_shmem*>(vm::g_sudo_addr + u32{shmem});
}
};
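// Illustrative sketch (not part of the original header): the helpers above suggest the emulator
// touches the shared block through the sudo mapping while the guest-visible pages stay protected.
// The IDM lookup and the written field below are hypothetical.
#if 0
if (auto ra = idm::get<lv2_obj, lv2_rsxaudio>(handle))
{
	std::lock_guard lock(ra->mutex);
	if (ra->init)
	{
		rsxaudio_shmem* sh = ra->get_rw_shared_page();
		sh->ctrl.event_queue_1_id = 0; // hypothetical write through the sudo mapping
	}
}
#endif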
class rsxaudio_periodic_tmr
{
public:
enum class wait_result
{
SUCCESS,
INVALID_PARAM,
TIMEOUT,
TIMER_ERROR,
TIMER_CANCELED,
};
rsxaudio_periodic_tmr();
~rsxaudio_periodic_tmr();
rsxaudio_periodic_tmr(const rsxaudio_periodic_tmr&) = delete;
rsxaudio_periodic_tmr& operator=(const rsxaudio_periodic_tmr&) = delete;
// Wait until timer fires and calls callback.
wait_result wait(const std::function<void()> &callback);
// Cancel wait() call
void cancel_wait();
// VTimer functions
void vtimer_access_sec(std::invocable<> auto func)
{
std::lock_guard lock(mutex);
std::invoke(func);
// Adjust timer expiration
cancel_timer_unlocked();
sched_timer();
}
void enable_vtimer(u32 vtimer_id, u32 rate, u64 crnt_time);
void disable_vtimer(u32 vtimer_id);
bool is_vtimer_behind(u32 vtimer_id, u64 crnt_time) const;
void vtimer_skip_periods(u32 vtimer_id, u64 crnt_time);
void vtimer_incr(u32 vtimer_id, u64 crnt_time);
bool is_vtimer_active(u32 vtimer_id) const;
u64 vtimer_get_sched_time(u32 vtimer_id) const;
private:
static constexpr u64 MAX_BURST_PERIODS = SYS_RSXAUDIO_RINGBUF_SZ;
static constexpr u32 VTIMER_MAX = 4;
struct vtimer
{
u64 blk_cnt = 0;
f64 blk_time = 0.0;
bool active = false;
};
std::array<vtimer, VTIMER_MAX> vtmr_pool{};
shared_mutex mutex{};
bool in_wait = false;
bool zero_period = false;
#if defined(_WIN32)
HANDLE cancel_event{};
HANDLE timer_handle{};
#elif defined(__linux__)
int cancel_event{};
int timer_handle{};
int epoll_fd{};
#elif defined(BSD) || defined(__APPLE__)
static constexpr u64 TIMER_ID = 0;
static constexpr u64 CANCEL_ID = 1;
int kq{};
struct kevent handle[2]{};
#else
#error "Implement"
#endif
void sched_timer();
void cancel_timer_unlocked();
void reset_cancel_flag();
bool is_vtimer_behind(const vtimer& vtimer, u64 crnt_time) const;
u64 get_crnt_blk(u64 crnt_time, f64 blk_time) const;
f64 get_blk_time(u32 data_rate) const;
u64 get_rel_next_time();
};
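// Illustrative sketch (not part of the original header): vtimer state is meant to be touched
// through vtimer_access_sec(), which reschedules the underlying OS timer after the callback runs;
// this assumes the vtimer_* helpers themselves do not re-take the lock. Values are hypothetical.
#if 0
rsxaudio_periodic_tmr tmr{};
tmr.vtimer_access_sec([&]()
{
	tmr.enable_vtimer(0 /* vtimer_id */, 48000 /* rate */, get_system_time());
});
#endif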
struct rsxaudio_hw_param_t
{
struct serial_param_t
{
bool dma_en = false;
bool buf_empty_en = false;
bool muted = true;
bool en = false;
u8 freq_div = 8;
RsxaudioSampleSize depth = RsxaudioSampleSize::_16BIT;
};
struct spdif_param_t
{
bool dma_en = false;
bool buf_empty_en = false;
bool muted = true;
bool en = false;
bool use_serial_buf = true;
u8 freq_div = 8;
RsxaudioSampleSize depth = RsxaudioSampleSize::_16BIT;
std::array<u8, 6> cs_data = { 0x00, 0x90, 0x00, 0x40, 0x80, 0x00 }; // HW supports only 6 bytes (uart pkt has 8)
};
struct hdmi_param_t
{
struct hdmi_ch_cfg_t
{
std::array<u8, SYS_RSXAUDIO_SERIAL_MAX_CH> map{};
AudioChannelCnt total_ch_cnt = AudioChannelCnt::STEREO;
};
static constexpr u8 MAP_SILENT_CH = umax;
bool init = false;
hdmi_ch_cfg_t ch_cfg{};
std::array<u8, 5> info_frame{}; // TODO: check chstat and info_frame for info on audio layout, add default values
std::array<u8, 5> chstat{};
bool muted = true;
bool force_mute = true;
bool use_spdif_1 = false; // TODO: unused for now
};
u32 serial_freq_base = SYS_RSXAUDIO_FREQ_BASE_384K;
u32 spdif_freq_base = SYS_RSXAUDIO_FREQ_BASE_352K;
bool avmulti_av_muted = true;
serial_param_t serial{};
spdif_param_t spdif[2]{};
hdmi_param_t hdmi[2]{};
std::array<RsxaudioPort, SYS_RSXAUDIO_AVPORT_CNT> avport_src =
{
RsxaudioPort::INVALID,
RsxaudioPort::INVALID,
RsxaudioPort::INVALID,
RsxaudioPort::INVALID,
RsxaudioPort::INVALID
};
};
// 16-bit PCM yields twice as many samples once converted into float, so the buffer must be twice as big
using ra_stream_blk_t = std::array<f32, SYS_RSXAUDIO_STREAM_SAMPLE_CNT * 2>;
class rsxaudio_data_container
{
public:
struct buf_t
{
std::array<ra_stream_blk_t, SYS_RSXAUDIO_SERIAL_MAX_CH> serial{};
std::array<ra_stream_blk_t, SYS_RSXAUDIO_SPDIF_MAX_CH> spdif[SYS_RSXAUDIO_SPDIF_CNT]{};
};
using data_blk_t = std::array<f32, SYS_RSXAUDIO_STREAM_SAMPLE_CNT * SYS_RSXAUDIO_SERIAL_MAX_CH * 2>;
rsxaudio_data_container(const rsxaudio_hw_param_t& hw_param, const buf_t& buf, bool serial_rdy, bool spdif_0_rdy, bool spdif_1_rdy);
u32 get_data_size(RsxaudioAvportIdx avport);
void get_data(RsxaudioAvportIdx avport, data_blk_t& data_out);
bool data_was_used();
private:
const rsxaudio_hw_param_t& hwp;
const buf_t& out_buf;
std::array<bool, 5> avport_data_avail{};
u8 hdmi_stream_cnt[2]{};
bool data_was_written = false;
rsxaudio_data_container(const rsxaudio_data_container&) = delete;
rsxaudio_data_container& operator=(const rsxaudio_data_container&) = delete;
rsxaudio_data_container(rsxaudio_data_container&&) = delete;
rsxaudio_data_container& operator=(rsxaudio_data_container&&) = delete;
// Mix individual channels into the final PCM stream. Channel map entries that are >= input_ch_cnt are treated as silent.
template<usz output_ch_cnt, usz input_ch_cnt>
requires (output_ch_cnt > 0 && output_ch_cnt <= 8 && input_ch_cnt > 0)
constexpr void mix(const std::array<u8, 8> &ch_map, RsxaudioSampleSize sample_size, const std::array<ra_stream_blk_t, input_ch_cnt> &input_channels, data_blk_t& data_out)
{
const ra_stream_blk_t silent_channel{};
// Build final map
std::array<const ra_stream_blk_t*, output_ch_cnt> real_input_ch = {};
for (u64 ch_idx = 0; ch_idx < output_ch_cnt; ch_idx++)
{
if (ch_map[ch_idx] >= input_ch_cnt)
{
real_input_ch[ch_idx] = &silent_channel;
}
else
{
real_input_ch[ch_idx] = &input_channels[ch_map[ch_idx]];
}
}
const u32 samples_in_buf = sample_size == RsxaudioSampleSize::_16BIT ? SYS_RSXAUDIO_STREAM_SAMPLE_CNT * 2 : SYS_RSXAUDIO_STREAM_SAMPLE_CNT;
for (u32 sample_idx = 0; sample_idx < samples_in_buf * output_ch_cnt; sample_idx += output_ch_cnt)
{
const u32 src_sample_idx = sample_idx / output_ch_cnt;
if constexpr (output_ch_cnt >= 1) data_out[sample_idx + 0] = (*real_input_ch[0])[src_sample_idx];
if constexpr (output_ch_cnt >= 2) data_out[sample_idx + 1] = (*real_input_ch[1])[src_sample_idx];
if constexpr (output_ch_cnt >= 3) data_out[sample_idx + 2] = (*real_input_ch[2])[src_sample_idx];
if constexpr (output_ch_cnt >= 4) data_out[sample_idx + 3] = (*real_input_ch[3])[src_sample_idx];
if constexpr (output_ch_cnt >= 5) data_out[sample_idx + 4] = (*real_input_ch[4])[src_sample_idx];
if constexpr (output_ch_cnt >= 6) data_out[sample_idx + 5] = (*real_input_ch[5])[src_sample_idx];
if constexpr (output_ch_cnt >= 7) data_out[sample_idx + 6] = (*real_input_ch[6])[src_sample_idx];
if constexpr (output_ch_cnt >= 8) data_out[sample_idx + 7] = (*real_input_ch[7])[src_sample_idx];
}
}
};
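// Illustrative sketch (not part of the original header): the rule used by mix() above, expressed as
// a hypothetical standalone helper - a channel-map entry that does not address a real input channel
// produces silence for that output slot.
#if 0
constexpr f32 pick_sample(const std::array<u8, 8>& ch_map, u8 out_ch, u8 input_ch_cnt, const f32* frame)
{
	return ch_map[out_ch] >= input_ch_cnt ? 0.0f : frame[ch_map[out_ch]];
}
#endif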
namespace audio
{
void configure_rsxaudio();
}
class rsxaudio_backend_thread
{
public:
struct port_config
{
AudioFreq freq = AudioFreq::FREQ_48K;
AudioChannelCnt ch_cnt = AudioChannelCnt::STEREO;
auto operator<=>(const port_config&) const = default;
};
struct avport_bit
{
bool hdmi_0 : 1;
bool hdmi_1 : 1;
bool avmulti : 1;
bool spdif_0 : 1;
bool spdif_1 : 1;
};
rsxaudio_backend_thread();
~rsxaudio_backend_thread();
void operator()();
rsxaudio_backend_thread& operator=(thread_state state);
void set_new_stream_param(const std::array<port_config, SYS_RSXAUDIO_AVPORT_CNT> &cfg, avport_bit muted_avports);
void set_mute_state(avport_bit muted_avports);
void add_data(rsxaudio_data_container& cont);
void update_emu_cfg();
u32 get_sample_rate() const;
u8 get_channel_count() const;
static constexpr auto thread_name = "RsxAudio Backend Thread"sv;
SAVESTATE_INIT_POS(8.91); // Depends on audio_out_configuration
private:
struct emu_audio_cfg
{
std::string audio_device{};
s64 desired_buffer_duration = 0;
f64 time_stretching_threshold = 0;
bool buffering_enabled = false;
bool convert_to_s16 = false;
bool enable_time_stretching = false;
bool dump_to_file = false;
AudioChannelCnt channels = AudioChannelCnt::STEREO;
audio_channel_layout channel_layout = audio_channel_layout::automatic;
audio_renderer renderer = audio_renderer::null;
audio_provider provider = audio_provider::none;
RsxaudioAvportIdx avport = RsxaudioAvportIdx::HDMI_0;
auto operator<=>(const emu_audio_cfg&) const = default;
};
struct rsxaudio_state
{
std::array<port_config, SYS_RSXAUDIO_AVPORT_CNT> port{};
};
struct alignas(16) callback_config
{
static constexpr u16 VOL_NOMINAL = 10000;
static constexpr f32 VOL_NOMINAL_INV = 1.0f / VOL_NOMINAL;
u32 freq : 20 = 48000;
u16 target_volume = 10000;
u16 initial_volume = 10000;
u16 current_volume = 10000;
RsxaudioAvportIdx avport_idx = RsxaudioAvportIdx::HDMI_0;
u8 mute_state : SYS_RSXAUDIO_AVPORT_CNT = 0b11111;
u8 input_ch_cnt : 4 = 2;
u8 output_channel_layout : 4 = static_cast<u8>(audio_channel_layout::stereo);
bool ready : 1 = false;
bool convert_to_s16 : 1 = false;
bool cfg_changed : 1 = false;
bool callback_active : 1 = false;
};
static_assert(sizeof(callback_config) <= 16);
struct backend_config
{
port_config cfg{};
RsxaudioAvportIdx avport = RsxaudioAvportIdx::HDMI_0;
};
static constexpr u64 ERROR_SERVICE_PERIOD = 500'000;
static constexpr u64 SERVICE_PERIOD = 10'000;
static constexpr f64 SERVICE_PERIOD_SEC = SERVICE_PERIOD / 1'000'000.0;
static constexpr u64 SERVICE_THRESHOLD = 1'500;
static constexpr f64 TIME_STRETCHING_STEP = 0.1f;
u64 start_time = get_system_time();
u64 time_period_idx = 1;
emu_audio_cfg new_emu_cfg{};
bool emu_cfg_changed = true;
rsxaudio_state new_ra_state{};
bool ra_state_changed = true;
shared_mutex state_update_m{};
cond_variable state_update_c{};
simple_ringbuf ringbuf{};
simple_ringbuf aux_ringbuf{};
std::vector<u8> thread_tmp_buf{};
std::vector<f32> callback_tmp_buf{};
bool use_aux_ringbuf = false;
shared_mutex ringbuf_mutex{};
std::shared_ptr<AudioBackend> backend{};
backend_config backend_current_cfg{ {}, new_emu_cfg.avport };
atomic_t<callback_config> callback_cfg{};
bool backend_error_occured = false;
bool backend_device_changed = false;
AudioDumper dumper{};
audio_resampler resampler{};
// Backend
void backend_init(const rsxaudio_state& ra_state, const emu_audio_cfg& emu_cfg, bool reset_backend = true);
void backend_start();
void backend_stop();
bool backend_playing();
u32 write_data_callback(u32 bytes, void* buf);
void state_changed_callback(AudioStateEvent event);
// Time management
u64 get_time_until_service();
void update_service_time();
void reset_service_time();
// Helpers
static emu_audio_cfg get_emu_cfg();
static u8 gen_mute_state(avport_bit avports);
static RsxaudioAvportIdx convert_avport(audio_avport avport);
};
class rsxaudio_data_thread
{
public:
// Prevent creation of multiple rsxaudio contexts
atomic_t<bool> rsxaudio_ctx_allocated = false;
shared_mutex rsxaudio_obj_upd_m{};
std::shared_ptr<lv2_rsxaudio> rsxaudio_obj_ptr{};
void operator()();
rsxaudio_data_thread& operator=(thread_state state);
rsxaudio_data_thread();
void update_hw_param(std::function<void(rsxaudio_hw_param_t&)> update_callback);
void update_mute_state(RsxaudioPort port, bool muted);
void update_av_mute_state(RsxaudioAvportIdx avport, bool muted, bool force_mute, bool set = true);
void reset_hw();
static constexpr auto thread_name = "RsxAudioData Thread"sv;
private:
rsxaudio_data_container::buf_t output_buf{};
transactional_storage<rsxaudio_hw_param_t> hw_param_ts{std::make_shared<universal_pool>(), std::make_shared<rsxaudio_hw_param_t>()};
rsxaudio_periodic_tmr timer{};
void advance_all_timers();
void extract_audio_data();
static std::pair<bool /*data_present*/, void* /*addr*/> get_ringbuf_addr(RsxaudioPort dst, const lv2_rsxaudio& rsxaudio_obj);
static f32 pcm_to_float(s32 sample);
static f32 pcm_to_float(s16 sample);
static void pcm_serial_process_channel(RsxaudioSampleSize word_bits, ra_stream_blk_t& buf_out_l, ra_stream_blk_t& buf_out_r, const void* buf_in, u8 src_stream);
static void pcm_spdif_process_channel(RsxaudioSampleSize word_bits, ra_stream_blk_t& buf_out_l, ra_stream_blk_t& buf_out_r, const void* buf_in);
bool enqueue_data(RsxaudioPort dst, bool silence, const void* src_addr, const rsxaudio_hw_param_t& hwp);
static rsxaudio_backend_thread::avport_bit calc_avport_mute_state(const rsxaudio_hw_param_t& hwp);
static bool calc_port_active_state(RsxaudioPort port, const rsxaudio_hw_param_t& hwp);
};
using rsx_audio_backend = named_thread<rsxaudio_backend_thread>;
using rsx_audio_data = named_thread<rsxaudio_data_thread>;
// SysCalls
error_code sys_rsxaudio_initialize(vm::ptr<u32> handle);
error_code sys_rsxaudio_finalize(u32 handle);
error_code sys_rsxaudio_import_shared_memory(u32 handle, vm::ptr<u64> addr);
error_code sys_rsxaudio_unimport_shared_memory(u32 handle, vm::ptr<u64> addr);
error_code sys_rsxaudio_create_connection(u32 handle);
error_code sys_rsxaudio_close_connection(u32 handle);
error_code sys_rsxaudio_prepare_process(u32 handle);
error_code sys_rsxaudio_start_process(u32 handle);
error_code sys_rsxaudio_stop_process(u32 handle);
error_code sys_rsxaudio_get_dma_param(u32 handle, u32 flag, vm::ptr<u64> out);
| 17,881 | C++ | .h | 500 | 33.308 | 171 | 0.712223 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,937 | sys_storage.h | RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_storage.h |
#pragma once
#include "Emu/Memory/vm_ptr.h"
#include "Emu/Cell/ErrorCodes.h"
enum Devices : u64
{
ATA_HDD = 0x101000000000007,
BDVD_DRIVE = 0x101000000000006,
PATA0_HDD_DRIVE = 0x101000000000008,
PATA0_BDVD_DRIVE = BDVD_DRIVE,
PATA1_HDD_DRIVE = ATA_HDD,
BUILTIN_FLASH = 0x100000000000001,
NAND_FLASH = BUILTIN_FLASH,
NAND_UNK = 0x100000000000003,
NOR_FLASH = 0x100000000000004,
MEMORY_STICK = 0x103000000000010,
SD_CARD = 0x103000100000010,
COMPACT_FLASH = 0x103000200000010,
USB_MASS_STORAGE_1_BASE = 0x10300000000000A,
USB_MASS_STORAGE_2_BASE = 0x10300000000001F,
};
struct lv2_storage
{
static const u32 id_base = 0x45000000;
static const u32 id_step = 1;
static const u32 id_count = 2048;
SAVESTATE_INIT_POS(45);
const u64 device_id;
const fs::file file;
const u64 mode;
const u64 flags;
lv2_storage(u64 device_id, fs::file&& file, u64 mode, u64 flags)
: device_id(device_id)
, file(std::move(file))
, mode(mode)
, flags(flags)
{
}
};
struct StorageDeviceInfo
{
u8 name[0x20]; // 0x0
be_t<u32> zero; // 0x20
be_t<u32> zero2; // 0x24
be_t<u64> sector_count; // 0x28
be_t<u32> sector_size; // 0x30
be_t<u32> one; // 0x34
u8 flags[8]; // 0x38
};
#define USB_MASS_STORAGE_1(n) (USB_MASS_STORAGE_1_BASE + n) /* For 0-5 */
#define USB_MASS_STORAGE_2(n) (USB_MASS_STORAGE_2_BASE + (n - 6)) /* For 6-127 */
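// Illustrative sanity checks (not part of the original header): the two macros above map USB mass
// storage indices 0-5 and 6-127 onto contiguous device ids starting at their respective bases.
static_assert(USB_MASS_STORAGE_1(0) == 0x10300000000000Aull);
static_assert(USB_MASS_STORAGE_1(5) == 0x10300000000000Full);
static_assert(USB_MASS_STORAGE_2(6) == 0x10300000000001Full);
static_assert(USB_MASS_STORAGE_2(127) == 0x103000000000098ull);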
// SysCalls
error_code sys_storage_open(u64 device, u64 mode, vm::ptr<u32> fd, u64 flags);
error_code sys_storage_close(u32 fd);
error_code sys_storage_read(u32 fd, u32 mode, u32 start_sector, u32 num_sectors, vm::ptr<void> bounce_buf, vm::ptr<u32> sectors_read, u64 flags);
error_code sys_storage_write(u32 fd, u32 mode, u32 start_sector, u32 num_sectors, vm::ptr<void> data, vm::ptr<u32> sectors_wrote, u64 flags);
error_code sys_storage_send_device_command(u32 dev_handle, u64 cmd, vm::ptr<void> in, u64 inlen, vm::ptr<void> out, u64 outlen);
error_code sys_storage_async_configure(u32 fd, u32 io_buf, u32 equeue_id, u32 unk);
error_code sys_storage_async_read();
error_code sys_storage_async_write();
error_code sys_storage_async_cancel();
error_code sys_storage_get_device_info(u64 device, vm::ptr<StorageDeviceInfo> buffer);
error_code sys_storage_get_device_config(vm::ptr<u32> storages, vm::ptr<u32> devices);
error_code sys_storage_report_devices(u32 storages, u32 start, u32 devices, vm::ptr<u64> device_ids);
error_code sys_storage_configure_medium_event(u32 fd, u32 equeue_id, u32 c);
error_code sys_storage_set_medium_polling_interval();
error_code sys_storage_create_region();
error_code sys_storage_delete_region();
error_code sys_storage_execute_device_command(u32 fd, u64 cmd, vm::ptr<char> cmdbuf, u64 cmdbuf_size, vm::ptr<char> databuf, u64 databuf_size, vm::ptr<u32> driver_status);
error_code sys_storage_check_region_acl();
error_code sys_storage_set_region_acl();
error_code sys_storage_async_send_device_command(u32 dev_handle, u64 cmd, vm::ptr<void> in, u64 inlen, vm::ptr<void> out, u64 outlen, u64 unk);
error_code sys_storage_get_region_offset();
error_code sys_storage_set_emulated_speed();
| 3,123 | C++ | .h | 73 | 41.109589 | 171 | 0.73101 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,938 | sys_rsx.h | RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_rsx.h |
#pragma once
#include "Utilities/mutex.h"
#include "Emu/Memory/vm_ptr.h"
#include "Emu/Cell/ErrorCodes.h"
class cpu_thread;
struct RsxDriverInfo
{
be_t<u32> version_driver; // 0x0
be_t<u32> version_gpu; // 0x4
be_t<u32> memory_size; // 0x8
be_t<u32> hardware_channel; // 0xC
be_t<u32> nvcore_frequency; // 0x10
be_t<u32> memory_frequency; // 0x14
be_t<u32> unk1[4]; // 0x18 - 0x24
be_t<u32> unk2; // 0x28 -- pgraph stuff
be_t<u32> reportsNotifyOffset;// 0x2C offset to notify memory
be_t<u32> reportsOffset; // 0x30 offset to reports memory
be_t<u32> reportsReportOffset;// 0x34 offset to reports in reports memory
be_t<u32> unk3[6]; // 0x38-0x54
be_t<u32> systemModeFlags; // 0x54
u8 unk4[0x1064]; // 0x10B8
struct Head
{
be_t<u64> lastFlipTime; // 0x0 last flip time
atomic_be_t<u32> flipFlags; // 0x8 flags to handle flip/queue
be_t<u32> offset; // 0xC
be_t<u32> flipBufferId; // 0x10
be_t<u32> lastQueuedBufferId; // 0x14 todo: this is definitely not this variable, but it's 'unused', so it is repurposed here as the queueId passed to the flip handler
be_t<u32> unk3; // 0x18
be_t<u32> lastVTimeLow; // 0x1C last time for first vhandler freq (low 32-bits)
atomic_be_t<u64> lastSecondVTime; // 0x20 last time for second vhandler freq
be_t<u64> unk4; // 0x28
atomic_be_t<u64> vBlankCount; // 0x30
be_t<u32> unk; // 0x38 possible u32, 'flip field', top/bottom for interlaced
be_t<u32> lastVTimeHigh; // 0x3C last time for first vhandler freq (high 32-bits)
} head[8]; // size = 0x40, 0x200
be_t<u32> unk7; // 0x12B8
be_t<u32> unk8; // 0x12BC
atomic_be_t<u32> handlers; // 0x12C0 -- flags showing which handlers are set
be_t<u32> unk9; // 0x12C4
be_t<u32> unk10; // 0x12C8
be_t<u32> userCmdParam; // 0x12CC
be_t<u32> handler_queue; // 0x12D0
be_t<u32> unk11; // 0x12D4
be_t<u32> unk12; // 0x12D8
be_t<u32> unk13; // 0x12DC
be_t<u32> unk14; // 0x12E0
be_t<u32> unk15; // 0x12E4
be_t<u32> unk16; // 0x12E8
be_t<u32> unk17; // 0x12F0
be_t<u32> lastError; // 0x12F4 error param for cellGcmSetGraphicsHandler
// todo: there's more to this
};
static_assert(sizeof(RsxDriverInfo) == 0x12F8, "rsxSizeTest");
static_assert(sizeof(RsxDriverInfo::Head) == 0x40, "rsxHeadSizeTest");
enum : u64
{
// Unused
SYS_RSX_IO_MAP_IS_STRICT = 1ull << 60
};
// Unofficial event names
enum : u64
{
//SYS_RSX_EVENT_GRAPHICS_ERROR = 1 << 0,
SYS_RSX_EVENT_VBLANK = 1 << 1,
SYS_RSX_EVENT_FLIP_BASE = 1 << 3,
SYS_RSX_EVENT_QUEUE_BASE = 1 << 5,
SYS_RSX_EVENT_USER_CMD = 1 << 7,
SYS_RSX_EVENT_SECOND_VBLANK_BASE = 1 << 10,
SYS_RSX_EVENT_UNMAPPED_BASE = 1ull << 32,
};
struct RsxDmaControl
{
u8 resv[0x40];
atomic_be_t<u32> put;
atomic_be_t<u32> get;
atomic_be_t<u32> ref;
be_t<u32> unk[2];
be_t<u32> unk1;
};
struct RsxSemaphore
{
atomic_be_t<u32> val;
};
struct alignas(16) RsxNotify
{
be_t<u64> timestamp;
be_t<u64> zero;
};
struct alignas(16) RsxReport
{
be_t<u64> timestamp;
be_t<u32> val;
be_t<u32> pad;
};
struct RsxReports
{
RsxSemaphore semaphore[1024];
RsxNotify notify[64];
RsxReport report[2048];
};
struct RsxDisplayInfo
{
be_t<u32> offset{0};
be_t<u32> pitch{0};
be_t<u32> width{0};
be_t<u32> height{0};
ENABLE_BITWISE_SERIALIZATION;
bool valid() const
{
return height != 0u && width != 0u;
}
};
// SysCalls
error_code sys_rsx_device_open(cpu_thread& cpu);
error_code sys_rsx_device_close(cpu_thread& cpu);
error_code sys_rsx_memory_allocate(cpu_thread& cpu, vm::ptr<u32> mem_handle, vm::ptr<u64> mem_addr, u32 size, u64 flags, u64 a5, u64 a6, u64 a7);
error_code sys_rsx_memory_free(cpu_thread& cpu, u32 mem_handle);
error_code sys_rsx_context_allocate(cpu_thread& cpu, vm::ptr<u32> context_id, vm::ptr<u64> lpar_dma_control, vm::ptr<u64> lpar_driver_info, vm::ptr<u64> lpar_reports, u64 mem_ctx, u64 system_mode);
error_code sys_rsx_context_free(ppu_thread& ppu, u32 context_id);
error_code sys_rsx_context_iomap(cpu_thread& cpu, u32 context_id, u32 io, u32 ea, u32 size, u64 flags);
error_code sys_rsx_context_iounmap(cpu_thread& cpu, u32 context_id, u32 io, u32 size);
error_code sys_rsx_context_attribute(u32 context_id, u32 package_id, u64 a3, u64 a4, u64 a5, u64 a6);
error_code sys_rsx_device_map(cpu_thread& cpu, vm::ptr<u64> dev_addr, vm::ptr<u64> a2, u32 dev_id);
error_code sys_rsx_device_unmap(cpu_thread& cpu, u32 dev_id);
error_code sys_rsx_attribute(cpu_thread& cpu, u32 packageId, u32 a2, u32 a3, u32 a4, u32 a5);
| 4,639 | C++ | .h | 126 | 34.904762 | 197 | 0.672598 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,939 | sys_rwlock.h | RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_rwlock.h |
#pragma once
#include "sys_sync.h"
#include "Emu/Memory/vm_ptr.h"
struct sys_rwlock_attribute_t
{
be_t<u32> protocol;
be_t<u32> pshared;
be_t<u64> ipc_key;
be_t<s32> flags;
be_t<u32> pad;
union
{
nse_t<u64, 1> name_u64;
char name[sizeof(u64)];
};
};
struct lv2_rwlock final : lv2_obj
{
static const u32 id_base = 0x88000000;
const lv2_protocol protocol;
const u64 key;
const u64 name;
shared_mutex mutex;
atomic_t<s64> owner{0};
ppu_thread* rq{};
ppu_thread* wq{};
lv2_rwlock(u32 protocol, u64 key, u64 name) noexcept
: protocol{static_cast<u8>(protocol)}
, key(key)
, name(name)
{
}
lv2_rwlock(utils::serial& ar);
static std::shared_ptr<void> load(utils::serial& ar);
void save(utils::serial& ar);
};
// Aux
class ppu_thread;
// Syscalls
error_code sys_rwlock_create(ppu_thread& ppu, vm::ptr<u32> rw_lock_id, vm::ptr<sys_rwlock_attribute_t> attr);
error_code sys_rwlock_destroy(ppu_thread& ppu, u32 rw_lock_id);
error_code sys_rwlock_rlock(ppu_thread& ppu, u32 rw_lock_id, u64 timeout);
error_code sys_rwlock_tryrlock(ppu_thread& ppu, u32 rw_lock_id);
error_code sys_rwlock_runlock(ppu_thread& ppu, u32 rw_lock_id);
error_code sys_rwlock_wlock(ppu_thread& ppu, u32 rw_lock_id, u64 timeout);
error_code sys_rwlock_trywlock(ppu_thread& ppu, u32 rw_lock_id);
error_code sys_rwlock_wunlock(ppu_thread& ppu, u32 rw_lock_id);
constexpr auto _sys_rwlock_trywlock = sys_rwlock_trywlock;
| 1,425 | C++ | .h | 48 | 27.75 | 109 | 0.730205 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,940 | sys_memory.h | RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_memory.h |
#pragma once
#include "Emu/Memory/vm_ptr.h"
#include "Emu/Cell/ErrorCodes.h"
class cpu_thread;
enum lv2_mem_container_id : u32
{
SYS_MEMORY_CONTAINER_ID_INVALID = 0xFFFFFFFF,
};
enum : u64
{
SYS_MEMORY_ACCESS_RIGHT_NONE = 0x00000000000000F0ULL,
SYS_MEMORY_ACCESS_RIGHT_ANY = 0x000000000000000FULL,
SYS_MEMORY_ACCESS_RIGHT_PPU_THR = 0x0000000000000008ULL,
SYS_MEMORY_ACCESS_RIGHT_HANDLER = 0x0000000000000004ULL,
SYS_MEMORY_ACCESS_RIGHT_SPU_THR = 0x0000000000000002ULL,
SYS_MEMORY_ACCESS_RIGHT_RAW_SPU = 0x0000000000000001ULL,
SYS_MEMORY_ATTR_READ_ONLY = 0x0000000000080000ULL,
SYS_MEMORY_ATTR_READ_WRITE = 0x0000000000040000ULL,
};
enum : u64
{
SYS_MEMORY_PAGE_SIZE_4K = 0x100ull,
SYS_MEMORY_PAGE_SIZE_64K = 0x200ull,
SYS_MEMORY_PAGE_SIZE_1M = 0x400ull,
SYS_MEMORY_PAGE_SIZE_MASK = 0xf00ull,
};
enum : u64
{
SYS_MEMORY_GRANULARITY_64K = 0x0000000000000200,
SYS_MEMORY_GRANULARITY_1M = 0x0000000000000400,
SYS_MEMORY_GRANULARITY_MASK = 0x0000000000000f00,
};
enum : u64
{
SYS_MEMORY_PROT_READ_WRITE = 0x0000000000040000,
SYS_MEMORY_PROT_READ_ONLY = 0x0000000000080000,
SYS_MEMORY_PROT_MASK = 0x00000000000f0000,
};
struct sys_memory_info_t
{
be_t<u32> total_user_memory;
be_t<u32> available_user_memory;
};
struct sys_page_attr_t
{
be_t<u64> attribute;
be_t<u64> access_right;
be_t<u32> page_size;
be_t<u32> pad;
};
struct lv2_memory_container
{
static const u32 id_base = 0x3F000000;
static const u32 id_step = 0x1;
static const u32 id_count = 16;
const u32 size; // Amount of "physical" memory in this container
const lv2_mem_container_id id; // ID of the container if placed in IDM, otherwise SYS_MEMORY_CONTAINER_ID_INVALID
atomic_t<u32> used{}; // Amount of "physical" memory currently used
SAVESTATE_INIT_POS(1);
lv2_memory_container(u32 size, bool from_idm = false) noexcept;
lv2_memory_container(utils::serial& ar, bool from_idm = false) noexcept;
static std::shared_ptr<void> load(utils::serial& ar);
void save(utils::serial& ar);
static lv2_memory_container* search(u32 id);
// Try to get specified amount of "physical" memory
// Values greater than UINT32_MAX will fail
u32 take(u64 amount)
{
auto [_, result] = used.fetch_op([&](u32& value) -> u32
{
if (size - value >= amount)
{
value += static_cast<u32>(amount);
return static_cast<u32>(amount);
}
return 0;
});
return result;
}
u32 free(u64 amount)
{
auto [_, result] = used.fetch_op([&](u32& value) -> u32
{
if (value >= amount)
{
value -= static_cast<u32>(amount);
return static_cast<u32>(amount);
}
return 0;
});
// Sanity check
ensure(result == amount);
return result;
}
};
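// Illustrative sketch (not part of the original header): take() returns the granted amount (0 on
// failure) and free() returns the released amount, so callers typically pair the two. The size
// values below are hypothetical.
#if 0
lv2_memory_container ct(0x1000000); // 16 MiB container
if (const u32 granted = ct.take(0x100000))
{
	// ... back a 1 MiB allocation with this container ...
	ct.free(granted);
}
#endif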
struct sys_memory_user_memory_stat_t
{
be_t<u32> a; // 0x0
be_t<u32> b; // 0x4
be_t<u32> c; // 0x8
be_t<u32> d; // 0xc
be_t<u32> e; // 0x10
be_t<u32> f; // 0x14
be_t<u32> g; // 0x18
};
// SysCalls
error_code sys_memory_allocate(cpu_thread& cpu, u64 size, u64 flags, vm::ptr<u32> alloc_addr);
error_code sys_memory_allocate_from_container(cpu_thread& cpu, u64 size, u32 cid, u64 flags, vm::ptr<u32> alloc_addr);
error_code sys_memory_free(cpu_thread& cpu, u32 start_addr);
error_code sys_memory_get_page_attribute(cpu_thread& cpu, u32 addr, vm::ptr<sys_page_attr_t> attr);
error_code sys_memory_get_user_memory_size(cpu_thread& cpu, vm::ptr<sys_memory_info_t> mem_info);
error_code sys_memory_get_user_memory_stat(cpu_thread& cpu, vm::ptr<sys_memory_user_memory_stat_t> mem_stat);
error_code sys_memory_container_create(cpu_thread& cpu, vm::ptr<u32> cid, u64 size);
error_code sys_memory_container_destroy(cpu_thread& cpu, u32 cid);
error_code sys_memory_container_get_size(cpu_thread& cpu, vm::ptr<sys_memory_info_t> mem_info, u32 cid);
error_code sys_memory_container_destroy_parent_with_childs(cpu_thread& cpu, u32 cid, u32 must_0, vm::ptr<u32> mc_child);
| 3,872 | C++ | .h | 116 | 31.206897 | 120 | 0.725884 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,941 | sys_time.h | RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_time.h |
#pragma once
#include "Emu/Memory/vm_ptr.h"
#include "Emu/Cell/ErrorCodes.h"
// SysCalls
error_code sys_time_set_timezone(s32 timezone, s32 summertime);
error_code sys_time_get_timezone(vm::ptr<s32> timezone, vm::ptr<s32> summertime);
error_code sys_time_get_current_time(vm::ptr<s64> sec, vm::ptr<s64> nsec);
error_code sys_time_set_current_time(s64 sec, s64 nsec);
u64 sys_time_get_timebase_frequency();
error_code sys_time_get_rtc(vm::ptr<u64> rtc);
| 456 | C++ | .h | 10 | 44.3 | 81 | 0.758465 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,942 | sys_console.h | RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_console.h |
#pragma once
#include "Emu/Memory/vm_ptr.h"
// SysCalls
error_code sys_console_write(vm::cptr<char> buf, u32 len);
constexpr auto sys_console_write2 = sys_console_write;
| 173 | C++ | .h | 5 | 33 | 58 | 0.769697 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,943 | sys_cond.h | RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_cond.h |
#pragma once
#include "sys_sync.h"
#include "sys_mutex.h"
struct lv2_mutex;
struct sys_cond_attribute_t
{
be_t<u32> pshared;
be_t<s32> flags;
be_t<u64> ipc_key;
union
{
nse_t<u64, 1> name_u64;
char name[sizeof(u64)];
};
};
struct lv2_cond final : lv2_obj
{
static const u32 id_base = 0x86000000;
const u64 key;
const u64 name;
const u32 mtx_id;
std::shared_ptr<lv2_mutex> mutex; // Associated Mutex
ppu_thread* sq{};
lv2_cond(u64 key, u64 name, u32 mtx_id, std::shared_ptr<lv2_mutex> mutex)
: key(key)
, name(name)
, mtx_id(mtx_id)
, mutex(std::move(mutex))
{
}
lv2_cond(utils::serial& ar);
static std::shared_ptr<void> load(utils::serial& ar);
void save(utils::serial& ar);
CellError on_id_create();
};
class ppu_thread;
// Syscalls
error_code sys_cond_create(ppu_thread& ppu, vm::ptr<u32> cond_id, u32 mutex_id, vm::ptr<sys_cond_attribute_t> attr);
error_code sys_cond_destroy(ppu_thread& ppu, u32 cond_id);
error_code sys_cond_wait(ppu_thread& ppu, u32 cond_id, u64 timeout);
error_code sys_cond_signal(ppu_thread& ppu, u32 cond_id);
error_code sys_cond_signal_all(ppu_thread& ppu, u32 cond_id);
error_code sys_cond_signal_to(ppu_thread& ppu, u32 cond_id, u32 thread_id);
| 1,219 | C++ | .h | 43 | 26.325581 | 116 | 0.714531 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,944 | sys_game.h | RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_game.h |
#pragma once
void abort_lv2_watchdog();
error_code _sys_game_watchdog_start(u32 timeout);
error_code _sys_game_watchdog_stop();
error_code _sys_game_watchdog_clear();
error_code _sys_game_set_system_sw_version(u64 version);
u64 _sys_game_get_system_sw_version();
error_code _sys_game_board_storage_read(vm::ptr<u8> buffer, vm::ptr<u8> status);
error_code _sys_game_board_storage_write(vm::ptr<u8> buffer, vm::ptr<u8> status);
error_code _sys_game_get_rtc_status(vm::ptr<s32> status);
| 486 | C++ | .h | 10 | 47.4 | 81 | 0.753165 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,945 | sys_prx.h | RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_prx.h |
#pragma once
#include "sys_sync.h"
#include "Emu/Cell/PPUAnalyser.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Memory/vm_ptr.h"
// Return codes
enum CellPrxError : u32
{
CELL_PRX_ERROR_ERROR = 0x80011001, // Error state
CELL_PRX_ERROR_ILLEGAL_PERM = 0x800110d1, // No permission to execute API
CELL_PRX_ERROR_UNKNOWN_MODULE = 0x8001112e, // Specified PRX could not be found
CELL_PRX_ERROR_ALREADY_STARTED = 0x80011133, // Specified PRX is already started
CELL_PRX_ERROR_NOT_STARTED = 0x80011134, // Specified PRX is not started
CELL_PRX_ERROR_ALREADY_STOPPED = 0x80011135, // Specified PRX is already stopped
CELL_PRX_ERROR_CAN_NOT_STOP = 0x80011136, // Specified PRX must not be stopped
CELL_PRX_ERROR_NOT_REMOVABLE = 0x80011138, // Specified PRX must not be deleted
CELL_PRX_ERROR_LIBRARY_NOT_YET_LINKED = 0x8001113a, // Called unlinked function
CELL_PRX_ERROR_LIBRARY_FOUND = 0x8001113b, // Specified library is already registered
CELL_PRX_ERROR_LIBRARY_NOTFOUND = 0x8001113c, // Specified library is not registered
CELL_PRX_ERROR_ILLEGAL_LIBRARY = 0x8001113d, // Library structure is invalid
CELL_PRX_ERROR_LIBRARY_INUSE = 0x8001113e, // Library cannot be deleted because it is linked
CELL_PRX_ERROR_ALREADY_STOPPING = 0x8001113f, // Specified PRX is in the process of stopping
CELL_PRX_ERROR_UNSUPPORTED_PRX_TYPE = 0x80011148, // Specified PRX format is invalid and cannot be loaded
CELL_PRX_ERROR_INVAL = 0x80011324, // Argument value is invalid
CELL_PRX_ERROR_ILLEGAL_PROCESS = 0x80011801, // Specified process does not exist
CELL_PRX_ERROR_NO_LIBLV2 = 0x80011881, // liblv2.sprx does not exist
CELL_PRX_ERROR_UNSUPPORTED_ELF_TYPE = 0x80011901, // ELF type of specified file is not supported
CELL_PRX_ERROR_UNSUPPORTED_ELF_CLASS = 0x80011902, // ELF class of specified file is not supported
CELL_PRX_ERROR_UNDEFINED_SYMBOL = 0x80011904, // References undefined symbols
CELL_PRX_ERROR_UNSUPPORTED_RELOCATION_TYPE = 0x80011905, // Uses unsupported relocation type
CELL_PRX_ERROR_ELF_IS_REGISTERED = 0x80011910, // Fixed ELF is already registered
CELL_PRX_ERROR_NO_EXIT_ENTRY = 0x80011911,
};
enum
{
SYS_PRX_MODULE_FILENAME_SIZE = 512
};
struct sys_prx_get_module_id_by_name_option_t
{
be_t<u64> size;
vm::ptr<void> base;
};
struct sys_prx_load_module_option_t
{
be_t<u64> size;
vm::bptr<void> base_addr;
};
struct sys_prx_segment_info_t
{
be_t<u64> base;
be_t<u64> filesz;
be_t<u64> memsz;
be_t<u64> index;
be_t<u64> type;
};
struct sys_prx_module_info_t
{
be_t<u64> size; // 0
char name[30]; // 8
char version[2]; // 0x26
be_t<u32> modattribute; // 0x28
be_t<u32> start_entry; // 0x2c
be_t<u32> stop_entry; // 0x30
be_t<u32> all_segments_num; // 0x34
vm::bptr<char> filename; // 0x38
be_t<u32> filename_size; // 0x3c
vm::bptr<sys_prx_segment_info_t> segments; // 0x40
be_t<u32> segments_num; // 0x44
};
struct sys_prx_module_info_option_t
{
be_t<u64> size; // 0x10
vm::bptr<sys_prx_module_info_t> info;
};
struct sys_prx_start_module_option_t
{
be_t<u64> size;
};
struct sys_prx_stop_module_option_t
{
be_t<u64> size;
};
struct sys_prx_start_stop_module_option_t
{
be_t<u64> size;
be_t<u64> cmd;
vm::bptr<s32(u32 argc, vm::ptr<void> argv), u64> entry;
be_t<u64> res;
vm::bptr<s32(vm::ptr<s32(u32, vm::ptr<void>), u64>, u32 argc, vm::ptr<void> argv), u64> entry2;
};
struct sys_prx_unload_module_option_t
{
be_t<u64> size;
};
struct sys_prx_get_module_list_t
{
be_t<u64> size;
be_t<u32> max;
be_t<u32> count;
vm::bptr<u32> idlist;
};
struct sys_prx_get_module_list_option_t
{
be_t<u64> size; // 0x20
be_t<u32> pad;
be_t<u32> max;
be_t<u32> count;
vm::bptr<u32> idlist;
be_t<u32> unk; // 0
};
struct sys_prx_register_module_0x20_t
{
be_t<u64> size; // 0x0
be_t<u32> toc; // 0x8
be_t<u32> toc_size; // 0xC
vm::bptr<void> stubs_ea; // 0x10
be_t<u32> stubs_size; // 0x14
vm::bptr<void> error_handler; // 0x18
char pad[4]; // 0x1C
};
struct sys_prx_register_module_0x30_type_1_t
{
be_t<u64> size; // 0x0
be_t<u64> type; // 0x8
be_t<u32> unk3; // 0x10
be_t<u32> unk4; // 0x14
vm::bptr<void> lib_entries_ea; // 0x18
be_t<u32> lib_entries_size; // 0x1C
vm::bptr<void> lib_stub_ea; // 0x20
be_t<u32> lib_stub_size; // 0x24
vm::bptr<void> error_handler; // 0x28
char pad[4]; // 0x2C
};
enum : u32
{
SYS_PRX_RESIDENT = 0,
SYS_PRX_NO_RESIDENT = 1,
SYS_PRX_START_OK = 0,
SYS_PRX_STOP_SUCCESS = 0,
SYS_PRX_STOP_OK = 0,
SYS_PRX_STOP_FAILED = 1
};
// Unofficial names for PRX state
enum : u32
{
PRX_STATE_INITIALIZED,
PRX_STATE_STARTING, // In-between state between initialized and started (internal)
PRX_STATE_STARTED,
PRX_STATE_STOPPING, // In-between state between started and stopped (internal)
PRX_STATE_STOPPED, // Last state, the module cannot be restarted
PRX_STATE_DESTROYED, // Last state, the module cannot be restarted
};
struct lv2_prx final : lv2_obj, ppu_module
{
static const u32 id_base = 0x23000000;
atomic_t<u32> state = PRX_STATE_INITIALIZED;
shared_mutex mutex;
std::unordered_map<u32, u32> specials;
std::unordered_map<u32, void*> imports;
vm::ptr<s32(u32 argc, vm::ptr<void> argv)> start = vm::null;
vm::ptr<s32(u32 argc, vm::ptr<void> argv)> stop = vm::null;
vm::ptr<s32(u64 callback, u64 argc, vm::ptr<void, u64> argv)> prologue = vm::null;
vm::ptr<s32(u64 callback, u64 argc, vm::ptr<void, u64> argv)> epilogue = vm::null;
vm::ptr<s32()> exit = vm::null;
char module_info_name[28]{};
u8 module_info_version[2]{};
be_t<u16> module_info_attributes{};
u32 exports_start = umax;
u32 exports_end = 0;
std::basic_string<char> m_loaded_flags;
std::basic_string<char> m_external_loaded_flags;
void load_exports(); // (Re)load exports
void restore_exports(); // For savestates
void unload_exports();
lv2_prx() noexcept = default;
lv2_prx(utils::serial&) {}
static std::shared_ptr<void> load(utils::serial&);
void save(utils::serial& ar);
};
enum : u64
{
SYS_PRX_LOAD_MODULE_FLAGS_FIXEDADDR = 0x1ull,
SYS_PRX_LOAD_MODULE_FLAGS_INVALIDMASK = ~SYS_PRX_LOAD_MODULE_FLAGS_FIXEDADDR,
};
// PPC
enum
{
SYS_PRX_R_PPC_ADDR32 = 1,
SYS_PRX_R_PPC_ADDR16_LO = 4,
SYS_PRX_R_PPC_ADDR16_HI = 5,
SYS_PRX_R_PPC_ADDR16_HA = 6,
SYS_PRX_R_PPC64_ADDR32 = SYS_PRX_R_PPC_ADDR32,
SYS_PRX_R_PPC64_ADDR16_LO = SYS_PRX_R_PPC_ADDR16_LO,
SYS_PRX_R_PPC64_ADDR16_HI = SYS_PRX_R_PPC_ADDR16_HI,
SYS_PRX_R_PPC64_ADDR16_HA = SYS_PRX_R_PPC_ADDR16_HA,
SYS_PRX_R_PPC64_ADDR64 = 38,
SYS_PRX_VARLINK_TERMINATE32 = 0x00000000
};
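// Illustrative note (not part of the original header): the ADDR16_LO/HI/HA names mirror the
// standard PowerPC ELF relocations, where HA compensates for the sign of the low half so that
// (ha << 16) + (s16)lo reconstructs the original 32-bit address. Hypothetical helpers:
#if 0
constexpr u16 ppc_lo(u32 addr) { return static_cast<u16>(addr); }
constexpr u16 ppc_hi(u32 addr) { return static_cast<u16>(addr >> 16); }
constexpr u16 ppc_ha(u32 addr) { return static_cast<u16>((addr + 0x8000) >> 16); }
static_assert(ppc_ha(0x12349000) == 0x1235); // low half is negative as s16, so HA rounds the high half up
#endif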
// SysCalls
error_code sys_prx_get_ppu_guid(ppu_thread& ppu);
error_code _sys_prx_load_module_by_fd(ppu_thread& ppu, s32 fd, u64 offset, u64 flags, vm::ptr<sys_prx_load_module_option_t> pOpt);
error_code _sys_prx_load_module_on_memcontainer_by_fd(ppu_thread& ppu, s32 fd, u64 offset, u32 mem_ct, u64 flags, vm::ptr<sys_prx_load_module_option_t> pOpt);
error_code _sys_prx_load_module_list(ppu_thread& ppu, s32 count, vm::cpptr<char, u32, u64> path_list, u64 flags, vm::ptr<sys_prx_load_module_option_t> pOpt, vm::ptr<u32> id_list);
error_code _sys_prx_load_module_list_on_memcontainer(ppu_thread& ppu, s32 count, vm::cpptr<char, u32, u64> path_list, u32 mem_ct, u64 flags, vm::ptr<sys_prx_load_module_option_t> pOpt, vm::ptr<u32> id_list);
error_code _sys_prx_load_module_on_memcontainer(ppu_thread& ppu, vm::cptr<char> path, u32 mem_ct, u64 flags, vm::ptr<sys_prx_load_module_option_t> pOpt);
error_code _sys_prx_load_module(ppu_thread& ppu, vm::cptr<char> path, u64 flags, vm::ptr<sys_prx_load_module_option_t> pOpt);
error_code _sys_prx_start_module(ppu_thread& ppu, u32 id, u64 flags, vm::ptr<sys_prx_start_stop_module_option_t> pOpt);
error_code _sys_prx_stop_module(ppu_thread& ppu, u32 id, u64 flags, vm::ptr<sys_prx_start_stop_module_option_t> pOpt);
error_code _sys_prx_unload_module(ppu_thread& ppu, u32 id, u64 flags, vm::ptr<sys_prx_unload_module_option_t> pOpt);
error_code _sys_prx_register_module(ppu_thread& ppu, vm::cptr<char> name, vm::ptr<void> opt);
error_code _sys_prx_query_module(ppu_thread& ppu);
error_code _sys_prx_register_library(ppu_thread& ppu, vm::ptr<void> library);
error_code _sys_prx_unregister_library(ppu_thread& ppu, vm::ptr<void> library);
error_code _sys_prx_link_library(ppu_thread& ppu);
error_code _sys_prx_unlink_library(ppu_thread& ppu);
error_code _sys_prx_query_library(ppu_thread& ppu);
error_code _sys_prx_get_module_list(ppu_thread& ppu, u64 flags, vm::ptr<sys_prx_get_module_list_option_t> pInfo);
error_code _sys_prx_get_module_info(ppu_thread& ppu, u32 id, u64 flags, vm::ptr<sys_prx_module_info_option_t> pOpt);
error_code _sys_prx_get_module_id_by_name(ppu_thread& ppu, vm::cptr<char> name, u64 flags, vm::ptr<sys_prx_get_module_id_by_name_option_t> pOpt);
error_code _sys_prx_get_module_id_by_address(ppu_thread& ppu, u32 addr);
error_code _sys_prx_start(ppu_thread& ppu);
error_code _sys_prx_stop(ppu_thread& ppu);
| 9,153 | C++ | .h | 222 | 39.486486 | 207 | 0.700764 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,946 | sys_timer.h | RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_timer.h |
#pragma once
#include "sys_event.h"
#include "Utilities/Thread.h"
#include "Emu/Memory/vm_ptr.h"
// Timer State
enum : u32
{
SYS_TIMER_STATE_STOP = 0,
SYS_TIMER_STATE_RUN = 1,
};
struct sys_timer_information_t
{
be_t<u64> next_expire; // system_time_t
be_t<u64> period;
be_t<u32> timer_state;
be_t<u32> pad;
};
struct lv2_timer : lv2_obj
{
static const u32 id_base = 0x11000000;
shared_mutex mutex;
atomic_t<u32> state{SYS_TIMER_STATE_STOP};
std::shared_ptr<lv2_event_queue> port;
u64 source;
u64 data1;
u64 data2;
atomic_t<u64> expire{0}; // Next expiration time
atomic_t<u64> period{0}; // Period (oneshot if 0)
u64 check(u64 _now) noexcept;
u64 check_unlocked(u64 _now) noexcept;
lv2_timer() noexcept
: lv2_obj{1}
{
}
void get_information(sys_timer_information_t& info) const
{
if (state == SYS_TIMER_STATE_RUN)
{
info.timer_state = state;
info.next_expire = expire;
info.period = period;
}
else
{
info.timer_state = SYS_TIMER_STATE_STOP;
info.next_expire = 0;
info.period = 0;
}
}
lv2_timer(utils::serial& ar);
void save(utils::serial& ar);
};
class ppu_thread;
// Syscalls
error_code sys_timer_create(ppu_thread&, vm::ptr<u32> timer_id);
error_code sys_timer_destroy(ppu_thread&, u32 timer_id);
error_code sys_timer_get_information(ppu_thread&, u32 timer_id, vm::ptr<sys_timer_information_t> info);
error_code _sys_timer_start(ppu_thread&, u32 timer_id, u64 basetime, u64 period); // basetime type changed from s64
error_code sys_timer_stop(ppu_thread&, u32 timer_id);
error_code sys_timer_connect_event_queue(ppu_thread&, u32 timer_id, u32 queue_id, u64 name, u64 data1, u64 data2);
error_code sys_timer_disconnect_event_queue(ppu_thread&, u32 timer_id);
error_code sys_timer_sleep(ppu_thread&, u32 sleep_time);
error_code sys_timer_usleep(ppu_thread&, u64 sleep_time);
| 1,860 | C++ | .h | 63 | 27.365079 | 115 | 0.720382 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,947 | sys_uart.h | RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_uart.h |
#pragma once
#include "Emu/Memory/vm_ptr.h"
#include "Utilities/mutex.h"
#include "Utilities/cond.h"
#include "Utilities/simple_ringbuf.h"
enum : u32
{
PS3AV_RX_BUF_SIZE = 0x800,
PS3AV_TX_BUF_SIZE = 0x800,
PS3AV_VERSION = 0x205,
PS3AV_CID_AV_INIT = 0x00000001,
PS3AV_CID_AV_FIN = 0x00000002,
PS3AV_CID_AV_GET_HW_CONF = 0x00000003,
PS3AV_CID_AV_GET_MONITOR_INFO = 0x00000004,
PS3AV_CID_AV_GET_BKSV_LIST = 0x00000005,
PS3AV_CID_AV_ENABLE_EVENT = 0x00000006,
PS3AV_CID_AV_DISABLE_EVENT = 0x00000007,
PS3AV_CID_AV_GET_PORT_STATE = 0x00000009,
PS3AV_CID_AV_TV_MUTE = 0x0000000A,
PS3AV_CID_AV_NULL_CMD = 0x0000000B,
PS3AV_CID_AV_GET_AKSV = 0x0000000C,
PS3AV_CID_AV_UNK4 = 0x0000000D,
PS3AV_CID_AV_UNK5 = 0x0000000E,
PS3AV_CID_AV_VIDEO_MUTE = 0x00010002,
PS3AV_CID_AV_VIDEO_DISABLE_SIG = 0x00010003,
PS3AV_CID_AV_VIDEO_YTRAPCONTROL = 0x00010004,
PS3AV_CID_AV_VIDEO_UNK5 = 0x00010005,
PS3AV_CID_AV_VIDEO_UNK6 = 0x00010006,
PS3AV_CID_AV_AUDIO_MUTE = 0x00020002,
PS3AV_CID_AV_ACP_CTRL = 0x00020003,
PS3AV_CID_AV_SET_ACP_PACKET = 0x00020004,
PS3AV_CID_AV_ADD_SIGNAL_CTL = 0x00030001,
PS3AV_CID_AV_SET_CC_CODE = 0x00030002,
PS3AV_CID_AV_SET_CGMS_WSS = 0x00030003,
PS3AV_CID_AV_SET_MACROVISION = 0x00030004,
PS3AV_CID_AV_UNK7 = 0x00030005,
PS3AV_CID_AV_UNK8 = 0x00030006,
PS3AV_CID_AV_UNK9 = 0x00030007,
PS3AV_CID_AV_HDMI_MODE = 0x00040001,
PS3AV_CID_AV_UNK15 = 0x00050001,
PS3AV_CID_AV_CEC_MESSAGE = 0x000A0001,
PS3AV_CID_AV_GET_CEC_CONFIG = 0x000A0002,
PS3AV_CID_AV_UNK11 = 0x000A0003,
PS3AV_CID_AV_UNK12 = 0x000A0004,
PS3AV_CID_AV_UNK13 = 0x000A0005,
PS3AV_CID_AV_UNK14 = 0x000A0006,
PS3AV_CID_VIDEO_INIT = 0x01000001,
PS3AV_CID_VIDEO_MODE = 0x01000002,
PS3AV_CID_VIDEO_ROUTE = 0x01000003,
PS3AV_CID_VIDEO_FORMAT = 0x01000004,
PS3AV_CID_VIDEO_PITCH = 0x01000005,
PS3AV_CID_VIDEO_GET_HW_CONF = 0x01000006,
PS3AV_CID_VIDEO_GET_REG = 0x01000008,
PS3AV_CID_VIDEO_UNK = 0x01000009,
PS3AV_CID_VIDEO_UNK1 = 0x0100000A,
PS3AV_CID_VIDEO_UNK2 = 0x0100000B,
PS3AV_CID_VIDEO_UNK3 = 0x0100000C,
PS3AV_CID_AUDIO_INIT = 0x02000001,
PS3AV_CID_AUDIO_MODE = 0x02000002,
PS3AV_CID_AUDIO_MUTE = 0x02000003,
PS3AV_CID_AUDIO_ACTIVE = 0x02000004,
PS3AV_CID_AUDIO_INACTIVE = 0x02000005,
PS3AV_CID_AUDIO_SPDIF_BIT = 0x02000006,
PS3AV_CID_AUDIO_CTRL = 0x02000007,
PS3AV_CID_AVB_PARAM = 0x04000001,
PS3AV_CID_EVENT_UNPLUGGED = 0x10000001,
PS3AV_CID_EVENT_PLUGGED = 0x10000002,
PS3AV_CID_EVENT_HDCP_DONE = 0x10000003,
PS3AV_CID_EVENT_HDCP_FAIL = 0x10000004,
PS3AV_CID_EVENT_HDCP_REAUTH = 0x10000005,
PS3AV_CID_EVENT_HDCP_ERROR = 0x10000006,
PS3AV_REPLY_BIT = 0x80000000,
PS3AV_RESBIT_720x480P = 0x0003, /* 0x0001 | 0x0002 */
PS3AV_RESBIT_720x576P = 0x0003, /* 0x0001 | 0x0002 */
PS3AV_RESBIT_1280x720P = 0x0004,
PS3AV_RESBIT_1920x1080I = 0x0008,
PS3AV_RESBIT_1920x1080P = 0x4000,
PS3AV_MONITOR_TYPE_NONE = 0,
PS3AV_MONITOR_TYPE_HDMI = 1,
PS3AV_MONITOR_TYPE_DVI = 2,
PS3AV_MONITOR_TYPE_AVMULTI = 3,
PS3AV_COLORIMETRY_xvYCC_601 = 1,
PS3AV_COLORIMETRY_xvYCC_709 = 2,
PS3AV_COLORIMETRY_MD0 = 1 << 4,
PS3AV_COLORIMETRY_MD1 = 1 << 5,
PS3AV_COLORIMETRY_MD2 = 1 << 6,
PS3AV_CS_SUPPORTED = 1,
PS3AV_RGB_SELECTABLE_QAUNTIZATION_RANGE = 8,
PS3AV_12BIT_COLOR = 16,
PS3AV_MON_INFO_AUDIO_BLK_MAX = 16,
PS3AV_MON_INFO_AUDIO_TYPE_LPCM = 1,
PS3AV_MON_INFO_AUDIO_TYPE_AC3 = 2,
PS3AV_MON_INFO_AUDIO_TYPE_AAC = 6,
PS3AV_MON_INFO_AUDIO_TYPE_DTS = 7,
PS3AV_MON_INFO_AUDIO_TYPE_DDP = 10,
PS3AV_MON_INFO_AUDIO_TYPE_DTS_HD = 11,
PS3AV_MON_INFO_AUDIO_TYPE_DOLBY_THD = 12,
PS3AV_HDMI_BEHAVIOR_HDCP_OFF = 0x01,
PS3AV_HDMI_BEHAVIOR_DVI = 0x40,
PS3AV_HDMI_BEHAVIOR_EDID_PASS = 0x80,
PS3AV_HDMI_BEHAVIOR_NORMAL = 0xFF,
PS3AV_EVENT_BIT_UNPLUGGED = 0x01,
PS3AV_EVENT_BIT_PLUGGED = 0x02,
PS3AV_EVENT_BIT_HDCP_DONE = 0x04,
PS3AV_EVENT_BIT_HDCP_FAIL = 0x08,
PS3AV_EVENT_BIT_HDCP_REAUTH = 0x10,
PS3AV_EVENT_BIT_HDCP_TOPOLOGY = 0x20,
PS3AV_EVENT_BIT_UNK = 0x80000000,
PS3AV_HEAD_A_HDMI = 0,
PS3AV_HEAD_B_ANALOG = 1,
PS3AV_AUDIO_PORT_HDMI_0 = 1 << 0,
PS3AV_AUDIO_PORT_HDMI_1 = 1 << 1,
PS3AV_AUDIO_PORT_AVMULTI = 1 << 10,
PS3AV_AUDIO_PORT_SPDIF_0 = 1 << 20,
PS3AV_AUDIO_PORT_SPDIF_1 = 1 << 21,
PS3AV_STATUS_SUCCESS = 0x00,
PS3AV_STATUS_RECEIVE_VUART_ERROR = 0x01,
PS3AV_STATUS_SYSCON_COMMUNICATE_FAIL = 0x02,
PS3AV_STATUS_INVALID_COMMAND = 0x03,
PS3AV_STATUS_INVALID_PORT = 0x04,
PS3AV_STATUS_INVALID_VID = 0x05,
PS3AV_STATUS_INVALID_COLOR_SPACE = 0x06,
PS3AV_STATUS_INVALID_FS = 0x07,
PS3AV_STATUS_INVALID_AUDIO_CH = 0x08,
PS3AV_STATUS_UNSUPPORTED_VERSION = 0x09,
PS3AV_STATUS_INVALID_SAMPLE_SIZE = 0x0A,
PS3AV_STATUS_FAILURE = 0x0B,
PS3AV_STATUS_UNSUPPORTED_COMMAND = 0x0C,
PS3AV_STATUS_BUFFER_OVERFLOW = 0x0D,
PS3AV_STATUS_INVALID_VIDEO_PARAM = 0x0E,
PS3AV_STATUS_NO_SEL = 0x0F,
PS3AV_STATUS_INVALID_AV_PARAM = 0x10,
PS3AV_STATUS_INVALID_AUDIO_PARAM = 0x11,
PS3AV_STATUS_UNSUPPORTED_HDMI_MODE = 0x12,
PS3AV_STATUS_NO_SYNC_HEAD = 0x13,
PS3AV_STATUS_UNK_0x14 = 0x14,
};
const u8 PS3AV_AKSV_VALUE[5] = { 0x00, 0x00, 0x0F, 0xFF, 0xFF };
const u8 PS3AV_BKSV_VALUE[5] = { 0xFF, 0xFF, 0xF0, 0x00, 0x00 };
enum PS3_AV_OP_MODE : u32
{
// BIG operation modes could send more than 4096 bytes
NOT_BLOCKING_BIG_OP = 0,
BLOCKING_BIG_OP = 1,
NOT_BLOCKING_OP = 2,
};
enum class UartHdmiEvent : u8
{
NONE = 0,
UNPLUGGED = 1,
PLUGGED = 2,
HDCP_DONE = 3,
};
enum class UartAudioCtrlID : u32
{
DAC_RESET = 0,
DAC_DE_EMPHASIS = 1,
AVCLK = 2,
};
enum class UartAudioAvport : u8
{
HDMI_0 = 0x0,
HDMI_1 = 0x1,
AVMULTI_0 = 0x10,
AVMULTI_1 = 0x11,
SPDIF_0 = 0x20,
SPDIF_1 = 0x21,
};
enum class UartAudioSource : u32
{
SERIAL = 0,
SPDIF = 1,
};
enum class UartAudioFreq : u32
{
_32K = 1,
_44K = 2,
_48K = 3,
_88K = 4,
_96K = 5,
_176K = 6,
_192K = 7,
};
enum class UartAudioFormat : u32
{
PCM = 1,
BITSTREAM = 0xFF,
};
enum class UartAudioSampleSize : u32
{
_16BIT = 1,
_20BIT = 2,
_24BIT = 3,
};
class vuart_hdmi_event_handler
{
public:
vuart_hdmi_event_handler(u64 time_offset = 0);
void set_target_state(UartHdmiEvent start_state, UartHdmiEvent end_state);
bool events_available();
u64 time_until_next();
UartHdmiEvent get_occured_event();
private:
static constexpr u64 EVENT_TIME_DURATION = 20000;
static constexpr u64 EVENT_TIME_THRESHOLD = 1000;
u64 time_of_next_event = 0;
const u64 time_offset = 0;
// Assume that syscon initializes HDMI to the plugged state
UartHdmiEvent current_state = UartHdmiEvent::PLUGGED;
UartHdmiEvent current_to_state = UartHdmiEvent::PLUGGED;
UartHdmiEvent base_state = UartHdmiEvent::NONE;
UartHdmiEvent target_state = UartHdmiEvent::NONE;
void schedule_next();
void advance_state();
};
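// Illustrative sketch (not part of the original header): the handler walks from a start state to an
// end state one event at a time (e.g. UNPLUGGED -> PLUGGED -> HDCP_DONE); a hypothetical polling
// loop (ignoring the time_until_next() pacing) would look like this.
#if 0
vuart_hdmi_event_handler ev_handler{};
ev_handler.set_target_state(UartHdmiEvent::UNPLUGGED, UartHdmiEvent::HDCP_DONE);
while (ev_handler.events_available())
{
	const UartHdmiEvent ev = ev_handler.get_occured_event();
	// ... forward ev to the guest-visible event queue ...
}
#endif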
class vuart_av_thread;
struct ps3av_cmd
{
virtual u16 get_size(vuart_av_thread &vuart, const void *pkt_buf) = 0;
virtual void execute(vuart_av_thread &vuart, const void *pkt_buf) = 0;
virtual ~ps3av_cmd() {}
};
class vuart_av_thread
{
public:
atomic_t<bool> initialized{};
shared_mutex rx_mutex{};
shared_mutex tx_mutex{};
shared_mutex tx_wake_m{};
cond_variable tx_wake_c{};
shared_mutex tx_rdy_m{};
cond_variable tx_rdy_c{};
shared_mutex rx_wake_m{};
cond_variable rx_wake_c{};
bool head_b_initialized = false;
u8 hdmi_behavior_mode = PS3AV_HDMI_BEHAVIOR_NORMAL;
u16 av_cmd_ver = 0;
u32 hdmi_events_bitmask = 0;
bool hdmi_res_set[2]{ false, false };
void operator()();
void parse_tx_buffer(u32 buf_size);
vuart_av_thread &operator=(thread_state);
u32 enque_tx_data(const void *data, u32 data_sz);
u32 get_tx_bytes();
u32 read_rx_data(void *data, u32 data_sz);
u32 get_reply_buf_free_size();
template<bool UseScBuffer = false>
void write_resp(u32 cid, u32 status, const void *data = nullptr, u16 data_size = 0);
void add_hdmi_events(UartHdmiEvent first_event, UartHdmiEvent last_event, bool hdmi_0, bool hdmi_1);
void add_hdmi_events(UartHdmiEvent last_event, bool hdmi_0, bool hdmi_1);
static RsxaudioAvportIdx avport_to_idx(UartAudioAvport avport);
static constexpr auto thread_name = "VUART AV Thread"sv;
private:
struct temp_buf
{
u32 crnt_size = 0;
u8 buf[PS3AV_RX_BUF_SIZE]{};
};
simple_ringbuf tx_buf{PS3AV_TX_BUF_SIZE};
simple_ringbuf rx_buf{PS3AV_RX_BUF_SIZE};
// uart_mngr could sometimes read past the tx_buffer due to weird size checks in FW,
// but no further than the size of the largest packet
u8 temp_tx_buf[PS3AV_TX_BUF_SIZE * 2]{};
temp_buf temp_rx_buf{};
temp_buf temp_rx_sc_buf{};
vuart_hdmi_event_handler hdmi_event_handler[2]{ 0, 5000 };
bool hdcp_first_auth[2]{ true, true };
u32 read_tx_data(void *data, u32 data_sz);
std::shared_ptr<ps3av_cmd> get_cmd(u32 cid);
void commit_rx_buf(bool syscon_buf);
void add_unplug_event(bool hdmi_0, bool hdmi_1);
void add_plug_event(bool hdmi_0, bool hdmi_1);
void add_hdcp_done_event(bool hdmi_0, bool hdmi_1);
void commit_event_data(const void *data, u16 data_size);
void dispatch_hdmi_event(UartHdmiEvent event, UartAudioAvport hdmi);
};
using vuart_av = named_thread<vuart_av_thread>;
struct vuart_params
{
be_t<u64, 1> rx_buf_size;
be_t<u64, 1> tx_buf_size;
};
static_assert(sizeof(vuart_params) == 16);
struct ps3av_pkt_reply_hdr
{
be_t<u16, 1> version;
be_t<u16, 1> length;
be_t<u32, 1> cid;
be_t<u32, 1> status;
};
static_assert(sizeof(ps3av_pkt_reply_hdr) == 12);
struct ps3av_header
{
be_t<u16, 1> version;
be_t<u16, 1> length;
be_t<u32, 1> cid;
};
static_assert(sizeof(ps3av_header) == 8);
struct ps3av_info_resolution
{
be_t<u32, 1> res_bits;
be_t<u32, 1> native;
};
struct ps3av_info_cs
{
u8 rgb;
u8 yuv444;
u8 yuv422;
u8 colorimetry_data;
};
struct ps3av_info_color
{
be_t<u16, 1> red_x;
be_t<u16, 1> red_y;
be_t<u16, 1> green_x;
be_t<u16, 1> green_y;
be_t<u16, 1> blue_x;
be_t<u16, 1> blue_y;
be_t<u16, 1> white_x;
be_t<u16, 1> white_y;
be_t<u32, 1> gamma;
};
struct ps3av_info_audio
{
u8 type;
u8 max_num_of_ch;
u8 fs;
u8 sbit;
};
struct ps3av_get_monitor_info_reply
{
u8 avport;
u8 monitor_id[10];
u8 monitor_type;
u8 monitor_name[16];
ps3av_info_resolution res_60;
ps3av_info_resolution res_50;
ps3av_info_resolution res_other;
ps3av_info_resolution res_vesa;
ps3av_info_cs cs;
ps3av_info_color color;
u8 supported_ai;
u8 speaker_info;
be_t<u16, 1> num_of_audio_block;
ps3av_info_audio audio_info[PS3AV_MON_INFO_AUDIO_BLK_MAX];
be_t<u16, 1> hor_screen_size;
be_t<u16, 1> ver_screen_size;
u8 supported_content_types;
u8 reserved_1[3];
ps3av_info_resolution res_60_packed_3D;
ps3av_info_resolution res_50_packed_3D;
ps3av_info_resolution res_other_3D;
ps3av_info_resolution res_60_sbs_3D;
ps3av_info_resolution res_50_sbs_3D;
u8 vendor_specific_flags;
u8 reserved_2[7];
};
static_assert(sizeof(ps3av_get_monitor_info_reply) == 208);
struct ps3av_get_monitor_info
{
ps3av_header hdr;
be_t<u16, 1> avport;
be_t<u16, 1> reserved;
};
static_assert(sizeof(ps3av_get_monitor_info) == 12);
struct ps3av_get_hw_info_reply
{
be_t<u16, 1> num_of_hdmi;
be_t<u16, 1> num_of_avmulti;
be_t<u16, 1> num_of_spdif;
be_t<u16, 1> extra_bistream_support;
};
static_assert(sizeof(ps3av_get_hw_info_reply) == 8);
struct ps3av_pkt_set_hdmi_mode
{
ps3av_header hdr;
u8 mode;
u8 resv[3];
};
static_assert(sizeof(ps3av_pkt_set_hdmi_mode) == 12);
struct ps3av_pkt_audio_mode
{
ps3av_header hdr;
UartAudioAvport avport;
u8 reserved0[3];
be_t<u32, 1> mask;
be_t<u32, 1> audio_num_of_ch;
be_t<UartAudioFreq, 1> audio_fs;
be_t<UartAudioSampleSize, 1> audio_word_bits;
be_t<UartAudioFormat, 1> audio_format;
be_t<UartAudioSource, 1> audio_source;
u8 audio_enable[4];
u8 audio_swap[4];
u8 audio_map[4];
be_t<u32, 1> audio_layout;
be_t<u32, 1> audio_downmix;
be_t<u32, 1> audio_downmix_level;
u8 audio_cs_info[8];
};
static_assert(sizeof(ps3av_pkt_audio_mode) == 68);
struct ps3av_pkt_audio_mute
{
ps3av_header hdr;
UartAudioAvport avport;
u8 reserved0[3];
u8 mute;
};
static_assert(sizeof(ps3av_pkt_audio_mute) == 13);
struct ps3av_pkt_audio_set_active
{
ps3av_header hdr;
be_t<u32, 1> audio_port;
};
static_assert(sizeof(ps3av_pkt_audio_set_active) == 12);
struct ps3av_pkt_audio_spdif_bit
{
ps3av_header hdr;
UartAudioAvport avport;
u8 reserved0[3];
be_t<u32, 1> audio_port;
be_t<u32, 1> spdif_bit_data[12];
};
static_assert(sizeof(ps3av_pkt_audio_spdif_bit) == 64);
struct ps3av_pkt_audio_ctrl
{
ps3av_header hdr;
be_t<UartAudioCtrlID, 1> audio_ctrl_id;
be_t<u32, 1> audio_ctrl_data[4];
};
static_assert(sizeof(ps3av_pkt_audio_ctrl) == 28);
struct ps3av_pkt_hdmi_plugged_event
{
ps3av_header hdr;
ps3av_get_monitor_info_reply minfo;
};
static_assert(sizeof(ps3av_pkt_hdmi_plugged_event) == 216);
struct ps3av_pkt_hdmi_hdcp_done_event
{
ps3av_header hdr;
be_t<u32, 1> ksv_cnt;
u8 ksv_arr[20][5];
};
static_assert(sizeof(ps3av_pkt_hdmi_hdcp_done_event) == 112);
struct ps3av_pkt_av_init
{
ps3av_header hdr;
be_t<u32, 1> event_bit;
};
static_assert(sizeof(ps3av_pkt_av_init) == 12);
struct ps3av_pkt_av_init_reply
{
be_t<u32, 1> unk;
};
static_assert(sizeof(ps3av_pkt_av_init_reply) == 4);
struct ps3av_pkt_enable_event
{
ps3av_header hdr;
be_t<u32, 1> event_bit;
};
static_assert(sizeof(ps3av_pkt_enable_event) == 12);
struct ps3av_pkt_get_bksv
{
ps3av_header hdr;
be_t<u16, 1> avport;
u8 resv[2];
};
static_assert(sizeof(ps3av_pkt_get_bksv) == 12);
struct ps3av_pkt_get_bksv_reply
{
be_t<u16, 1> avport;
u8 resv[2];
be_t<u32, 1> ksv_cnt;
u8 ksv_arr[20][5];
};
static_assert(sizeof(ps3av_pkt_get_bksv_reply) == 108);
struct ps3av_pkt_video_get_hw_cfg_reply
{
be_t<u32, 1> gx_available;
};
static_assert(sizeof(ps3av_pkt_video_get_hw_cfg_reply) == 4);
struct ps3av_pkt_video_set_pitch
{
ps3av_header hdr;
be_t<u32, 1> video_head;
be_t<u32, 1> pitch;
};
static_assert(sizeof(ps3av_pkt_video_set_pitch) == 16);
struct ps3av_pkt_get_aksv_reply
{
be_t<u32, 1> ksv_size;
u8 ksv_arr[2][5];
u8 resv[2];
};
static_assert(sizeof(ps3av_pkt_get_aksv_reply) == 16);
struct ps3av_pkt_inc_avset
{
ps3av_header hdr;
be_t<u16, 1> num_of_video_pkt;
be_t<u16, 1> num_of_audio_pkt;
be_t<u16, 1> num_of_av_video_pkt;
be_t<u16, 1> num_of_av_audio_pkt;
};
static_assert(sizeof(ps3av_pkt_inc_avset) == 16);
struct ps3av_pkt_av_audio_param
{
ps3av_header hdr;
be_t<u16, 1> avport;
be_t<u16, 1> resv;
u8 mclk;
u8 ns[3];
u8 enable;
u8 swaplr;
u8 fifomap;
u8 inputctrl;
u8 inputlen;
u8 layout;
u8 info[5];
u8 chstat[5];
};
static_assert(sizeof(ps3av_pkt_av_audio_param) == 32);
struct ps3av_pkt_av_video_cs
{
ps3av_header hdr;
be_t<u16, 1> avport;
be_t<u16, 1> av_vid;
be_t<u16, 1> av_cs_out;
be_t<u16, 1> av_cs_in;
u8 dither;
u8 bitlen_out;
u8 super_white;
u8 aspect;
u8 unk1;
u8 unk2;
u8 resv[2];
};
static_assert(sizeof(ps3av_pkt_av_video_cs) == 24);
struct ps3av_pkt_video_mode
{
ps3av_header hdr;
be_t<u32, 1> video_head;
be_t<u16, 1> unk1;
be_t<u16, 1> unk2;
be_t<u32, 1> video_vid;
be_t<u32, 1> width;
be_t<u32, 1> height;
be_t<u32, 1> pitch;
be_t<u32, 1> video_out_format;
be_t<u32, 1> video_format;
be_t<u16, 1> unk3;
be_t<u16, 1> video_order;
be_t<u32, 1> unk4;
};
static_assert(sizeof(ps3av_pkt_video_mode) == 48);
struct ps3av_pkt_av_video_ytrapcontrol
{
ps3av_header hdr;
be_t<u16, 1> unk1;
be_t<u16, 1> unk2;
};
static_assert(sizeof(ps3av_pkt_av_video_ytrapcontrol) == 12);
struct ps3av_pkt_av_get_cec_config_reply
{
be_t<u32, 1> cec_present;
};
struct ps3av_pkt_video_format
{
ps3av_header hdr;
be_t<u32, 1> video_head;
be_t<u32, 1> video_format;
be_t<u16, 1> unk;
be_t<u16, 1> video_order;
};
static_assert(sizeof(ps3av_pkt_video_format) == 20);
struct ps3av_pkt_av_set_cgms_wss
{
ps3av_header hdr;
be_t<u16, 1> avport;
u8 resv[2];
be_t<u32, 1> cgms_wss;
};
static_assert(sizeof(ps3av_pkt_av_set_cgms_wss) == 16);
struct ps3av_pkt_set_acp_packet
{
ps3av_header hdr;
u8 avport;
u8 pkt_type;
u8 resv[2];
u8 pkt_data[32];
};
static_assert(sizeof(ps3av_pkt_set_acp_packet) == 44);
struct ps3av_pkt_acp_ctrl
{
ps3av_header hdr;
u8 avport;
u8 packetctl;
u8 resv[2];
};
static_assert(sizeof(ps3av_pkt_acp_ctrl) == 12);
struct ps3av_pkt_add_signal_ctl
{
ps3av_header hdr;
be_t<u16, 1> avport;
be_t<u16, 1> signal_ctl;
};
static_assert(sizeof(ps3av_pkt_add_signal_ctl) == 12);
struct ps3av_pkt_av_audio_mute
{
ps3av_header hdr;
be_t<u16, 1> avport;
be_t<u16, 1> mute;
};
static_assert(sizeof(ps3av_pkt_av_audio_mute) == 12);
struct ps3av_pkt_video_disable_sig
{
ps3av_header hdr;
be_t<u16, 1> avport;
be_t<u16, 1> resv;
};
static_assert(sizeof(ps3av_pkt_video_disable_sig) == 12);
// SysCalls
error_code sys_uart_initialize(ppu_thread &ppu);
error_code sys_uart_receive(ppu_thread &ppu, vm::ptr<void> buffer, u64 size, u32 mode);
error_code sys_uart_send(ppu_thread &ppu, vm::cptr<void> buffer, u64 size, u32 mode);
error_code sys_uart_get_params(vm::ptr<vuart_params> buffer);
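// Illustrative sketch (guarded out, not part of the interface): one way a caller might
// frame a PS3AV command and poll for its reply through the virtual UART syscalls above.
// The version, cid and length values are placeholders; the real packet framing
// constants are not defined in this header.
#if 0
static void example_av_roundtrip(ppu_thread& ppu, vm::ptr<ps3av_pkt_av_init> pkt, vm::ptr<ps3av_pkt_reply_hdr> reply)
{
	pkt->hdr.version = 0x205; // placeholder protocol version
	pkt->hdr.cid     = 0;     // placeholder command id
	pkt->hdr.length  = 4;     // placeholder; the exact length convention is not defined here
	pkt->event_bit   = 0;

	sys_uart_send(ppu, pkt, sizeof(ps3av_pkt_av_init), 0);        // mode 0 assumed to be a plain blocking write
	sys_uart_receive(ppu, reply, sizeof(ps3av_pkt_reply_hdr), 0); // reply carries the cid and a status word
}
#endif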
| 18,784 | C++ | .h | 624 | 28.205128 | 101 | 0.647522 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,948 | sys_fs.h | RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_fs.h |
#pragma once
#include "Emu/Memory/vm_ptr.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Utilities/File.h"
#include "Utilities/StrUtil.h"
#include <string>
#include <mutex>
// Open Flags
enum : s32
{
CELL_FS_O_RDONLY = 000000,
CELL_FS_O_WRONLY = 000001,
CELL_FS_O_RDWR = 000002,
CELL_FS_O_ACCMODE = 000003,
CELL_FS_O_CREAT = 000100,
CELL_FS_O_EXCL = 000200,
CELL_FS_O_TRUNC = 001000,
CELL_FS_O_APPEND = 002000,
CELL_FS_O_MSELF = 010000,
CELL_FS_O_UNK = 01000000, // Tests have shown this is independent of other flags. Only known to be used in Rock Band games.
};
// Seek Mode
enum : s32
{
CELL_FS_SEEK_SET,
CELL_FS_SEEK_CUR,
CELL_FS_SEEK_END,
};
enum : s32
{
CELL_FS_MAX_FS_PATH_LENGTH = 1024,
CELL_FS_MAX_FS_FILE_NAME_LENGTH = 255,
CELL_FS_MAX_MP_LENGTH = 31,
};
enum : s32
{
CELL_FS_S_IFMT = 0170000,
CELL_FS_S_IFDIR = 0040000, // directory
CELL_FS_S_IFREG = 0100000, // regular
CELL_FS_S_IFLNK = 0120000, // symbolic link
CELL_FS_S_IFWHT = 0160000, // unknown
CELL_FS_S_IRUSR = 0000400, // R for owner
CELL_FS_S_IWUSR = 0000200, // W for owner
CELL_FS_S_IXUSR = 0000100, // X for owner
CELL_FS_S_IRGRP = 0000040, // R for group
CELL_FS_S_IWGRP = 0000020, // W for group
CELL_FS_S_IXGRP = 0000010, // X for group
CELL_FS_S_IROTH = 0000004, // R for other
CELL_FS_S_IWOTH = 0000002, // W for other
CELL_FS_S_IXOTH = 0000001, // X for other
};
// CellFsDirent.d_type
enum : u8
{
CELL_FS_TYPE_UNKNOWN = 0,
CELL_FS_TYPE_DIRECTORY = 1,
CELL_FS_TYPE_REGULAR = 2,
CELL_FS_TYPE_SYMLINK = 3,
};
enum : u32
{
CELL_FS_IO_BUFFER_PAGE_SIZE_64KB = 0x0002,
CELL_FS_IO_BUFFER_PAGE_SIZE_1MB = 0x0004,
};
struct CellFsDirent
{
u8 d_type;
u8 d_namlen;
char d_name[256];
};
struct CellFsStat
{
be_t<s32> mode;
be_t<s32> uid;
be_t<s32> gid;
be_t<s64, 4> atime;
be_t<s64, 4> mtime;
be_t<s64, 4> ctime;
be_t<u64, 4> size;
be_t<u64, 4> blksize;
};
CHECK_SIZE_ALIGN(CellFsStat, 52, 4);
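// Illustrative sketch (guarded out): decoding CellFsStat::mode with the CELL_FS_S_*
// bits defined above. These helpers are not part of this header; they only show how the
// type and permission fields combine.
#if 0
static bool example_is_directory(const CellFsStat& sb)
{
	return (sb.mode & CELL_FS_S_IFMT) == CELL_FS_S_IFDIR;
}

static bool example_owner_can_write(const CellFsStat& sb)
{
	return (sb.mode & CELL_FS_S_IWUSR) != 0;
}
#endif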
struct CellFsDirectoryEntry
{
CellFsStat attribute;
CellFsDirent entry_name;
};
struct CellFsUtimbuf
{
be_t<s64, 4> actime;
be_t<s64, 4> modtime;
};
CHECK_SIZE_ALIGN(CellFsUtimbuf, 16, 4);
// MSelf file structs
struct FsMselfHeader
{
be_t<u32> m_magic;
be_t<u32> m_format_version;
be_t<u64> m_file_size;
be_t<u32> m_entry_num;
be_t<u32> m_entry_size;
u8 m_reserve[40];
};
struct FsMselfEntry
{
char m_name[32];
be_t<u64> m_offset;
be_t<u64> m_size;
u8 m_reserve[16];
};
enum class lv2_mp_flag
{
read_only,
no_uid_gid,
strict_get_block_size,
cache,
__bitset_enum_max
};
enum class lv2_file_type
{
regular = 0,
sdata,
edata,
};
struct lv2_fs_mount_point
{
const std::string_view root;
const std::string_view file_system;
const std::string_view device;
const u32 sector_size = 512;
const u64 sector_count = 256;
const u32 block_size = 4096;
const bs_t<lv2_mp_flag> flags{};
lv2_fs_mount_point* const next = nullptr;
mutable shared_mutex mutex;
};
extern lv2_fs_mount_point g_mp_sys_dev_hdd0;
extern lv2_fs_mount_point g_mp_sys_no_device;
struct lv2_fs_mount_info
{
lv2_fs_mount_point* const mp;
const std::string device;
const std::string file_system;
const bool read_only;
lv2_fs_mount_info(lv2_fs_mount_point* mp = nullptr, std::string_view device = {}, std::string_view file_system = {}, bool read_only = false)
: mp(mp ? mp : &g_mp_sys_no_device)
, device(device.empty() ? this->mp->device : device)
, file_system(file_system.empty() ? this->mp->file_system : file_system)
, read_only((this->mp->flags & lv2_mp_flag::read_only) || read_only) // respect the original flags of the mount point as well
{
}
constexpr bool operator==(const lv2_fs_mount_info& rhs) const noexcept
{
return this == &rhs;
}
constexpr bool operator==(const lv2_fs_mount_point* const& rhs) const noexcept
{
return mp == rhs;
}
constexpr lv2_fs_mount_point* operator->() const noexcept
{
return mp;
}
};
extern lv2_fs_mount_info g_mi_sys_not_found;
struct CellFsMountInfo; // Forward Declaration
struct lv2_fs_mount_info_map
{
public:
SAVESTATE_INIT_POS(40);
lv2_fs_mount_info_map();
lv2_fs_mount_info_map(const lv2_fs_mount_info_map&) = delete;
lv2_fs_mount_info_map& operator=(const lv2_fs_mount_info_map&) = delete;
~lv2_fs_mount_info_map();
// Forwarding arguments to map.try_emplace(): refer to the constructor of lv2_fs_mount_info
template <typename... Args>
bool add(Args&&... args)
{
return map.try_emplace(std::forward<Args>(args)...).second;
}
bool remove(std::string_view path);
const lv2_fs_mount_info& lookup(std::string_view path, bool no_cell_fs_path = false, std::string* mount_path = nullptr) const;
u64 get_all(CellFsMountInfo* info = nullptr, u64 len = 0) const;
bool is_device_mounted(std::string_view device_name) const;
static bool vfs_unmount(std::string_view vpath, bool remove_from_map = true);
private:
std::unordered_map<std::string, lv2_fs_mount_info, fmt::string_hash, std::equal_to<>> map;
};
struct lv2_fs_object
{
static constexpr u32 id_base = 3;
static constexpr u32 id_step = 1;
static constexpr u32 id_count = 255 - id_base;
static constexpr bool id_lowest = true;
SAVESTATE_INIT_POS(49);
// File Name (max 1055)
const std::array<char, 0x420> name;
// Mount Info
const lv2_fs_mount_info& mp;
protected:
lv2_fs_object(std::string_view filename);
lv2_fs_object(utils::serial& ar, bool dummy);
public:
lv2_fs_object(const lv2_fs_object&) = delete;
lv2_fs_object& operator=(const lv2_fs_object&) = delete;
// Normalize a virtual path
static std::string get_normalized_path(std::string_view path);
// Get the device's root path (e.g. "/dev_hdd0") from a given path
static std::string get_device_root(std::string_view filename);
// Filename can be either a path starting with '/' or a CELL_FS device name
// This should be used only when handling devices that are not mounted
// Otherwise, use g_fxo->get<lv2_fs_mount_info_map>().lookup() to look up mounted devices accurately
static lv2_fs_mount_point* get_mp(std::string_view filename, std::string* vfs_path = nullptr);
static std::array<char, 0x420> get_name(std::string_view filename)
{
std::array<char, 0x420> name;
if (filename.size() >= 0x420)
{
filename = filename.substr(0, 0x420 - 1);
}
filename.copy(name.data(), filename.size());
name[filename.size()] = 0;
return name;
}
void save(utils::serial&) {}
};
struct lv2_file final : lv2_fs_object
{
static constexpr u32 id_type = 1;
fs::file file;
const s32 mode;
const s32 flags;
std::string real_path;
const lv2_file_type type;
// IO Container
u32 ct_id{}, ct_used{};
// Stream lock
atomic_t<u32> lock{0};
// Some variables for convenience of data restoration
struct save_restore_t
{
u64 seek_pos;
u64 atime;
u64 mtime;
} restore_data{};
lv2_file(std::string_view filename, fs::file&& file, s32 mode, s32 flags, const std::string& real_path, lv2_file_type type = {})
: lv2_fs_object(filename)
, file(std::move(file))
, mode(mode)
, flags(flags)
, real_path(real_path)
, type(type)
{
}
lv2_file(const lv2_file& host, fs::file&& file, s32 mode, s32 flags, const std::string& real_path, lv2_file_type type = {})
: lv2_fs_object(host.name.data())
, file(std::move(file))
, mode(mode)
, flags(flags)
, real_path(real_path)
, type(type)
{
}
lv2_file(utils::serial& ar);
void save(utils::serial& ar);
struct open_raw_result_t
{
CellError error;
fs::file file;
};
struct open_result_t
{
CellError error;
std::string ppath;
std::string real_path;
fs::file file;
lv2_file_type type;
};
// Open a file with wrapped logic of sys_fs_open
static open_raw_result_t open_raw(const std::string& path, s32 flags, s32 mode, lv2_file_type type = lv2_file_type::regular, const lv2_fs_mount_info& mp = g_mi_sys_not_found);
static open_result_t open(std::string_view vpath, s32 flags, s32 mode, const void* arg = {}, u64 size = 0);
// File reading with intermediate buffer
static u64 op_read(const fs::file& file, vm::ptr<void> buf, u64 size, u64 opt_pos = umax);
u64 op_read(vm::ptr<void> buf, u64 size, u64 opt_pos = umax) const
{
return op_read(file, buf, size, opt_pos);
}
// File writing with intermediate buffer
static u64 op_write(const fs::file& file, vm::cptr<void> buf, u64 size);
u64 op_write(vm::cptr<void> buf, u64 size) const
{
return op_write(file, buf, size);
}
// For MSELF support
struct file_view;
// Make file view from lv2_file object (for MSELF support)
static fs::file make_view(const std::shared_ptr<lv2_file>& _file, u64 offset);
};
struct lv2_dir final : lv2_fs_object
{
static constexpr u32 id_type = 2;
const std::vector<fs::dir_entry> entries;
// Current reading position
atomic_t<u64> pos{0};
lv2_dir(std::string_view filename, std::vector<fs::dir_entry>&& entries)
: lv2_fs_object(filename)
, entries(std::move(entries))
{
}
lv2_dir(utils::serial& ar);
void save(utils::serial& ar);
// Read next
const fs::dir_entry* dir_read()
{
const u64 old_pos = pos;
if (const u64 cur = (old_pos < entries.size() ? pos++ : old_pos); cur < entries.size())
{
return &entries[cur];
}
return nullptr;
}
};
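// Illustrative sketch (guarded out): draining an lv2_dir with dir_read(), which hands
// out entries in order and returns nullptr once the atomic read position passes the end.
#if 0
static usz example_count_entries(lv2_dir& dir)
{
	usz count = 0;

	while (const fs::dir_entry* entry = dir.dir_read())
	{
		count++; // entry->name holds the name of the current directory entry
	}

	return count;
}
#endif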
// sys_fs_fcntl arg base class (left empty for PODness)
struct lv2_file_op
{
};
namespace vtable
{
struct lv2_file_op
{
// Speculation
vm::bptrb<vm::ptrb<void>(vm::ptrb<lv2_file_op>)> get_data;
vm::bptrb<u32(vm::ptrb<lv2_file_op>)> get_size;
vm::bptrb<void(vm::ptrb<lv2_file_op>)> _dtor1;
vm::bptrb<void(vm::ptrb<lv2_file_op>)> _dtor2;
};
}
// sys_fs_fcntl: read with offset, write with offset
struct lv2_file_op_rw : lv2_file_op
{
vm::bptrb<vtable::lv2_file_op> _vtable;
be_t<u32> op;
be_t<u32> _x8; // ???
be_t<u32> _xc; // ???
be_t<u32> fd; // File descriptor (3..255)
vm::bptrb<void> buf; // Buffer for data
be_t<u64> offset; // File offset
be_t<u64> size; // Access size
be_t<s32> out_code; // Op result
be_t<u64> out_size; // Size processed
};
CHECK_SIZE(lv2_file_op_rw, 0x38);
// sys_fs_fcntl: cellFsSdataOpenByFd
struct lv2_file_op_09 : lv2_file_op
{
vm::bptrb<vtable::lv2_file_op> _vtable;
be_t<u32> op;
be_t<u32> _x8;
be_t<u32> _xc;
be_t<u32> fd;
be_t<u64> offset;
be_t<u32> _vtabl2;
be_t<u32> arg1; // 0x180
be_t<u32> arg2; // 0x10
be_t<u32> arg_size; // 6th arg
be_t<u32> arg_ptr; // 5th arg
be_t<u32> _x34;
be_t<s32> out_code;
be_t<u32> out_fd;
};
CHECK_SIZE(lv2_file_op_09, 0x40);
struct lv2_file_e0000025 : lv2_file_op
{
be_t<u32> size; // 0x30
be_t<u32> _x4; // 0x10
be_t<u32> _x8; // 0x28 - offset of out_code
be_t<u32> name_size;
vm::bcptr<char> name;
be_t<u32> _x14;
be_t<u32> _x18; // 0
be_t<u32> _x1c; // 0
be_t<u32> _x20; // 16
be_t<u32> _x24; // unk, seems to be memory location
be_t<u32> out_code; // out_code
be_t<u32> fd; // 0xffffffff - likely fd out
};
CHECK_SIZE(lv2_file_e0000025, 0x30);
// sys_fs_fnctl: cellFsGetDirectoryEntries
struct lv2_file_op_dir : lv2_file_op
{
struct dir_info : lv2_file_op
{
be_t<s32> _code; // Op result
be_t<u32> _size; // Number of entries written
vm::bptrb<CellFsDirectoryEntry> ptr;
be_t<u32> max;
};
CHECK_SIZE(dir_info, 0x10);
vm::bptrb<vtable::lv2_file_op> _vtable;
be_t<u32> op;
be_t<u32> _x8;
dir_info arg;
};
CHECK_SIZE(lv2_file_op_dir, 0x1c);
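// Illustrative sketch (guarded out): issuing the cellFsGetDirectoryEntries fcntl with
// the argument layout above. The opcode constant is a placeholder here; the real value
// is not defined in this header, and error handling is elided.
#if 0
static error_code example_get_dir_entries(ppu_thread& ppu, u32 fd, vm::ptr<lv2_file_op_dir> arg,
	vm::ptr<CellFsDirectoryEntry> entries, u32 max_entries)
{
	constexpr u32 op_get_dir_entries = 0; // placeholder opcode value

	arg->op        = op_get_dir_entries;
	arg->arg.ptr   = entries;     // output array filled by the fcntl handler
	arg->arg.max   = max_entries; // capacity of the output array
	arg->arg._size = 0;           // number of entries actually written (out)
	arg->arg._code = 0;           // per-operation result code (out)

	return sys_fs_fcntl(ppu, fd, op_get_dir_entries, arg, sizeof(lv2_file_op_dir));
}
#endif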
// sys_fs_fcntl: cellFsGetFreeSize (for dev_hdd0)
struct lv2_file_c0000002 : lv2_file_op
{
vm::bptrb<vtable::lv2_file_op> _vtable;
be_t<u32> op;
be_t<u32> _x8;
vm::bcptr<char> path;
be_t<u32> _x10; // 0
be_t<u32> _x14;
be_t<u32> out_code; // CELL_ENOSYS
be_t<u32> out_block_size;
be_t<u64> out_block_count;
};
CHECK_SIZE(lv2_file_c0000002, 0x28);
// sys_fs_fcntl: unknown (called before cellFsOpen, for example)
struct lv2_file_c0000006 : lv2_file_op
{
be_t<u32> size; // 0x20
be_t<u32> _x4; // 0x10
be_t<u32> _x8; // 0x18 - offset of out_code
be_t<u32> name_size;
vm::bcptr<char> name;
be_t<u32> _x14; // 0
be_t<u32> out_code; // 0x80010003
be_t<u32> out_id; // set to 0, may return 0x1b5
};
CHECK_SIZE(lv2_file_c0000006, 0x20);
// sys_fs_fcntl: cellFsArcadeHddSerialNumber
struct lv2_file_c0000007 : lv2_file_op
{
be_t<u32> out_code; // set to 0
vm::bcptr<char> device; // CELL_FS_IOS:ATA_HDD
be_t<u32> device_size; // 0x14
vm::bptr<char> model;
be_t<u32> model_size; // 0x29
vm::bptr<char> serial;
be_t<u32> serial_size; // 0x15
};
CHECK_SIZE(lv2_file_c0000007, 0x1c);
struct lv2_file_c0000008 : lv2_file_op
{
u8 _x0[4];
be_t<u32> op; // 0xC0000008
u8 _x8[8];
be_t<u64> container_id;
be_t<u32> size;
be_t<u32> page_type; // 0x4000 for cellFsSetDefaultContainer
// 0x4000 | page_type given by user, valid values seem to be:
// CELL_FS_IO_BUFFER_PAGE_SIZE_64KB 0x0002
// CELL_FS_IO_BUFFER_PAGE_SIZE_1MB 0x0004
be_t<u32> out_code;
u8 _x24[4];
};
CHECK_SIZE(lv2_file_c0000008, 0x28);
struct lv2_file_c0000015 : lv2_file_op
{
be_t<u32> size; // 0x20
be_t<u32> _x4; // 0x10
be_t<u32> _x8; // 0x18 - offset of out_code
be_t<u32> path_size;
vm::bcptr<char> path;
be_t<u32> _x14; //
be_t<u16> vendorID;
be_t<u16> productID;
be_t<u32> out_code; // set to 0
};
CHECK_SIZE(lv2_file_c0000015, 0x20);
struct lv2_file_c000001a : lv2_file_op
{
be_t<u32> disc_retry_type; // CELL_FS_DISC_READ_RETRY_NONE results in a 0 here
// CELL_FS_DISC_READ_RETRY_DEFAULT results in a 0x63 here
be_t<u32> _x4; // 0
be_t<u32> _x8; // 0x000186A0
be_t<u32> _xC; // 0
be_t<u32> _x10; // 0
be_t<u32> _x14; // 0
};
CHECK_SIZE(lv2_file_c000001a, 0x18);
struct lv2_file_c000001c : lv2_file_op
{
be_t<u32> size; // 0x60
be_t<u32> _x4; // 0x10
be_t<u32> _x8; // 0x18 - offset of out_code
be_t<u32> path_size;
vm::bcptr<char> path;
be_t<u32> unk1;
be_t<u16> vendorID;
be_t<u16> productID;
be_t<u32> out_code; // set to 0
be_t<u16> serial[32];
};
CHECK_SIZE(lv2_file_c000001c, 0x60);
// sys_fs_fcntl: cellFsAllocateFileAreaWithoutZeroFill
struct lv2_file_e0000017 : lv2_file_op
{
be_t<u32> size; // 0x28
be_t<u32> _x4; // 0x10, offset
be_t<u32> _x8; // 0x20, offset
be_t<u32> _xc; // -
vm::bcptr<char> file_path;
be_t<u64> file_size;
be_t<u32> out_code;
};
CHECK_SIZE(lv2_file_e0000017, 0x28);
struct CellFsMountInfo
{
char mount_path[0x20]; // 0x0
char filesystem[0x20]; // 0x20
char dev_name[0x40]; // 0x40
be_t<u32> unk[5]; // 0x80, probably attributes
};
CHECK_SIZE(CellFsMountInfo, 0x94);
// Default IO container
struct default_sys_fs_container
{
shared_mutex mutex;
u32 id = 0;
u32 cap = 0;
u32 used = 0;
};
// Syscalls
error_code sys_fs_test(ppu_thread& ppu, u32 arg1, u32 arg2, vm::ptr<u32> arg3, u32 arg4, vm::ptr<char> buf, u32 buf_size);
error_code sys_fs_open(ppu_thread& ppu, vm::cptr<char> path, s32 flags, vm::ptr<u32> fd, s32 mode, vm::cptr<void> arg, u64 size);
error_code sys_fs_read(ppu_thread& ppu, u32 fd, vm::ptr<void> buf, u64 nbytes, vm::ptr<u64> nread);
error_code sys_fs_write(ppu_thread& ppu, u32 fd, vm::cptr<void> buf, u64 nbytes, vm::ptr<u64> nwrite);
error_code sys_fs_close(ppu_thread& ppu, u32 fd);
error_code sys_fs_opendir(ppu_thread& ppu, vm::cptr<char> path, vm::ptr<u32> fd);
error_code sys_fs_readdir(ppu_thread& ppu, u32 fd, vm::ptr<CellFsDirent> dir, vm::ptr<u64> nread);
error_code sys_fs_closedir(ppu_thread& ppu, u32 fd);
error_code sys_fs_stat(ppu_thread& ppu, vm::cptr<char> path, vm::ptr<CellFsStat> sb);
error_code sys_fs_fstat(ppu_thread& ppu, u32 fd, vm::ptr<CellFsStat> sb);
error_code sys_fs_link(ppu_thread& ppu, vm::cptr<char> from, vm::cptr<char> to);
error_code sys_fs_mkdir(ppu_thread& ppu, vm::cptr<char> path, s32 mode);
error_code sys_fs_rename(ppu_thread& ppu, vm::cptr<char> from, vm::cptr<char> to);
error_code sys_fs_rmdir(ppu_thread& ppu, vm::cptr<char> path);
error_code sys_fs_unlink(ppu_thread& ppu, vm::cptr<char> path);
error_code sys_fs_access(ppu_thread& ppu, vm::cptr<char> path, s32 mode);
error_code sys_fs_fcntl(ppu_thread& ppu, u32 fd, u32 op, vm::ptr<void> arg, u32 size);
error_code sys_fs_lseek(ppu_thread& ppu, u32 fd, s64 offset, s32 whence, vm::ptr<u64> pos);
error_code sys_fs_fdatasync(ppu_thread& ppu, u32 fd);
error_code sys_fs_fsync(ppu_thread& ppu, u32 fd);
error_code sys_fs_fget_block_size(ppu_thread& ppu, u32 fd, vm::ptr<u64> sector_size, vm::ptr<u64> block_size, vm::ptr<u64> arg4, vm::ptr<s32> out_flags);
error_code sys_fs_get_block_size(ppu_thread& ppu, vm::cptr<char> path, vm::ptr<u64> sector_size, vm::ptr<u64> block_size, vm::ptr<u64> arg4);
error_code sys_fs_truncate(ppu_thread& ppu, vm::cptr<char> path, u64 size);
error_code sys_fs_ftruncate(ppu_thread& ppu, u32 fd, u64 size);
error_code sys_fs_symbolic_link(ppu_thread& ppu, vm::cptr<char> target, vm::cptr<char> linkpath);
error_code sys_fs_chmod(ppu_thread& ppu, vm::cptr<char> path, s32 mode);
error_code sys_fs_chown(ppu_thread& ppu, vm::cptr<char> path, s32 uid, s32 gid);
error_code sys_fs_disk_free(ppu_thread& ppu, vm::cptr<char> path, vm::ptr<u64> total_free, vm::ptr<u64> avail_free);
error_code sys_fs_utime(ppu_thread& ppu, vm::cptr<char> path, vm::cptr<CellFsUtimbuf> timep);
error_code sys_fs_acl_read(ppu_thread& ppu, vm::cptr<char> path, vm::ptr<void>);
error_code sys_fs_acl_write(ppu_thread& ppu, vm::cptr<char> path, vm::ptr<void>);
error_code sys_fs_lsn_get_cda_size(ppu_thread& ppu, u32 fd, vm::ptr<u64> ptr);
error_code sys_fs_lsn_get_cda(ppu_thread& ppu, u32 fd, vm::ptr<void>, u64, vm::ptr<u64>);
error_code sys_fs_lsn_lock(ppu_thread& ppu, u32 fd);
error_code sys_fs_lsn_unlock(ppu_thread& ppu, u32 fd);
error_code sys_fs_lsn_read(ppu_thread& ppu, u32 fd, vm::cptr<void>, u64);
error_code sys_fs_lsn_write(ppu_thread& ppu, u32 fd, vm::cptr<void>, u64);
error_code sys_fs_mapped_allocate(ppu_thread& ppu, u32 fd, u64, vm::pptr<void> out_ptr);
error_code sys_fs_mapped_free(ppu_thread& ppu, u32 fd, vm::ptr<void> ptr);
error_code sys_fs_truncate2(ppu_thread& ppu, u32 fd, u64 size);
error_code sys_fs_newfs(ppu_thread& ppu, vm::cptr<char> dev_name, vm::cptr<char> file_system, s32 unk1, vm::cptr<char> str1);
error_code sys_fs_mount(ppu_thread& ppu, vm::cptr<char> dev_name, vm::cptr<char> file_system, vm::cptr<char> path, s32 unk1, s32 prot, s32 unk2, vm::cptr<char> str1, u32 str_len);
error_code sys_fs_unmount(ppu_thread& ppu, vm::cptr<char> path, s32 unk1, s32 force);
error_code sys_fs_get_mount_info_size(ppu_thread& ppu, vm::ptr<u64> len);
error_code sys_fs_get_mount_info(ppu_thread& ppu, vm::ptr<CellFsMountInfo> info, u64 len, vm::ptr<u64> out_len);
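// Illustrative sketch (guarded out): the usual open/read/close sequence through the
// syscalls above. The guest-side output buffers (fd, data, nread) are assumed to be
// pre-allocated vm pointers supplied by the caller; error handling is elided.
#if 0
static void example_read_whole_file(ppu_thread& ppu, vm::cptr<char> vpath,
	vm::ptr<u32> fd, vm::ptr<void> data, u64 data_size, vm::ptr<u64> nread)
{
	sys_fs_open(ppu, vpath, CELL_FS_O_RDONLY, fd, 0, vm::null, 0);
	sys_fs_read(ppu, *fd, data, data_size, nread);
	sys_fs_close(ppu, *fd);
}
#endif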
| 18,542 | C++ | .h | 570 | 30.405263 | 179 | 0.69058 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,949 | sys_gamepad.h | RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_gamepad.h |
#pragma once
#include "Emu/Memory/vm_ptr.h"
// Syscalls
u32 sys_gamepad_ycon_if(u8 packet_id, vm::ptr<u8> in, vm::ptr<u8> out);
| 130 | C++ | .h | 4 | 30.75 | 71 | 0.715447 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,950 | sys_usbd.h | RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_usbd.h |
#pragma once
#include "Emu/Memory/vm_ptr.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Input/product_info.h"
class ppu_thread;
#define MAX_SYS_USBD_TRANSFERS 0x44
// PS3 internal codes
enum PS3StandardUsbErrors : u32
{
HC_CC_NOERR = 0x00,
EHCI_CC_MISSMF = 0x10,
EHCI_CC_XACT = 0x20,
EHCI_CC_BABBLE = 0x30,
EHCI_CC_DATABUF = 0x40,
EHCI_CC_HALTED = 0x50,
};
enum PS3IsochronousUsbErrors : u8
{
USBD_HC_CC_NOERR = 0x00,
USBD_HC_CC_MISSMF = 0x01,
USBD_HC_CC_XACT = 0x02,
USBD_HC_CC_BABBLE = 0x04,
USBD_HC_CC_DATABUF = 0x08,
};
enum SysUsbdEvents : u32
{
SYS_USBD_ATTACH = 0x01,
SYS_USBD_DETACH = 0x02,
SYS_USBD_TRANSFER_COMPLETE = 0x03,
SYS_USBD_TERMINATE = 0x04,
};
// PS3 internal structures
struct UsbInternalDevice
{
u8 device_high; // System flag maybe (used in generating actual device number)
u8 device_low; // Just a number identifying the device (used in generating actual device number)
u8 unk3; // ? Seems to always be 2?
u8 unk4; // ?
};
struct UsbDeviceRequest
{
u8 bmRequestType;
u8 bRequest;
be_t<u16> wValue;
be_t<u16> wIndex;
be_t<u16> wLength;
};
struct UsbDeviceIsoRequest
{
vm::ptr<void> buf;
be_t<u32> start_frame;
be_t<u32> num_packets;
be_t<u16> packets[8];
};
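// Illustrative sketch (guarded out): filling UsbDeviceRequest for a standard
// GET_DESCRIPTOR(DEVICE) control transfer. The bmRequestType/bRequest values follow the
// USB specification; the transfer-type argument of sys_usbd_transfer_data is a
// placeholder, since its constants are not defined in this header.
#if 0
static error_code example_get_device_descriptor(ppu_thread& ppu, u32 handle, u32 pipe_handle,
	vm::ptr<UsbDeviceRequest> req, vm::ptr<u8> buf)
{
	req->bmRequestType = 0x80;   // device-to-host, standard request, device recipient
	req->bRequest      = 0x06;   // GET_DESCRIPTOR
	req->wValue        = 0x0100; // descriptor type DEVICE (1) in the high byte, index 0
	req->wIndex        = 0;
	req->wLength       = 18;     // size of a standard device descriptor

	return sys_usbd_transfer_data(ppu, handle, pipe_handle, buf, 18, req, 0 /* placeholder transfer type */);
}
#endif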
error_code sys_usbd_initialize(ppu_thread& ppu, vm::ptr<u32> handle);
error_code sys_usbd_finalize(ppu_thread& ppu, u32 handle);
error_code sys_usbd_get_device_list(ppu_thread& ppu, u32 handle, vm::ptr<UsbInternalDevice> device_list, u32 max_devices);
error_code sys_usbd_get_descriptor_size(ppu_thread& ppu, u32 handle, u32 device_handle);
error_code sys_usbd_get_descriptor(ppu_thread& ppu, u32 handle, u32 device_handle, vm::ptr<void> descriptor, u32 desc_size);
error_code sys_usbd_register_ldd(ppu_thread& ppu, u32 handle, vm::cptr<char> s_product, u16 slen_product);
error_code sys_usbd_unregister_ldd(ppu_thread& ppu, u32 handle, vm::cptr<char> s_product, u16 slen_product);
error_code sys_usbd_open_pipe(ppu_thread& ppu, u32 handle, u32 device_handle, u32 unk1, u64 unk2, u64 unk3, u32 endpoint, u64 unk4);
error_code sys_usbd_open_default_pipe(ppu_thread& ppu, u32 handle, u32 device_handle);
error_code sys_usbd_close_pipe(ppu_thread& ppu, u32 handle, u32 pipe_handle);
error_code sys_usbd_receive_event(ppu_thread& ppu, u32 handle, vm::ptr<u64> arg1, vm::ptr<u64> arg2, vm::ptr<u64> arg3);
error_code sys_usbd_detect_event(ppu_thread& ppu);
error_code sys_usbd_attach(ppu_thread& ppu, u32 handle, u32 unk1, u32 unk2, u32 device_handle);
error_code sys_usbd_transfer_data(ppu_thread& ppu, u32 handle, u32 id_pipe, vm::ptr<u8> buf, u32 buf_size, vm::ptr<UsbDeviceRequest> request, u32 type_transfer);
error_code sys_usbd_isochronous_transfer_data(ppu_thread& ppu, u32 handle, u32 id_pipe, vm::ptr<UsbDeviceIsoRequest> iso_request);
error_code sys_usbd_get_transfer_status(ppu_thread& ppu, u32 handle, u32 id_transfer, u32 unk1, vm::ptr<u32> result, vm::ptr<u32> count);
error_code sys_usbd_get_isochronous_transfer_status(ppu_thread& ppu, u32 handle, u32 id_transfer, u32 unk1, vm::ptr<UsbDeviceIsoRequest> request, vm::ptr<u32> result);
error_code sys_usbd_get_device_location(ppu_thread& ppu, u32 handle, u32 device_handle, vm::ptr<u8> location);
error_code sys_usbd_send_event(ppu_thread& ppu);
error_code sys_usbd_event_port_send(ppu_thread& ppu, u32 handle, u64 arg1, u64 arg2, u64 arg3);
error_code sys_usbd_allocate_memory(ppu_thread& ppu);
error_code sys_usbd_free_memory(ppu_thread& ppu);
error_code sys_usbd_get_device_speed(ppu_thread& ppu);
error_code sys_usbd_register_extra_ldd(ppu_thread& ppu, u32 handle, vm::cptr<char> s_product, u16 slen_product, u16 id_vendor, u16 id_product_min, u16 id_product_max);
error_code sys_usbd_unregister_extra_ldd(ppu_thread& ppu, u32 handle, vm::cptr<char> s_product, u16 slen_product);
void connect_usb_controller(u8 index, input::product_type);
| 3,886 | C++ | .h | 80 | 47.0875 | 167 | 0.743347 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,951 | sys_event.h | RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_event.h |
#pragma once
#include "sys_sync.h"
#include "Emu/Memory/vm_ptr.h"
#include <deque>
class cpu_thread;
class spu_thread;
// Event Queue Type
enum : u32
{
SYS_PPU_QUEUE = 1,
SYS_SPU_QUEUE = 2,
};
// Event Queue Destroy Mode
enum : s32
{
SYS_EVENT_QUEUE_DESTROY_FORCE = 1,
};
// Event Queue Ipc Key
enum : u64
{
SYS_EVENT_QUEUE_LOCAL = 0,
};
// Event Port Type
enum : s32
{
SYS_EVENT_PORT_LOCAL = 1,
SYS_EVENT_PORT_IPC = 3, // Unofficial name
};
// Event Port Name
enum : u64
{
SYS_EVENT_PORT_NO_NAME = 0,
};
// Event Source Type
enum : u32
{
SYS_SPU_THREAD_EVENT_USER = 1,
SYS_SPU_THREAD_EVENT_DMA = 2, // not supported
};
// Event Source Key
enum : u64
{
SYS_SPU_THREAD_EVENT_USER_KEY = 0xFFFFFFFF53505501ull,
SYS_SPU_THREAD_EVENT_DMA_KEY = 0xFFFFFFFF53505502ull,
SYS_SPU_THREAD_EVENT_EXCEPTION_KEY = 0xFFFFFFFF53505503ull,
};
struct sys_event_queue_attribute_t
{
be_t<u32> protocol; // SYS_SYNC_PRIORITY or SYS_SYNC_FIFO
be_t<s32> type; // SYS_PPU_QUEUE or SYS_SPU_QUEUE
union
{
nse_t<u64, 1> name_u64;
char name[sizeof(u64)];
};
};
struct sys_event_t
{
be_t<u64> source;
be_t<u64> data1;
be_t<u64> data2;
be_t<u64> data3;
};
// Source, data1, data2, data3
using lv2_event = std::tuple<u64, u64, u64, u64>;
struct lv2_event_port;
struct lv2_event_queue final : public lv2_obj
{
static const u32 id_base = 0x8d000000;
const u32 id;
const lv2_protocol protocol;
const u8 type;
const u8 size;
const u64 name;
const u64 key;
shared_mutex mutex;
std::deque<lv2_event> events;
spu_thread* sq{};
ppu_thread* pq{};
lv2_event_queue(u32 protocol, s32 type, s32 size, u64 name, u64 ipc_key) noexcept;
lv2_event_queue(utils::serial& ar) noexcept;
static std::shared_ptr<void> load(utils::serial& ar);
void save(utils::serial& ar);
static void save_ptr(utils::serial&, lv2_event_queue*);
static std::shared_ptr<lv2_event_queue> load_ptr(utils::serial& ar, std::shared_ptr<lv2_event_queue>& queue, std::string_view msg = {});
CellError send(lv2_event event, bool* notified_thread = nullptr, lv2_event_port* port = nullptr);
CellError send(u64 source, u64 d1, u64 d2, u64 d3, bool* notified_thread = nullptr, lv2_event_port* port = nullptr)
{
return send(std::make_tuple(source, d1, d2, d3), notified_thread, port);
}
// Get event queue by its global key
static std::shared_ptr<lv2_event_queue> find(u64 ipc_key);
};
struct lv2_event_port final : lv2_obj
{
static const u32 id_base = 0x0e000000;
const s32 type; // Port type, either IPC or local
const u64 name; // Event source (generated from id and process id if not set)
atomic_t<usz> is_busy = 0; // Counts threads waiting on event sending
std::shared_ptr<lv2_event_queue> queue; // Event queue this port is connected to
lv2_event_port(s32 type, u64 name)
: type(type)
, name(name)
{
}
lv2_event_port(utils::serial& ar);
void save(utils::serial& ar);
};
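// Illustrative sketch (guarded out): how an event travels from a port to its connected
// queue using the send() overload above. The port name doubles as the event source
// field; queue teardown races are ignored here.
#if 0
static CellError example_port_send(lv2_event_port& port, u64 d1, u64 d2, u64 d3)
{
	if (!lv2_obj::check(port.queue))
	{
		return CELL_ENOTCONN; // port is not connected to a live queue
	}

	return port.queue->send(port.name, d1, d2, d3);
}
#endif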
class ppu_thread;
// Syscalls
error_code sys_event_queue_create(cpu_thread& cpu, vm::ptr<u32> equeue_id, vm::ptr<sys_event_queue_attribute_t> attr, u64 event_queue_key, s32 size);
error_code sys_event_queue_destroy(ppu_thread& ppu, u32 equeue_id, s32 mode);
error_code sys_event_queue_receive(ppu_thread& ppu, u32 equeue_id, vm::ptr<sys_event_t> dummy_event, u64 timeout);
error_code sys_event_queue_tryreceive(ppu_thread& ppu, u32 equeue_id, vm::ptr<sys_event_t> event_array, s32 size, vm::ptr<u32> number);
error_code sys_event_queue_drain(ppu_thread& ppu, u32 event_queue_id);
error_code sys_event_port_create(cpu_thread& cpu, vm::ptr<u32> eport_id, s32 port_type, u64 name);
error_code sys_event_port_destroy(ppu_thread& ppu, u32 eport_id);
error_code sys_event_port_connect_local(cpu_thread& cpu, u32 event_port_id, u32 event_queue_id);
error_code sys_event_port_connect_ipc(ppu_thread& ppu, u32 eport_id, u64 ipc_key);
error_code sys_event_port_disconnect(ppu_thread& ppu, u32 eport_id);
error_code sys_event_port_send(u32 event_port_id, u64 data1, u64 data2, u64 data3);
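// Illustrative sketch (guarded out): the usual queue/port setup sequence using the
// syscalls above. Guest output pointers are assumed to be pre-allocated by the caller
// and error handling is elided.
#if 0
static void example_event_setup(ppu_thread& ppu, vm::ptr<u32> equeue_id, vm::ptr<u32> eport_id,
	vm::ptr<sys_event_queue_attribute_t> attr)
{
	attr->protocol = SYS_SYNC_PRIORITY;
	attr->type     = SYS_PPU_QUEUE;

	sys_event_queue_create(ppu, equeue_id, attr, SYS_EVENT_QUEUE_LOCAL, 32);
	sys_event_port_create(ppu, eport_id, SYS_EVENT_PORT_LOCAL, SYS_EVENT_PORT_NO_NAME);
	sys_event_port_connect_local(ppu, *eport_id, *equeue_id);
	sys_event_port_send(*eport_id, 1, 2, 3);
}
#endif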
| 3,982 | C++ | .h | 121 | 31.107438 | 149 | 0.728056 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,952 | sys_spu.h | RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_spu.h |
#pragma once
#include "sys_sync.h"
#include "sys_event.h"
#include "Emu/Cell/SPUThread.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Memory/vm_ptr.h"
#include "Utilities/File.h"
#include <span>
struct lv2_memory_container;
enum : s32
{
SYS_SPU_THREAD_GROUP_TYPE_NORMAL = 0x00,
//SYS_SPU_THREAD_GROUP_TYPE_SEQUENTIAL = 0x01, doesn't exist
SYS_SPU_THREAD_GROUP_TYPE_SYSTEM = 0x02,
SYS_SPU_THREAD_GROUP_TYPE_MEMORY_FROM_CONTAINER = 0x04,
SYS_SPU_THREAD_GROUP_TYPE_NON_CONTEXT = 0x08,
SYS_SPU_THREAD_GROUP_TYPE_EXCLUSIVE_NON_CONTEXT = 0x18,
SYS_SPU_THREAD_GROUP_TYPE_COOPERATE_WITH_SYSTEM = 0x20,
};
enum
{
SYS_SPU_THREAD_GROUP_JOIN_GROUP_EXIT = 0x0001,
SYS_SPU_THREAD_GROUP_JOIN_ALL_THREADS_EXIT = 0x0002,
SYS_SPU_THREAD_GROUP_JOIN_TERMINATED = 0x0004
};
enum
{
SYS_SPU_THREAD_GROUP_EVENT_RUN = 1,
SYS_SPU_THREAD_GROUP_EVENT_EXCEPTION = 2,
SYS_SPU_THREAD_GROUP_EVENT_SYSTEM_MODULE = 4,
};
enum : u64
{
SYS_SPU_THREAD_GROUP_EVENT_RUN_KEY = 0xFFFFFFFF53505500ull,
SYS_SPU_THREAD_GROUP_EVENT_EXCEPTION_KEY = 0xFFFFFFFF53505503ull,
SYS_SPU_THREAD_GROUP_EVENT_SYSTEM_MODULE_KEY = 0xFFFFFFFF53505504ull,
};
enum
{
SYS_SPU_THREAD_GROUP_LOG_ON = 0x0,
SYS_SPU_THREAD_GROUP_LOG_OFF = 0x1,
SYS_SPU_THREAD_GROUP_LOG_GET_STATUS = 0x2,
};
enum spu_group_status : u32
{
SPU_THREAD_GROUP_STATUS_NOT_INITIALIZED,
SPU_THREAD_GROUP_STATUS_INITIALIZED,
SPU_THREAD_GROUP_STATUS_READY,
SPU_THREAD_GROUP_STATUS_WAITING,
SPU_THREAD_GROUP_STATUS_SUSPENDED,
SPU_THREAD_GROUP_STATUS_WAITING_AND_SUSPENDED,
SPU_THREAD_GROUP_STATUS_RUNNING,
SPU_THREAD_GROUP_STATUS_STOPPED,
SPU_THREAD_GROUP_STATUS_DESTROYED, // Internal state
SPU_THREAD_GROUP_STATUS_UNKNOWN,
};
enum : s32
{
SYS_SPU_SEGMENT_TYPE_COPY = 1,
SYS_SPU_SEGMENT_TYPE_FILL = 2,
SYS_SPU_SEGMENT_TYPE_INFO = 4,
};
enum spu_stop_syscall : u32
{
SYS_SPU_THREAD_STOP_YIELD = 0x0100,
SYS_SPU_THREAD_STOP_GROUP_EXIT = 0x0101,
SYS_SPU_THREAD_STOP_THREAD_EXIT = 0x0102,
SYS_SPU_THREAD_STOP_RECEIVE_EVENT = 0x0110,
SYS_SPU_THREAD_STOP_TRY_RECEIVE_EVENT = 0x0111,
SYS_SPU_THREAD_STOP_SWITCH_SYSTEM_MODULE = 0x0120,
};
struct sys_spu_thread_group_attribute
{
be_t<u32> nsize; // name length including NULL terminator
vm::bcptr<char> name;
be_t<s32> type;
be_t<u32> ct; // memory container id
};
enum : u32
{
SYS_SPU_THREAD_OPTION_NONE = 0,
SYS_SPU_THREAD_OPTION_ASYNC_INTR_ENABLE = 1,
SYS_SPU_THREAD_OPTION_DEC_SYNC_TB_ENABLE = 2,
};
struct sys_spu_thread_attribute
{
vm::bcptr<char> name;
be_t<u32> name_len;
be_t<u32> option;
};
struct sys_spu_thread_argument
{
be_t<u64> arg1;
be_t<u64> arg2;
be_t<u64> arg3;
be_t<u64> arg4;
};
struct sys_spu_segment
{
ENABLE_BITWISE_SERIALIZATION;
be_t<s32> type; // copy, fill, info
be_t<u32> ls; // local storage address
be_t<u32> size;
union
{
be_t<u32> addr; // address or fill value
u64 pad;
};
};
CHECK_SIZE(sys_spu_segment, 0x18);
enum : u32
{
SYS_SPU_IMAGE_TYPE_USER = 0,
SYS_SPU_IMAGE_TYPE_KERNEL = 1,
};
struct sys_spu_image
{
be_t<u32> type; // user, kernel
be_t<u32> entry_point; // Note: in kernel mode it's used to store id
vm::bptr<sys_spu_segment> segs;
be_t<s32> nsegs;
template <bool CountInfo = true, typename Phdrs>
static s32 get_nsegs(const Phdrs& phdrs)
{
s32 num_segs = 0;
for (const auto& phdr : phdrs)
{
if (phdr.p_type != 1u && phdr.p_type != 4u)
{
return -1;
}
if (phdr.p_type == 1u && phdr.p_filesz != phdr.p_memsz && phdr.p_filesz)
{
num_segs += 2;
}
else if (phdr.p_type == 1u || CountInfo)
{
num_segs += 1;
}
}
return num_segs;
}
template <bool WriteInfo = true, typename Phdrs>
static s32 fill(vm::ptr<sys_spu_segment> segs, s32 nsegs, const Phdrs& phdrs, u32 src)
{
s32 num_segs = 0;
for (const auto& phdr : phdrs)
{
if (phdr.p_type == 1u)
{
if (phdr.p_filesz)
{
if (num_segs >= nsegs)
{
return -2;
}
auto* seg = &segs[num_segs++];
seg->type = SYS_SPU_SEGMENT_TYPE_COPY;
seg->ls = static_cast<u32>(phdr.p_vaddr);
seg->size = static_cast<u32>(phdr.p_filesz);
seg->addr = static_cast<u32>(phdr.p_offset + src);
}
if (phdr.p_memsz > phdr.p_filesz)
{
if (num_segs >= nsegs)
{
return -2;
}
auto* seg = &segs[num_segs++];
seg->type = SYS_SPU_SEGMENT_TYPE_FILL;
seg->ls = static_cast<u32>(phdr.p_vaddr + phdr.p_filesz);
seg->size = static_cast<u32>(phdr.p_memsz - phdr.p_filesz);
seg->addr = 0;
}
}
else if (WriteInfo && phdr.p_type == 4u)
{
if (num_segs >= nsegs)
{
return -2;
}
auto* seg = &segs[num_segs++];
seg->type = SYS_SPU_SEGMENT_TYPE_INFO;
seg->size = 0x20;
seg->addr = static_cast<u32>(phdr.p_offset + 0x14 + src);
}
else if (phdr.p_type != 4u)
{
return -1;
}
}
return num_segs;
}
void load(const fs::file& stream);
void free() const;
static void deploy(u8* loc, std::span<const sys_spu_segment> segs, bool is_verbose = true);
};
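// Illustrative sketch (guarded out): turning ELF program headers into SPU segments with
// the helpers above. The program-header type is a stand-in containing only the fields
// those helpers read; the real loader uses its own ELF structures.
#if 0
struct example_phdr
{
	u32 p_type;   // 1 = loadable (copy/fill), 4 = info segment
	u32 p_offset;
	u32 p_vaddr;
	u32 p_filesz;
	u32 p_memsz;
};

static s32 example_build_segments(vm::ptr<sys_spu_segment> segs, s32 capacity,
	const std::vector<example_phdr>& phdrs, u32 image_base)
{
	// First pass: count the segments needed (negative result means unsupported p_type)
	const s32 needed = sys_spu_image::get_nsegs(phdrs);

	if (needed < 0 || needed > capacity)
	{
		return -1;
	}

	// Second pass: emit COPY/FILL/INFO segments with addresses relative to image_base
	return sys_spu_image::fill(segs, capacity, phdrs, image_base);
}
#endif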
enum : u32
{
SYS_SPU_IMAGE_PROTECT = 0,
SYS_SPU_IMAGE_DIRECT = 1,
};
struct lv2_spu_image : lv2_obj
{
static const u32 id_base = 0x22000000;
const u32 e_entry;
const vm::ptr<sys_spu_segment> segs;
const s32 nsegs;
lv2_spu_image(u32 entry, vm::ptr<sys_spu_segment> segs, s32 nsegs)
: e_entry(entry)
, segs(segs)
, nsegs(nsegs)
{
}
lv2_spu_image(utils::serial& ar);
void save(utils::serial& ar);
};
struct sys_spu_thread_group_syscall_253_info
{
be_t<u32> deadlineMeetCounter; // From cellSpursGetInfo
be_t<u32> deadlineMissCounter; // Same
be_t<u64> timestamp;
be_t<u64> _x10[6];
};
struct lv2_spu_group
{
static const u32 id_base = 0x04000100;
static const u32 id_step = 0x100;
static const u32 id_count = 255;
static constexpr std::pair<u32, u32> id_invl_range = {0, 8};
static_assert(spu_thread::id_count == id_count * 6 + 5);
const std::string name;
const u32 id;
const u32 max_num;
const u32 mem_size;
const s32 type; // SPU Thread Group Type
lv2_memory_container* const ct; // Memory Container
const bool has_scheduler_context;
u32 max_run;
shared_mutex mutex;
atomic_t<u32> init; // Initialization Counter
atomic_t<typename spu_thread::spu_prio_t> prio{}; // SPU Thread Group Priority
atomic_t<spu_group_status> run_state; // SPU Thread Group State
atomic_t<s32> exit_status; // SPU Thread Group Exit Status
atomic_t<u32> join_state; // flags used to detect exit cause and signal
atomic_t<u32> running = 0; // Number of running threads
atomic_t<u32> spurs_running = 0;
atomic_t<u32> stop_count = 0;
atomic_t<u32> wait_term_count = 0;
u32 waiter_spu_index = -1; // Index of SPU executing a waiting syscall
class ppu_thread* waiter = nullptr;
bool set_terminate = false;
std::array<std::shared_ptr<named_thread<spu_thread>>, 8> threads; // SPU Threads
std::array<s8, 256> threads_map; // Maps an SPU thread number to its index in threads (-1 if unused)
std::array<std::pair<u32, std::vector<sys_spu_segment>>, 8> imgs; // Entry points, SPU image segments
std::array<std::array<u64, 4>, 8> args; // SPU Thread Arguments
std::shared_ptr<lv2_event_queue> ep_run; // port for SYS_SPU_THREAD_GROUP_EVENT_RUN events
std::shared_ptr<lv2_event_queue> ep_exception; // TODO: SYS_SPU_THREAD_GROUP_EVENT_EXCEPTION
std::shared_ptr<lv2_event_queue> ep_sysmodule; // TODO: SYS_SPU_THREAD_GROUP_EVENT_SYSTEM_MODULE
lv2_spu_group(std::string name, u32 num, s32 _prio, s32 type, lv2_memory_container* ct, bool uses_scheduler, u32 mem_size) noexcept
: name(std::move(name))
, id(idm::last_id())
, max_num(num)
, mem_size(mem_size)
, type(type)
, ct(ct)
, has_scheduler_context(uses_scheduler)
, max_run(num)
, init(0)
, run_state(SPU_THREAD_GROUP_STATUS_NOT_INITIALIZED)
, exit_status(0)
, join_state(0)
, args({})
{
threads_map.fill(-1);
prio.raw().prio = _prio;
}
SAVESTATE_INIT_POS(8); // Dependency on SPUs
lv2_spu_group(utils::serial& ar) noexcept;
void save(utils::serial& ar);
CellError send_run_event(u64 data1, u64 data2, u64 data3) const
{
return ep_run ? ep_run->send(SYS_SPU_THREAD_GROUP_EVENT_RUN_KEY, data1, data2, data3) : CELL_ENOTCONN;
}
CellError send_exception_event(u64 data1, u64 data2, u64 data3) const
{
return ep_exception ? ep_exception->send(SYS_SPU_THREAD_GROUP_EVENT_EXCEPTION_KEY, data1, data2, data3) : CELL_ENOTCONN;
}
CellError send_sysmodule_event(u64 data1, u64 data2, u64 data3) const
{
return ep_sysmodule ? ep_sysmodule->send(SYS_SPU_THREAD_GROUP_EVENT_SYSTEM_MODULE_KEY, data1, data2, data3) : CELL_ENOTCONN;
}
static std::pair<named_thread<spu_thread>*, std::shared_ptr<lv2_spu_group>> get_thread(u32 id);
};
class ppu_thread;
// Syscalls
error_code sys_spu_initialize(ppu_thread&, u32 max_usable_spu, u32 max_raw_spu);
error_code _sys_spu_image_get_information(ppu_thread&, vm::ptr<sys_spu_image> img, vm::ptr<u32> entry_point, vm::ptr<s32> nsegs);
error_code sys_spu_image_open(ppu_thread&, vm::ptr<sys_spu_image> img, vm::cptr<char> path);
error_code _sys_spu_image_import(ppu_thread&, vm::ptr<sys_spu_image> img, u32 src, u32 size, u32 arg4);
error_code _sys_spu_image_close(ppu_thread&, vm::ptr<sys_spu_image> img);
error_code _sys_spu_image_get_segments(ppu_thread&, vm::ptr<sys_spu_image> img, vm::ptr<sys_spu_segment> segments, s32 nseg);
error_code sys_spu_thread_initialize(ppu_thread&, vm::ptr<u32> thread, u32 group, u32 spu_num, vm::ptr<sys_spu_image>, vm::ptr<sys_spu_thread_attribute>, vm::ptr<sys_spu_thread_argument>);
error_code sys_spu_thread_set_argument(ppu_thread&, u32 id, vm::ptr<sys_spu_thread_argument> arg);
error_code sys_spu_thread_group_create(ppu_thread&, vm::ptr<u32> id, u32 num, s32 prio, vm::ptr<sys_spu_thread_group_attribute> attr);
error_code sys_spu_thread_group_destroy(ppu_thread&, u32 id);
error_code sys_spu_thread_group_start(ppu_thread&, u32 id);
error_code sys_spu_thread_group_suspend(ppu_thread&, u32 id);
error_code sys_spu_thread_group_resume(ppu_thread&, u32 id);
error_code sys_spu_thread_group_yield(ppu_thread&, u32 id);
error_code sys_spu_thread_group_terminate(ppu_thread&, u32 id, s32 value);
error_code sys_spu_thread_group_join(ppu_thread&, u32 id, vm::ptr<u32> cause, vm::ptr<u32> status);
error_code sys_spu_thread_group_set_priority(ppu_thread&, u32 id, s32 priority);
error_code sys_spu_thread_group_get_priority(ppu_thread&, u32 id, vm::ptr<s32> priority);
error_code sys_spu_thread_group_connect_event(ppu_thread&, u32 id, u32 eq, u32 et);
error_code sys_spu_thread_group_disconnect_event(ppu_thread&, u32 id, u32 et);
error_code sys_spu_thread_group_connect_event_all_threads(ppu_thread&, u32 id, u32 eq_id, u64 req, vm::ptr<u8> spup);
error_code sys_spu_thread_group_disconnect_event_all_threads(ppu_thread&, u32 id, u32 spup);
error_code sys_spu_thread_group_set_cooperative_victims(ppu_thread&, u32 id, u32 threads_mask);
error_code sys_spu_thread_group_syscall_253(ppu_thread& ppu, u32 id, vm::ptr<sys_spu_thread_group_syscall_253_info> info);
error_code sys_spu_thread_group_log(ppu_thread&, s32 command, vm::ptr<s32> stat);
error_code sys_spu_thread_write_ls(ppu_thread&, u32 id, u32 lsa, u64 value, u32 type);
error_code sys_spu_thread_read_ls(ppu_thread&, u32 id, u32 lsa, vm::ptr<u64> value, u32 type);
error_code sys_spu_thread_write_spu_mb(ppu_thread&, u32 id, u32 value);
error_code sys_spu_thread_set_spu_cfg(ppu_thread&, u32 id, u64 value);
error_code sys_spu_thread_get_spu_cfg(ppu_thread&, u32 id, vm::ptr<u64> value);
error_code sys_spu_thread_write_snr(ppu_thread&, u32 id, u32 number, u32 value);
error_code sys_spu_thread_connect_event(ppu_thread&, u32 id, u32 eq, u32 et, u32 spup);
error_code sys_spu_thread_disconnect_event(ppu_thread&, u32 id, u32 et, u32 spup);
error_code sys_spu_thread_bind_queue(ppu_thread&, u32 id, u32 spuq, u32 spuq_num);
error_code sys_spu_thread_unbind_queue(ppu_thread&, u32 id, u32 spuq_num);
error_code sys_spu_thread_get_exit_status(ppu_thread&, u32 id, vm::ptr<s32> status);
error_code sys_spu_thread_recover_page_fault(ppu_thread&, u32 id);
error_code sys_raw_spu_create(ppu_thread&, vm::ptr<u32> id, vm::ptr<void> attr);
error_code sys_raw_spu_destroy(ppu_thread& ppu, u32 id);
error_code sys_raw_spu_create_interrupt_tag(ppu_thread&, u32 id, u32 class_id, u32 hwthread, vm::ptr<u32> intrtag);
error_code sys_raw_spu_set_int_mask(ppu_thread&, u32 id, u32 class_id, u64 mask);
error_code sys_raw_spu_get_int_mask(ppu_thread&, u32 id, u32 class_id, vm::ptr<u64> mask);
error_code sys_raw_spu_set_int_stat(ppu_thread&, u32 id, u32 class_id, u64 stat);
error_code sys_raw_spu_get_int_stat(ppu_thread&, u32 id, u32 class_id, vm::ptr<u64> stat);
error_code sys_raw_spu_read_puint_mb(ppu_thread&, u32 id, vm::ptr<u32> value);
error_code sys_raw_spu_set_spu_cfg(ppu_thread&, u32 id, u32 value);
error_code sys_raw_spu_get_spu_cfg(ppu_thread&, u32 id, vm::ptr<u32> value);
error_code sys_raw_spu_recover_page_fault(ppu_thread&, u32 id);
error_code sys_isolated_spu_create(ppu_thread&, vm::ptr<u32> id, vm::ptr<void> image, u64 arg1, u64 arg2, u64 arg3, u64 arg4);
error_code sys_isolated_spu_start(ppu_thread&, u32 id);
error_code sys_isolated_spu_destroy(ppu_thread& ppu, u32 id);
error_code sys_isolated_spu_create_interrupt_tag(ppu_thread&, u32 id, u32 class_id, u32 hwthread, vm::ptr<u32> intrtag);
error_code sys_isolated_spu_set_int_mask(ppu_thread&, u32 id, u32 class_id, u64 mask);
error_code sys_isolated_spu_get_int_mask(ppu_thread&, u32 id, u32 class_id, vm::ptr<u64> mask);
error_code sys_isolated_spu_set_int_stat(ppu_thread&, u32 id, u32 class_id, u64 stat);
error_code sys_isolated_spu_get_int_stat(ppu_thread&, u32 id, u32 class_id, vm::ptr<u64> stat);
error_code sys_isolated_spu_read_puint_mb(ppu_thread&, u32 id, vm::ptr<u32> value);
error_code sys_isolated_spu_set_spu_cfg(ppu_thread&, u32 id, u32 value);
error_code sys_isolated_spu_get_spu_cfg(ppu_thread&, u32 id, vm::ptr<u32> value);
| 13,994 | C++ | .h | 355 | 37.053521 | 188 | 0.707216 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,953 | sys_sync.h | RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_sync.h |
#pragma once
#include "Utilities/mutex.h"
#include "Utilities/sema.h"
#include "Emu/CPU/CPUThread.h"
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/timers.hpp"
#include "Emu/IdManager.h"
#include "Emu/IPC.h"
#include <thread>
// attr_protocol (waiting scheduling policy)
enum lv2_protocol : u8
{
SYS_SYNC_FIFO = 0x1, // First In, First Out Order
SYS_SYNC_PRIORITY = 0x2, // Priority Order
SYS_SYNC_PRIORITY_INHERIT = 0x3, // Basic Priority Inheritance Protocol
SYS_SYNC_RETRY = 0x4, // Not selected while unlocking
};
enum : u32
{
SYS_SYNC_ATTR_PROTOCOL_MASK = 0xf,
};
// attr_recursive (recursive locks policy)
enum
{
SYS_SYNC_RECURSIVE = 0x10,
SYS_SYNC_NOT_RECURSIVE = 0x20,
SYS_SYNC_ATTR_RECURSIVE_MASK = 0xf0,
};
// attr_pshared (sharing among processes policy)
enum
{
SYS_SYNC_PROCESS_SHARED = 0x100,
SYS_SYNC_NOT_PROCESS_SHARED = 0x200,
SYS_SYNC_ATTR_PSHARED_MASK = 0xf00,
};
// attr_flags (creation policy)
enum
{
SYS_SYNC_NEWLY_CREATED = 0x1, // Create new object, fails if specified IPC key exists
SYS_SYNC_NOT_CREATE = 0x2, // Reference existing object, fails if IPC key not found
SYS_SYNC_NOT_CARE = 0x3, // Reference existing object, create new one if IPC key not found
SYS_SYNC_ATTR_FLAGS_MASK = 0xf,
};
// attr_adaptive
enum
{
SYS_SYNC_ADAPTIVE = 0x1000,
SYS_SYNC_NOT_ADAPTIVE = 0x2000,
SYS_SYNC_ATTR_ADAPTIVE_MASK = 0xf000,
};
enum ppu_thread_status : u32;
struct ppu_non_sleeping_count_t
{
bool has_running; // no actual count is kept, for optimization's sake
u32 onproc_count;
};
namespace vm
{
extern u8 g_reservations[65536 / 128 * 64];
}
// Base class for some kernel objects (shared set of 8192 objects).
struct lv2_obj
{
static const u32 id_step = 0x100;
static const u32 id_count = 8192;
static constexpr std::pair<u32, u32> id_invl_range = {0, 8};
private:
enum thread_cmd : s32
{
yield_cmd = smin,
enqueue_cmd,
};
// Function executed under IDM mutex, error will make the object creation fail and the error will be returned
CellError on_id_create()
{
exists++;
return {};
}
public:
SAVESTATE_INIT_POS(4); // Dependency on PPUs
lv2_obj() noexcept = default;
lv2_obj(u32 i) noexcept : exists{ i } {}
lv2_obj(utils::serial&) noexcept {}
void save(utils::serial&) {}
// Existence validation (workaround for shared-ptr ref-counting)
atomic_t<u32> exists = 0;
template <typename Ptr>
static bool check(Ptr&& ptr)
{
return ptr && ptr->exists;
}
// wrapper for name64 string formatting
struct name_64
{
u64 data;
};
static std::string name64(u64 name_u64);
// Find and remove the object from the linked list
template <bool ModifyNode = true, typename T>
static T* unqueue(T*& first, T* object, T* T::* mem_ptr = &T::next_cpu)
{
auto it = +first;
if (it == object)
{
atomic_storage<T*>::release(first, it->*mem_ptr);
if constexpr (ModifyNode)
{
atomic_storage<T*>::release(it->*mem_ptr, nullptr);
}
return it;
}
for (; it;)
{
const auto next = it->*mem_ptr + 0;
if (next == object)
{
atomic_storage<T*>::release(it->*mem_ptr, next->*mem_ptr);
if constexpr (ModifyNode)
{
atomic_storage<T*>::release(next->*mem_ptr, nullptr);
}
return next;
}
it = next;
}
return {};
}
// Remove an object from the linked set according to the protocol
template <typename E, typename T>
static E* schedule(T& first, u32 protocol, bool modify_node = true)
{
auto it = static_cast<E*>(first);
if (!it)
{
return it;
}
auto parent_found = &first;
if (protocol == SYS_SYNC_FIFO)
{
while (true)
{
const auto next = +it->next_cpu;
if (next)
{
parent_found = &it->next_cpu;
it = next;
continue;
}
if (cpu_flag::again - it->state)
{
atomic_storage<T>::release(*parent_found, nullptr);
}
return it;
}
}
auto prio = it->prio.load();
auto found = it;
while (true)
{
auto& node = it->next_cpu;
const auto next = static_cast<E*>(node);
if (!next)
{
break;
}
const auto _prio = static_cast<E*>(next)->prio.load();
// The comparison also accepts equal priorities, so among equal-priority threads the earliest pushed element is popped
if (_prio.prio < prio.prio || (_prio.prio == prio.prio && _prio.order < prio.order))
{
found = next;
parent_found = &node;
prio = _prio;
}
it = next;
}
if (cpu_flag::again - found->state)
{
atomic_storage<T>::release(*parent_found, found->next_cpu);
if (modify_node)
{
atomic_storage<T>::release(found->next_cpu, nullptr);
}
}
return found;
}
template <typename T>
static void emplace(T& first, T object)
{
atomic_storage<T>::release(object->next_cpu, first);
atomic_storage<T>::release(first, object);
object->prio.atomic_op([order = ++g_priority_order_tag](std::common_type_t<decltype(std::declval<T>()->prio.load())>& prio)
{
if constexpr (requires { +std::declval<decltype(prio)>().preserve_bit; } )
{
if (prio.preserve_bit)
{
// Restoring state on load
prio.preserve_bit = 0;
return;
}
}
prio.order = order;
});
}
private:
// Remove the current thread from the scheduling queue, register timeout
static bool sleep_unlocked(cpu_thread&, u64 timeout, u64 current_time);
// Schedule the thread
static bool awake_unlocked(cpu_thread*, s32 prio = enqueue_cmd);
public:
static constexpr u64 max_timeout = u64{umax} / 1000;
static bool sleep(cpu_thread& cpu, const u64 timeout = 0);
static bool awake(cpu_thread* thread, s32 prio = enqueue_cmd);
// Returns true on successful context switch, false otherwise
static bool yield(cpu_thread& thread);
static void set_priority(cpu_thread& thread, s32 prio)
{
ensure(prio + 512u < 3712);
awake(&thread, prio);
}
static inline void awake_all()
{
awake({});
g_to_awake.clear();
}
static void make_scheduler_ready();
static std::pair<ppu_thread_status, u32> ppu_state(ppu_thread* ppu, bool lock_idm = true, bool lock_lv2 = true);
static inline void append(cpu_thread* const thread)
{
g_to_awake.emplace_back(thread);
}
// Serialization related
static void set_future_sleep(cpu_thread* cpu);
static bool is_scheduler_ready();
// Must be called under IDM lock
static ppu_non_sleeping_count_t count_non_sleeping_threads();
static inline bool has_ppus_in_running_state() noexcept
{
return count_non_sleeping_threads().has_running != 0;
}
static void set_yield_frequency(u64 freq, u64 max_allowed_tsx);
static void cleanup();
template <typename T>
static inline u64 get_key(const T& attr)
{
return (attr.pshared == SYS_SYNC_PROCESS_SHARED ? +attr.ipc_key : 0);
}
template <typename T, typename F>
static error_code create(u32 pshared, u64 ipc_key, s32 flags, F&& make, bool key_not_zero = true)
{
switch (pshared)
{
case SYS_SYNC_PROCESS_SHARED:
{
if (key_not_zero && ipc_key == 0)
{
return CELL_EINVAL;
}
switch (flags)
{
case SYS_SYNC_NEWLY_CREATED:
case SYS_SYNC_NOT_CARE:
case SYS_SYNC_NOT_CREATE:
{
break;
}
default: return CELL_EINVAL;
}
break;
}
case SYS_SYNC_NOT_PROCESS_SHARED:
{
break;
}
default: return CELL_EINVAL;
}
// EAGAIN for IDM IDs shortage
CellError error = CELL_EAGAIN;
if (!idm::import<lv2_obj, T>([&]() -> std::shared_ptr<T>
{
std::shared_ptr<T> result = make();
auto finalize_construct = [&]() -> std::shared_ptr<T>
{
if ((error = result->on_id_create()))
{
result.reset();
}
return std::move(result);
};
if (pshared != SYS_SYNC_PROCESS_SHARED)
{
// Creation of unique (non-shared) object handle
return finalize_construct();
}
auto& ipc_container = g_fxo->get<ipc_manager<T, u64>>();
if (flags == SYS_SYNC_NOT_CREATE)
{
result = ipc_container.get(ipc_key);
if (!result)
{
error = CELL_ESRCH;
return result;
}
// Run on_id_create() on existing object
return finalize_construct();
}
bool added = false;
std::tie(added, result) = ipc_container.add(ipc_key, finalize_construct, flags != SYS_SYNC_NEWLY_CREATED);
if (!added)
{
if (flags == SYS_SYNC_NEWLY_CREATED)
{
// Object already exists, but the flags do not allow reusing it
error = CELL_EEXIST;
// We specified that we do not want to peek at the pointer's value, so result must be empty
AUDIT(!result);
return result;
}
// Run on_id_create() on existing object
return finalize_construct();
}
return result;
}))
{
return error;
}
return CELL_OK;
}
template <typename T>
static void on_id_destroy(T& obj, u64 ipc_key, u64 pshared = -1)
{
if (pshared == umax)
{
// Default is to check key
pshared = ipc_key != 0;
}
if (obj.exists-- == 1u && pshared)
{
g_fxo->get<ipc_manager<T, u64>>().remove(ipc_key);
}
}
template <typename T>
static std::shared_ptr<T> load(u64 ipc_key, std::shared_ptr<T> make, u64 pshared = -1)
{
if (pshared == umax ? ipc_key != 0 : pshared != 0)
{
g_fxo->need<ipc_manager<T, u64>>();
make = g_fxo->get<ipc_manager<T, u64>>().add(ipc_key, [&]()
{
return make;
}, true).second;
}
// Ensure no error
ensure(!make->on_id_create());
return make;
}
static bool wait_timeout(u64 usec, ppu_thread* cpu = {}, bool scale = true, bool is_usleep = false);
static inline void notify_all()
{
for (auto cpu : g_to_notify)
{
if (!cpu)
{
break;
}
if (cpu != &g_to_notify)
{
if (cpu >= vm::g_reservations && cpu <= vm::g_reservations + (std::size(vm::g_reservations) - 1))
{
atomic_wait_engine::notify_all(cpu);
}
else
{
// Note: by the time of notification the thread could have been deallocated, which is why the direct function is used
atomic_wait_engine::notify_one(cpu);
}
}
}
g_to_notify[0] = nullptr;
g_postpone_notify_barrier = false;
}
// Can be called before the actual sleep call in order to move it out of mutex scope
static void prepare_for_sleep(cpu_thread& cpu);
struct notify_all_t
{
notify_all_t() noexcept
{
g_postpone_notify_barrier = true;
}
notify_all_t(const notify_all_t&) = delete;
static void cleanup()
{
for (auto& cpu : g_to_notify)
{
if (!cpu)
{
return;
}
// While IDM mutex is still locked (this function assumes so) check if the notification is still needed
// Pending flag is meant for forced notification (if the CPU really has pending work it can restore the flag in theory)
// Disabled to allow reservation notifications from here
if (false && cpu != &g_to_notify && static_cast<const decltype(cpu_thread::state)*>(cpu)->none_of(cpu_flag::signal + cpu_flag::pending))
{
// Omit it (this is a void pointer, it can hold anything)
cpu = &g_to_notify;
}
}
}
~notify_all_t() noexcept
{
lv2_obj::notify_all();
}
};
// Scheduler mutex
static shared_mutex g_mutex;
// Priority tags
static atomic_t<u64> g_priority_order_tag;
private:
// Pending list of threads to run
static thread_local std::vector<class cpu_thread*> g_to_awake;
// Scheduler queue for active PPU threads
static class ppu_thread* g_ppu;
// Count of threads the scheduler is still waiting for a response from
static u32 g_pending;
// Pending list of threads to notify (cpu_thread::state ptr)
static thread_local std::add_pointer_t<const void> g_to_notify[4];
// If a notify_all_t object exists locally, postpone notifications to the destructor of it (not recursive, notifies on the first destructor for safety)
static thread_local bool g_postpone_notify_barrier;
static void schedule_all(u64 current_time = 0);
};
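// Illustrative sketch, not part of the original header: notify_all_t exists to move waiter
// wake-ups out of the scheduler mutex scope; its constructor raises g_postpone_notify_barrier
// and its destructor flushes g_to_notify through notify_all(). The syscall body below is
// hypothetical and only shows the intended scoping.
error_code sys_example_wakeup()
{
	lv2_obj::notify_all_t notify; // from here on, wake-ups are queued instead of delivered
	{
		std::lock_guard lock(lv2_obj::g_mutex); // scheduler mutex held only for the queueing work
		// ... pick waiters to awake; their notification targets land in g_to_notify ...
	}
	return CELL_OK;
	// 'notify' is destroyed after g_mutex has been released and delivers the queued wake-ups
}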
| 11,758 | C++ | .h | 430 | 24.004651 | 152 | 0.667291 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,954 | sys_btsetting.h | RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_btsetting.h |
#pragma once
#include "Emu/Memory/vm_ptr.h"
#include "Emu/Cell/ErrorCodes.h"
// SysCalls
error_code sys_btsetting_if(u64 cmd, vm::ptr<void> msg);
| 149 | C++ | .h | 5 | 28.2 | 56 | 0.751773 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,955 | sys_sm.h | RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_sm.h |
#pragma once
#include "Emu/Memory/vm_ptr.h"
#include "Emu/Cell/ErrorCodes.h"
// SysCalls
error_code sys_sm_get_ext_event2(vm::ptr<u64> a1, vm::ptr<u64> a2, vm::ptr<u64> a3, u64 a4);
error_code sys_sm_shutdown(ppu_thread& ppu, u16 op, vm::ptr<void> param, u64 size);
error_code sys_sm_get_params(vm::ptr<u8> a, vm::ptr<u8> b, vm::ptr<u32> c, vm::ptr<u64> d);
error_code sys_sm_set_shop_mode(s32 mode);
error_code sys_sm_control_led(u8 led, u8 action);
error_code sys_sm_ring_buzzer(u64 packet, u64 a1, u64 a2);
constexpr auto sys_sm_ring_buzzer2 = sys_sm_ring_buzzer;
| 570 | C++ | .h | 11 | 50.545455 | 92 | 0.717626 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,956 | sys_gpio.h | RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_gpio.h |
#pragma once
#include "Emu/Memory/vm_ptr.h"
#include "Emu/Cell/ErrorCodes.h"
enum : u64
{
SYS_GPIO_UNKNOWN_DEVICE_ID = 0x0,
SYS_GPIO_LED_DEVICE_ID = 0x1,
SYS_GPIO_DIP_SWITCH_DEVICE_ID = 0x2,
};
error_code sys_gpio_get(u64 device_id, vm::ptr<u64> value);
error_code sys_gpio_set(u64 device_id, u64 mask, u64 value);
| 331 | C++ | .h | 11 | 28.545455 | 60 | 0.709779 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,957 | sys_trace.h | RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_trace.h |
#pragma once
#include "util/types.hpp"
// SysCalls
s32 sys_trace_create();
s32 sys_trace_start();
s32 sys_trace_stop();
s32 sys_trace_update_top_index();
s32 sys_trace_destroy();
s32 sys_trace_drain();
s32 sys_trace_attach_process();
s32 sys_trace_allocate_buffer();
s32 sys_trace_free_buffer();
s32 sys_trace_create2();
| 323 | C++ | .h | 13 | 23.692308 | 33 | 0.753247 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,958 | sys_semaphore.h | RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_semaphore.h |
#pragma once
#include "sys_sync.h"
#include "Emu/Memory/vm_ptr.h"
struct sys_semaphore_attribute_t
{
be_t<u32> protocol;
be_t<u32> pshared;
be_t<u64> ipc_key;
be_t<s32> flags;
be_t<u32> pad;
union
{
nse_t<u64, 1> name_u64;
char name[sizeof(u64)];
};
};
struct lv2_sema final : lv2_obj
{
static const u32 id_base = 0x96000000;
const lv2_protocol protocol;
const u64 key;
const u64 name;
const s32 max;
shared_mutex mutex;
atomic_t<s32> val;
ppu_thread* sq{};
lv2_sema(u32 protocol, u64 key, u64 name, s32 max, s32 value) noexcept
: protocol{static_cast<u8>(protocol)}
, key(key)
, name(name)
, max(max)
, val(value)
{
}
lv2_sema(utils::serial& ar);
static std::shared_ptr<void> load(utils::serial& ar);
void save(utils::serial& ar);
};
// Aux
class ppu_thread;
// Syscalls
error_code sys_semaphore_create(ppu_thread& ppu, vm::ptr<u32> sem_id, vm::ptr<sys_semaphore_attribute_t> attr, s32 initial_val, s32 max_val);
error_code sys_semaphore_destroy(ppu_thread& ppu, u32 sem_id);
error_code sys_semaphore_wait(ppu_thread& ppu, u32 sem_id, u64 timeout);
error_code sys_semaphore_trywait(ppu_thread& ppu, u32 sem_id);
error_code sys_semaphore_post(ppu_thread& ppu, u32 sem_id, s32 count);
error_code sys_semaphore_get_value(ppu_thread& ppu, u32 sem_id, vm::ptr<s32> count);
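// Illustrative sketch, not part of the original header: sys_semaphore_attribute_t keeps its
// name as an 8-byte union (name_u64 aliasing name[8]). The constants below are placeholders
// (real code uses the SYS_SYNC_* protocol/pshared values from sys_sync.h) and the helper
// name is made up.
#include <cstring>
static inline sys_semaphore_attribute_t make_example_sema_attr()
{
	sys_semaphore_attribute_t attr{};
	attr.protocol = 1;                   // placeholder protocol value
	attr.pshared  = 0;                   // placeholder: not process-shared
	attr.ipc_key  = 0;                   // no IPC key for a non-shared object
	attr.flags    = 0;
	std::memcpy(attr.name, "MYSEMA", 6); // both union views cover the same 8 bytes; the rest stays zero
	return attr;
}
// A guest would then pass this block (and an output slot) to
// sys_semaphore_create(ppu, sem_id, attr, /*initial_val*/ 0, /*max_val*/ 1).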
| 1,317 | C++ | .h | 47 | 26 | 141 | 0.721781 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,959 | sys_process.h | RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_process.h |
#pragma once
#include "Crypto/unself.h"
#include "Emu/Memory/vm_ptr.h"
#include "Emu/Cell/ErrorCodes.h"
// Process Local Object Type
enum : u32
{
SYS_MEM_OBJECT = 0x08,
SYS_MUTEX_OBJECT = 0x85,
SYS_COND_OBJECT = 0x86,
SYS_RWLOCK_OBJECT = 0x88,
SYS_INTR_TAG_OBJECT = 0x0A,
SYS_INTR_SERVICE_HANDLE_OBJECT = 0x0B,
SYS_EVENT_QUEUE_OBJECT = 0x8D,
SYS_EVENT_PORT_OBJECT = 0x0E,
SYS_TRACE_OBJECT = 0x21,
SYS_SPUIMAGE_OBJECT = 0x22,
SYS_PRX_OBJECT = 0x23,
SYS_SPUPORT_OBJECT = 0x24,
SYS_OVERLAY_OBJECT = 0x25,
SYS_LWMUTEX_OBJECT = 0x95,
SYS_TIMER_OBJECT = 0x11,
SYS_SEMAPHORE_OBJECT = 0x96,
SYS_FS_FD_OBJECT = 0x73,
SYS_LWCOND_OBJECT = 0x97,
SYS_EVENT_FLAG_OBJECT = 0x98,
SYS_RSXAUDIO_OBJECT = 0x60,
};
enum : u64
{
SYS_PROCESS_PRIMARY_STACK_SIZE_32K = 0x0000000000000010,
SYS_PROCESS_PRIMARY_STACK_SIZE_64K = 0x0000000000000020,
SYS_PROCESS_PRIMARY_STACK_SIZE_96K = 0x0000000000000030,
SYS_PROCESS_PRIMARY_STACK_SIZE_128K = 0x0000000000000040,
SYS_PROCESS_PRIMARY_STACK_SIZE_256K = 0x0000000000000050,
SYS_PROCESS_PRIMARY_STACK_SIZE_512K = 0x0000000000000060,
SYS_PROCESS_PRIMARY_STACK_SIZE_1M = 0x0000000000000070,
};
constexpr auto SYS_PROCESS_PARAM_SECTION_NAME = ".sys_proc_param";
enum
{
SYS_PROCESS_PARAM_INVALID_PRIO = -32768,
};
enum : u32
{
SYS_PROCESS_PARAM_INVALID_STACK_SIZE = 0xffffffff,
SYS_PROCESS_PARAM_STACK_SIZE_MIN = 0x1000, // 4KB
SYS_PROCESS_PARAM_STACK_SIZE_MAX = 0x100000, // 1MB
SYS_PROCESS_PARAM_VERSION_INVALID = 0xffffffff,
SYS_PROCESS_PARAM_VERSION_1 = 0x00000001, // for SDK 08X
SYS_PROCESS_PARAM_VERSION_084_0 = 0x00008400,
SYS_PROCESS_PARAM_VERSION_090_0 = 0x00009000,
SYS_PROCESS_PARAM_VERSION_330_0 = 0x00330000,
SYS_PROCESS_PARAM_MAGIC = 0x13bcc5f6,
SYS_PROCESS_PARAM_MALLOC_PAGE_SIZE_NONE = 0x00000000,
SYS_PROCESS_PARAM_MALLOC_PAGE_SIZE_64K = 0x00010000,
SYS_PROCESS_PARAM_MALLOC_PAGE_SIZE_1M = 0x00100000,
SYS_PROCESS_PARAM_PPC_SEG_DEFAULT = 0x00000000,
SYS_PROCESS_PARAM_PPC_SEG_OVLM = 0x00000001,
SYS_PROCESS_PARAM_PPC_SEG_FIXEDADDR_PRX = 0x00000002,
SYS_PROCESS_PARAM_SDK_VERSION_UNKNOWN = 0xffffffff,
};
struct sys_exit2_param
{
be_t<u64> x0; // 0x85
be_t<u64> this_size; // 0x30
be_t<u64> next_size;
be_t<s64> prio;
be_t<u64> flags;
vm::bpptr<char, u64, u64> args;
};
struct ps3_process_info_t
{
u32 sdk_ver;
u32 ppc_seg;
SelfAdditionalInfo self_info;
u32 ctrl_flags1 = 0;
bool has_root_perm() const;
bool has_debug_perm() const;
bool debug_or_root() const;
std::string_view get_cellos_appname() const;
};
extern ps3_process_info_t g_ps3_process_info;
// Auxiliary functions
s32 process_getpid();
s32 process_get_sdk_version(u32 pid, s32& ver);
void lv2_exitspawn(ppu_thread& ppu, std::vector<std::string>& argv, std::vector<std::string>& envp, std::vector<u8>& data);
enum CellError : u32;
CellError process_is_spu_lock_line_reservation_address(u32 addr, u64 flags);
// SysCalls
s32 sys_process_getpid();
s32 sys_process_getppid();
error_code sys_process_get_number_of_object(u32 object, vm::ptr<u32> nump);
error_code sys_process_get_id(u32 object, vm::ptr<u32> buffer, u32 size, vm::ptr<u32> set_size);
error_code sys_process_get_id2(u32 object, vm::ptr<u32> buffer, u32 size, vm::ptr<u32> set_size);
error_code _sys_process_get_paramsfo(vm::ptr<char> buffer);
error_code sys_process_get_sdk_version(u32 pid, vm::ptr<s32> version);
error_code sys_process_get_status(u64 unk);
error_code sys_process_is_spu_lock_line_reservation_address(u32 addr, u64 flags);
error_code sys_process_kill(u32 pid);
error_code sys_process_wait_for_child(u32 pid, vm::ptr<u32> status, u64 unk);
error_code sys_process_wait_for_child2(u64 unk1, u64 unk2, u64 unk3, u64 unk4, u64 unk5, u64 unk6);
error_code sys_process_detach_child(u64 unk);
void _sys_process_exit(ppu_thread& ppu, s32 status, u32 arg2, u32 arg3);
void _sys_process_exit2(ppu_thread& ppu, s32 status, vm::ptr<sys_exit2_param> arg, u32 arg_size, u32 arg4);
void sys_process_exit3(ppu_thread& ppu, s32 status);
error_code sys_process_spawns_a_self2(vm::ptr<u32> pid, u32 primary_prio, u64 flags, vm::ptr<void> stack, u32 stack_size, u32 mem_id, vm::ptr<void> param_sfo, vm::ptr<void> dbg_data);
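// Illustrative sketch, not part of the original header: filling the _sys_process_exit2
// parameter block. The 0x85 / 0x30 values simply restate the field comments above;
// everything else is illustrative and the helper name is made up.
static inline sys_exit2_param make_example_exit2_param()
{
	sys_exit2_param p{};
	p.x0        = 0x85; // per the field comment above
	p.this_size = 0x30; // size of this header block, per the field comment above
	p.next_size = 0;
	p.prio      = 1001; // illustrative priority
	p.flags     = 0;
	// p.args is left null here; real code points it at the guest argv block
	return p;
}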
| 4,465 | C++ | .h | 107 | 40.009346 | 183 | 0.700853 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,960 | sys_mmapper.h | RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_mmapper.h |
#pragma once
#include "sys_sync.h"
#include "Emu/Memory/vm_ptr.h"
#include "Emu/Cell/ErrorCodes.h"
#include <vector>
struct lv2_memory_container;
namespace utils
{
class shm;
}
struct lv2_memory : lv2_obj
{
static const u32 id_base = 0x08000000;
const u32 size; // Memory size
const u32 align; // Alignment required
const u64 flags;
const u64 key; // IPC key
const bool pshared; // Process shared flag
lv2_memory_container* const ct; // Associated memory container
const std::shared_ptr<utils::shm> shm;
atomic_t<u32> counter{0};
lv2_memory(u32 size, u32 align, u64 flags, u64 key, bool pshared, lv2_memory_container* ct);
lv2_memory(utils::serial& ar);
static std::shared_ptr<void> load(utils::serial& ar);
void save(utils::serial& ar);
CellError on_id_create();
};
enum : u64
{
SYS_MEMORY_PAGE_FAULT_EVENT_KEY = 0xfffe000000000000ULL,
};
enum : u64
{
SYS_MMAPPER_NO_SHM_KEY = 0xffff000000000000ull, // Unofficial name
};
enum : u64
{
SYS_MEMORY_PAGE_FAULT_CAUSE_NON_MAPPED = 0x2ULL,
SYS_MEMORY_PAGE_FAULT_CAUSE_READ_ONLY = 0x1ULL,
SYS_MEMORY_PAGE_FAULT_TYPE_PPU_THREAD = 0x0ULL,
SYS_MEMORY_PAGE_FAULT_TYPE_SPU_THREAD = 0x1ULL,
SYS_MEMORY_PAGE_FAULT_TYPE_RAW_SPU = 0x2ULL,
};
struct page_fault_notification_entry
{
ENABLE_BITWISE_SERIALIZATION;
u32 start_addr; // Starting address of region to monitor.
u32 event_queue_id; // Queue to be notified.
u32 port_id; // Port used to notify the queue.
};
// Used to hold list of queues to be notified on page fault event.
struct page_fault_notification_entries
{
std::vector<page_fault_notification_entry> entries;
shared_mutex mutex;
SAVESTATE_INIT_POS(44);
page_fault_notification_entries() = default;
page_fault_notification_entries(utils::serial& ar);
void save(utils::serial& ar);
};
struct page_fault_event_entries
{
// First = thread, second = addr
std::unordered_map<class cpu_thread*, u32> events;
shared_mutex pf_mutex;
};
struct mmapper_unk_entry_struct0
{
be_t<u32> a; // 0x0
be_t<u32> b; // 0x4
be_t<u32> c; // 0x8
be_t<u32> d; // 0xc
be_t<u64> type; // 0x10
};
// Aux
class ppu_thread;
error_code mmapper_thread_recover_page_fault(cpu_thread* cpu);
// SysCalls
error_code sys_mmapper_allocate_address(ppu_thread&, u64 size, u64 flags, u64 alignment, vm::ptr<u32> alloc_addr);
error_code sys_mmapper_allocate_fixed_address(ppu_thread&);
error_code sys_mmapper_allocate_shared_memory(ppu_thread&, u64 ipc_key, u64 size, u64 flags, vm::ptr<u32> mem_id);
error_code sys_mmapper_allocate_shared_memory_from_container(ppu_thread&, u64 ipc_key, u64 size, u32 cid, u64 flags, vm::ptr<u32> mem_id);
error_code sys_mmapper_allocate_shared_memory_ext(ppu_thread&, u64 ipc_key, u64 size, u32 flags, vm::ptr<mmapper_unk_entry_struct0> entries, s32 entry_count, vm::ptr<u32> mem_id);
error_code sys_mmapper_allocate_shared_memory_from_container_ext(ppu_thread&, u64 ipc_key, u64 size, u64 flags, u32 cid, vm::ptr<mmapper_unk_entry_struct0> entries, s32 entry_count, vm::ptr<u32> mem_id);
error_code sys_mmapper_change_address_access_right(ppu_thread&, u32 addr, u64 flags);
error_code sys_mmapper_free_address(ppu_thread&, u32 addr);
error_code sys_mmapper_free_shared_memory(ppu_thread&, u32 mem_id);
error_code sys_mmapper_map_shared_memory(ppu_thread&, u32 addr, u32 mem_id, u64 flags);
error_code sys_mmapper_search_and_map(ppu_thread&, u32 start_addr, u32 mem_id, u64 flags, vm::ptr<u32> alloc_addr);
error_code sys_mmapper_unmap_shared_memory(ppu_thread&, u32 addr, vm::ptr<u32> mem_id);
error_code sys_mmapper_enable_page_fault_notification(ppu_thread&, u32 start_addr, u32 event_queue_id);
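// Illustrative sketch, not part of the original header: one page-fault notification record,
// restating the field comments above with made-up values. How such records are registered
// (see sys_mmapper_enable_page_fault_notification above) is not shown here.
static inline page_fault_notification_entry make_example_pf_entry()
{
	page_fault_notification_entry entry{};
	entry.start_addr     = 0x30000000; // illustrative start of the monitored region
	entry.event_queue_id = 42;         // illustrative queue to be notified
	entry.port_id        = 7;          // illustrative port used to notify the queue
	return entry;
}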
| 3,632 | C++ | .h | 91 | 38.208791 | 203 | 0.747228 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,961 | lv2_socket_p2p.h | RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_net/lv2_socket_p2p.h |
#pragma once
#include "lv2_socket.h"
class lv2_socket_p2p : public lv2_socket
{
public:
lv2_socket_p2p(lv2_socket_family family, lv2_socket_type type, lv2_ip_protocol protocol);
lv2_socket_p2p(utils::serial& ar, lv2_socket_type type);
void save(utils::serial& ar);
std::tuple<bool, s32, std::shared_ptr<lv2_socket>, sys_net_sockaddr> accept(bool is_lock = true) override;
s32 bind(const sys_net_sockaddr& addr) override;
std::optional<s32> connect(const sys_net_sockaddr& addr) override;
s32 connect_followup() override;
std::pair<s32, sys_net_sockaddr> getpeername() override;
std::pair<s32, sys_net_sockaddr> getsockname() override;
std::tuple<s32, sockopt_data, u32> getsockopt(s32 level, s32 optname, u32 len) override;
s32 setsockopt(s32 level, s32 optname, const std::vector<u8>& optval) override;
s32 listen(s32 backlog) override;
std::optional<std::tuple<s32, std::vector<u8>, sys_net_sockaddr>> recvfrom(s32 flags, u32 len, bool is_lock = true) override;
std::optional<s32> sendto(s32 flags, const std::vector<u8>& buf, std::optional<sys_net_sockaddr> opt_sn_addr, bool is_lock = true) override;
std::optional<s32> sendmsg(s32 flags, const sys_net_msghdr& msg, bool is_lock = true) override;
void close() override;
s32 shutdown(s32 how) override;
s32 poll(sys_net_pollfd& sn_pfd, pollfd& native_pfd) override;
std::tuple<bool, bool, bool> select(bs_t<poll_t> selected, pollfd& native_pfd) override;
void handle_new_data(sys_net_sockaddr_in_p2p p2p_addr, std::vector<u8> p2p_data);
protected:
// Port(actual bound port) and Virtual Port(indicated by u16 at the start of the packet)
u16 port = 3658, vport = 0;
u32 bound_addr = 0;
// Queue containing received packets from network_thread for SYS_NET_SOCK_DGRAM_P2P sockets
std::queue<std::pair<sys_net_sockaddr_in_p2p, std::vector<u8>>> data{};
// List of sock options
std::map<u64, sockopt_cache> sockopts;
};
| 1,907 | C++ | .h | 34 | 53.941176 | 141 | 0.747448 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,962 | lv2_socket_native.h | RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_net/lv2_socket_native.h |
#pragma once
#ifdef _WIN32
#include <winsock2.h>
#include <WS2tcpip.h>
#else
#ifdef __clang__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wold-style-cast"
#endif
#include <errno.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <fcntl.h>
#include <poll.h>
#ifdef __clang__
#pragma GCC diagnostic pop
#endif
#endif
#include "lv2_socket.h"
class lv2_socket_native final : public lv2_socket
{
public:
lv2_socket_native(lv2_socket_family family, lv2_socket_type type, lv2_ip_protocol protocol);
lv2_socket_native(utils::serial& ar, lv2_socket_type type);
void save(utils::serial& ar);
~lv2_socket_native();
s32 create_socket();
std::tuple<bool, s32, std::shared_ptr<lv2_socket>, sys_net_sockaddr> accept(bool is_lock = true) override;
s32 bind(const sys_net_sockaddr& addr) override;
std::optional<s32> connect(const sys_net_sockaddr& addr) override;
s32 connect_followup() override;
std::pair<s32, sys_net_sockaddr> getpeername() override;
std::pair<s32, sys_net_sockaddr> getsockname() override;
std::tuple<s32, sockopt_data, u32> getsockopt(s32 level, s32 optname, u32 len) override;
s32 setsockopt(s32 level, s32 optname, const std::vector<u8>& optval) override;
std::optional<std::tuple<s32, std::vector<u8>, sys_net_sockaddr>> recvfrom(s32 flags, u32 len, bool is_lock = true) override;
std::optional<s32> sendto(s32 flags, const std::vector<u8>& buf, std::optional<sys_net_sockaddr> opt_sn_addr, bool is_lock = true) override;
std::optional<s32> sendmsg(s32 flags, const sys_net_msghdr& msg, bool is_lock = true) override;
s32 poll(sys_net_pollfd& sn_pfd, pollfd& native_pfd) override;
std::tuple<bool, bool, bool> select(bs_t<poll_t> selected, pollfd& native_pfd) override;
bool is_socket_connected();
s32 listen(s32 backlog) override;
void close() override;
s32 shutdown(s32 how) override;
private:
void set_socket(socket_type socket, lv2_socket_family family, lv2_socket_type type, lv2_ip_protocol protocol);
void set_default_buffers();
void set_non_blocking();
private:
// Value keepers
#ifdef _WIN32
s32 so_reuseaddr = 0;
s32 so_reuseport = 0;
#endif
u16 bound_port = 0;
bool feign_tcp_conn_failure = false; // Savestate load related
};
| 2,351 | C++ | .h | 63 | 35.650794 | 141 | 0.751318 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,963 | network_context.h | RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_net/network_context.h |
#pragma once
#include <vector>
#include <map>
#include "Utilities/mutex.h"
#include "Emu/Cell/PPUThread.h"
#include "nt_p2p_port.h"
struct base_network_thread
{
std::vector<ppu_thread*> ppu_to_awake;
void wake_threads();
};
struct network_thread : base_network_thread
{
shared_mutex mutex_thread_loop;
atomic_t<u32> num_polls = 0;
static constexpr auto thread_name = "Network Thread";
void operator()();
};
struct p2p_thread : base_network_thread
{
shared_mutex list_p2p_ports_mutex;
std::map<u16, nt_p2p_port> list_p2p_ports;
atomic_t<u32> num_p2p_ports = 0;
static constexpr auto thread_name = "Network P2P Thread";
p2p_thread();
void create_p2p_port(u16 p2p_port);
void bind_sce_np_port();
void operator()();
};
using network_context = named_thread<network_thread>;
using p2p_context = named_thread<p2p_thread>;
| 842 | C++ | .h | 31 | 25.290323 | 58 | 0.74812 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,964 | lv2_socket.h | RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_net/lv2_socket.h |
#pragma once
#include <functional>
#include <optional>
#include "Utilities/mutex.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/lv2/sys_net.h"
#ifdef _WIN32
#include <winsock2.h>
#include <WS2tcpip.h>
#else
#ifdef __clang__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wold-style-cast"
#endif
#include <poll.h>
#ifdef __clang__
#pragma GCC diagnostic pop
#endif
#endif
#ifdef _WIN32
using socket_type = uptr;
#else
using socket_type = int;
#endif
class lv2_socket
{
public:
// Poll events
enum class poll_t
{
read,
write,
error,
__bitset_enum_max
};
union sockopt_data
{
char ch[128];
be_t<s32> _int = 0;
sys_net_timeval timeo;
sys_net_linger linger;
};
struct sockopt_cache
{
sockopt_data data{};
s32 len = 0;
};
public:
SAVESTATE_INIT_POS(7); // Dependency on RPCN
lv2_socket(lv2_socket_family family, lv2_socket_type type, lv2_ip_protocol protocol);
lv2_socket(utils::serial&) {}
lv2_socket(utils::serial&, lv2_socket_type type);
static std::shared_ptr<void> load(utils::serial& ar);
void save(utils::serial&, bool save_only_this_class = false);
virtual ~lv2_socket() = default;
std::unique_lock<shared_mutex> lock();
void set_lv2_id(u32 id);
bs_t<poll_t> get_events() const;
void set_poll_event(bs_t<poll_t> event);
void poll_queue(std::shared_ptr<ppu_thread> ppu, bs_t<poll_t> event, std::function<bool(bs_t<poll_t>)> poll_cb);
u32 clear_queue(ppu_thread*);
void handle_events(const pollfd& native_fd, bool unset_connecting = false);
void queue_wake(ppu_thread* ppu);
lv2_socket_family get_family() const;
lv2_socket_type get_type() const;
lv2_ip_protocol get_protocol() const;
std::size_t get_queue_size() const;
socket_type get_socket() const;
#ifdef _WIN32
bool is_connecting() const;
void set_connecting(bool is_connecting);
#endif
public:
virtual std::tuple<bool, s32, std::shared_ptr<lv2_socket>, sys_net_sockaddr> accept(bool is_lock = true) = 0;
virtual s32 bind(const sys_net_sockaddr& addr) = 0;
virtual std::optional<s32> connect(const sys_net_sockaddr& addr) = 0;
virtual s32 connect_followup() = 0;
virtual std::pair<s32, sys_net_sockaddr> getpeername() = 0;
virtual std::pair<s32, sys_net_sockaddr> getsockname() = 0;
virtual std::tuple<s32, sockopt_data, u32> getsockopt(s32 level, s32 optname, u32 len) = 0;
virtual s32 setsockopt(s32 level, s32 optname, const std::vector<u8>& optval) = 0;
virtual s32 listen(s32 backlog) = 0;
virtual std::optional<std::tuple<s32, std::vector<u8>, sys_net_sockaddr>> recvfrom(s32 flags, u32 len, bool is_lock = true) = 0;
virtual std::optional<s32> sendto(s32 flags, const std::vector<u8>& buf, std::optional<sys_net_sockaddr> opt_sn_addr, bool is_lock = true) = 0;
virtual std::optional<s32> sendmsg(s32 flags, const sys_net_msghdr& msg, bool is_lock = true) = 0;
virtual void close() = 0;
virtual s32 shutdown(s32 how) = 0;
virtual s32 poll(sys_net_pollfd& sn_pfd, pollfd& native_pfd) = 0;
virtual std::tuple<bool, bool, bool> select(bs_t<poll_t> selected, pollfd& native_pfd) = 0;
error_code abort_socket(s32 flags);
public:
// IDM data
static const u32 id_base = 24;
static const u32 id_step = 1;
static const u32 id_count = 1000;
protected:
lv2_socket(utils::serial&, bool);
shared_mutex mutex;
s32 lv2_id = 0;
socket_type socket = 0;
lv2_socket_family family{};
lv2_socket_type type{};
lv2_ip_protocol protocol{};
// Events selected for polling
atomic_bs_t<poll_t> events{};
// Event processing workload (pair of thread id and the processing function)
std::vector<std::pair<std::shared_ptr<ppu_thread>, std::function<bool(bs_t<poll_t>)>>> queue;
// Socket options value keepers
// Non-blocking IO option
s32 so_nbio = 0;
// Error, only used for connection result for non blocking stream sockets
s32 so_error = 0;
// Unsupported option
s32 so_tcp_maxseg = 1500;
#ifdef _WIN32
s32 so_reuseaddr = 0;
s32 so_reuseport = 0;
// Tracks connect for WSAPoll workaround
bool connecting = false;
#endif
sys_net_sockaddr last_bound_addr{};
public:
u64 so_rcvtimeo = 0;
u64 so_sendtimeo = 0;
};
| 4,233 | C++ | .h | 125 | 31.816 | 144 | 0.703558 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,965 | lv2_socket_raw.h | RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_net/lv2_socket_raw.h |
#pragma once
#include "lv2_socket.h"
class lv2_socket_raw final : public lv2_socket
{
public:
lv2_socket_raw(lv2_socket_family family, lv2_socket_type type, lv2_ip_protocol protocol);
lv2_socket_raw(utils::serial& ar, lv2_socket_type type);
void save(utils::serial& ar);
std::tuple<bool, s32, std::shared_ptr<lv2_socket>, sys_net_sockaddr> accept(bool is_lock = true) override;
s32 bind(const sys_net_sockaddr& addr) override;
std::optional<s32> connect(const sys_net_sockaddr& addr) override;
s32 connect_followup() override;
std::pair<s32, sys_net_sockaddr> getpeername() override;
std::pair<s32, sys_net_sockaddr> getsockname() override;
std::tuple<s32, sockopt_data, u32> getsockopt(s32 level, s32 optname, u32 len) override;
s32 setsockopt(s32 level, s32 optname, const std::vector<u8>& optval) override;
s32 listen(s32 backlog) override;
std::optional<std::tuple<s32, std::vector<u8>, sys_net_sockaddr>> recvfrom(s32 flags, u32 len, bool is_lock = true) override;
std::optional<s32> sendto(s32 flags, const std::vector<u8>& buf, std::optional<sys_net_sockaddr> opt_sn_addr, bool is_lock = true) override;
std::optional<s32> sendmsg(s32 flags, const sys_net_msghdr& msg, bool is_lock = true) override;
void close() override;
s32 shutdown(s32 how) override;
s32 poll(sys_net_pollfd& sn_pfd, pollfd& native_pfd) override;
std::tuple<bool, bool, bool> select(bs_t<poll_t> selected, pollfd& native_pfd) override;
};
| 1,446 | C++ | .h | 25 | 55.68 | 141 | 0.749823 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,966 | lv2_socket_p2ps.h | RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_net/lv2_socket_p2ps.h |
#pragma once
#ifdef _WIN32
#include <winsock2.h>
#include <WS2tcpip.h>
#else
#ifdef __clang__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wold-style-cast"
#endif
#include <netinet/in.h>
#ifdef __clang__
#pragma GCC diagnostic pop
#endif
#endif
#include "lv2_socket_p2p.h"
struct nt_p2p_port;
constexpr be_t<u32> P2PS_U2S_SIG = (static_cast<u32>('U') << 24 | static_cast<u32>('2') << 16 | static_cast<u32>('S') << 8 | static_cast<u32>('0'));
struct p2ps_encapsulated_tcp
{
be_t<u32> signature = P2PS_U2S_SIG; // Signature to verify it's P2P Stream data
be_t<u32> length = 0; // Length of data
be_t<u64> seq = 0; // This should be u32 but changed to u64 for simplicity
be_t<u64> ack = 0;
be_t<u16> src_port = 0; // fake source tcp port
be_t<u16> dst_port = 0; // fake dest tcp port(should be == vport)
be_t<u16> checksum = 0;
u8 flags = 0;
};
enum p2ps_stream_status
{
stream_closed, // Default when the port is neither listening nor connected
stream_listening, // Stream is listening, accepting SYN packets
stream_handshaking, // Currently handshaking
stream_connected, // This is an established connection(after tcp handshake)
};
enum p2ps_tcp_flags : u8
{
FIN = (1 << 0),
SYN = (1 << 1),
RST = (1 << 2),
PSH = (1 << 3),
ACK = (1 << 4),
URG = (1 << 5),
ECE = (1 << 6),
CWR = (1 << 7),
};
u16 u2s_tcp_checksum(const le_t<u16>* buffer, usz size);
std::vector<u8> generate_u2s_packet(const p2ps_encapsulated_tcp& header, const u8* data, const u32 datasize);
class lv2_socket_p2ps final : public lv2_socket_p2p
{
public:
lv2_socket_p2ps(lv2_socket_family family, lv2_socket_type type, lv2_ip_protocol protocol);
lv2_socket_p2ps(socket_type socket, u16 port, u16 vport, u32 op_addr, u16 op_port, u16 op_vport, u64 cur_seq, u64 data_beg_seq, s32 so_nbio);
lv2_socket_p2ps(utils::serial& ar, lv2_socket_type type);
void save(utils::serial& ar);
p2ps_stream_status get_status() const;
void set_status(p2ps_stream_status new_status);
bool handle_connected(p2ps_encapsulated_tcp* tcp_header, u8* data, ::sockaddr_storage* op_addr, nt_p2p_port* p2p_port);
bool handle_listening(p2ps_encapsulated_tcp* tcp_header, u8* data, ::sockaddr_storage* op_addr);
void send_u2s_packet(std::vector<u8> data, const ::sockaddr_in* dst, u64 seq, bool require_ack);
void close_stream();
std::tuple<bool, s32, std::shared_ptr<lv2_socket>, sys_net_sockaddr> accept(bool is_lock = true) override;
s32 bind(const sys_net_sockaddr& addr) override;
std::optional<s32> connect(const sys_net_sockaddr& addr) override;
std::pair<s32, sys_net_sockaddr> getpeername() override;
std::pair<s32, sys_net_sockaddr> getsockname() override;
s32 listen(s32 backlog) override;
std::optional<std::tuple<s32, std::vector<u8>, sys_net_sockaddr>> recvfrom(s32 flags, u32 len, bool is_lock = true) override;
std::optional<s32> sendto(s32 flags, const std::vector<u8>& buf, std::optional<sys_net_sockaddr> opt_sn_addr, bool is_lock = true) override;
std::optional<s32> sendmsg(s32 flags, const sys_net_msghdr& msg, bool is_lock = true) override;
void close() override;
s32 shutdown(s32 how) override;
s32 poll(sys_net_pollfd& sn_pfd, pollfd& native_pfd) override;
std::tuple<bool, bool, bool> select(bs_t<poll_t> selected, pollfd& native_pfd) override;
private:
void close_stream_nl(nt_p2p_port* p2p_port);
private:
static constexpr usz MAX_RECEIVED_BUFFER = (1024 * 1024 * 10);
p2ps_stream_status status = p2ps_stream_status::stream_closed;
usz max_backlog = 0; // set on listen
std::deque<s32> backlog;
u16 op_port = 0, op_vport = 0;
u32 op_addr = 0;
u64 data_beg_seq = 0; // Seq of first byte of received_data
u64 data_available = 0; // Amount of continuous data available(calculated on ACK send)
std::map<u64, std::vector<u8>> received_data; // holds seq/data of data received
u64 cur_seq = 0; // SEQ of next packet to be sent
};
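// Illustrative sketch, not part of the original header: shaping a SYN header for the fake-TCP
// handshake described by p2ps_encapsulated_tcp and p2ps_tcp_flags. Ports and sequence number
// are illustrative and the helper name is made up; the checksum is left at 0 here, and the
// declared u2s_tcp_checksum / generate_u2s_packet helpers are what would complete and
// serialize such a header.
static inline p2ps_encapsulated_tcp make_example_syn(u16 src_vport, u16 dst_vport, u64 seq)
{
	p2ps_encapsulated_tcp h{}; // signature field defaults to P2PS_U2S_SIG
	h.length   = 0;            // a bare SYN carries no payload
	h.seq      = seq;
	h.ack      = 0;
	h.src_port = src_vport;    // fake source tcp port (see the field comments above)
	h.dst_port = dst_vport;    // fake dest tcp port, expected to match the vport
	h.flags    = SYN;          // one of the p2ps_tcp_flags bits
	return h;
}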
| 3,975 | C++ | .h | 88 | 43.284091 | 148 | 0.699456 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,967 | nt_p2p_port.h | RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_net/nt_p2p_port.h |
#pragma once
#include <set>
#include "lv2_socket_p2ps.h"
#ifdef _WIN32
#include <winsock2.h>
#include <WS2tcpip.h>
#else
#ifdef __clang__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wold-style-cast"
#endif
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#ifdef __clang__
#pragma GCC diagnostic pop
#endif
#endif
// dst_vport src_vport flags
constexpr s32 VPORT_P2P_HEADER_SIZE = sizeof(u16) + sizeof(u16) + sizeof(u16);
enum VPORT_P2P_FLAGS
{
P2P_FLAG_P2P = 1,
P2P_FLAG_P2PS = 1 << 1,
};
struct signaling_message
{
u32 src_addr = 0;
u16 src_port = 0;
std::vector<u8> data;
};
namespace sys_net_helpers
{
bool all_reusable(const std::set<s32>& sock_ids);
}
struct nt_p2p_port
{
// Real socket where P2P packets are received/sent
socket_type p2p_socket = 0;
u16 port = 0;
shared_mutex bound_p2p_vports_mutex;
// For DGRAM_P2P sockets (vport, sock_ids)
std::map<u16, std::set<s32>> bound_p2p_vports{};
// For STREAM_P2P sockets (vport, sock_ids)
std::map<u16, std::set<s32>> bound_p2ps_vports{};
// List of active(either from a connect or an accept) P2PS sockets (key, sock_id)
// key is ( (src_vport) << 48 | (dst_vport) << 32 | addr ) with src_vport and addr being 0 for listening sockets
std::map<u64, s32> bound_p2p_streams{};
// Current free port index
u16 binding_port = 30000;
// Queued messages from RPCN
shared_mutex s_rpcn_mutex;
std::vector<std::vector<u8>> rpcn_msgs{};
// Queued signaling messages
shared_mutex s_sign_mutex;
std::vector<signaling_message> sign_msgs{};
std::array<u8, 65535> p2p_recv_data{};
nt_p2p_port(u16 port);
~nt_p2p_port();
static void dump_packet(p2ps_encapsulated_tcp* tcph);
u16 get_port();
bool handle_connected(s32 sock_id, p2ps_encapsulated_tcp* tcp_header, u8* data, ::sockaddr_storage* op_addr);
bool handle_listening(s32 sock_id, p2ps_encapsulated_tcp* tcp_header, u8* data, ::sockaddr_storage* op_addr);
bool recv_data();
};
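// Illustrative sketch, not part of the original header: restating two encodings from the
// comments above, the three-u16 header at the front of every P2P datagram
// ("dst_vport src_vport flags") and the u64 key of bound_p2p_streams. The struct/helper
// names are made up and the on-wire byte order is an assumption; only the key formula is
// copied from the comment.
#include <cstring>
struct example_vport_header { u16 dst_vport, src_vport, flags; };
static_assert(sizeof(example_vport_header) == VPORT_P2P_HEADER_SIZE);
static inline example_vport_header parse_example_vport_header(const u8* pkt)
{
	example_vport_header h{};
	std::memcpy(&h, pkt, sizeof(h)); // first 6 bytes of a received P2P datagram
	return h;
}
static inline u64 make_example_stream_key(u16 src_vport, u16 dst_vport, u32 addr)
{
	// ( (src_vport) << 48 | (dst_vport) << 32 | addr ); src_vport and addr are 0 for listening sockets
	return (u64{src_vport} << 48) | (u64{dst_vport} << 32) | addr;
}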
| 1,972 | C++ | .h | 65 | 28.584615 | 113 | 0.719196 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,968 | sys_net_helpers.h | RPCS3_rpcs3/rpcs3/Emu/Cell/lv2/sys_net/sys_net_helpers.h |
#pragma once
#ifdef _WIN32
#include <winsock2.h>
#include <WS2tcpip.h>
#else
#ifdef __clang__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wold-style-cast"
#endif
#include <sys/socket.h>
#include <netinet/in.h>
#ifdef __clang__
#pragma GCC diagnostic pop
#endif
#endif
#include "Emu/Cell/lv2/sys_net.h"
int get_native_error();
sys_net_error convert_error(bool is_blocking, int native_error, bool is_connecting = false);
sys_net_error get_last_error(bool is_blocking, bool is_connecting = false);
sys_net_sockaddr native_addr_to_sys_net_addr(const ::sockaddr_storage& native_addr);
::sockaddr_in sys_net_addr_to_native_addr(const sys_net_sockaddr& sn_addr);
bool is_ip_public_address(const ::sockaddr_in& addr);
u32 network_clear_queue(ppu_thread& ppu);
#ifdef _WIN32
void windows_poll(std::vector<pollfd>& fds, unsigned long nfds, int timeout, std::vector<bool>& connecting);
#endif
| 903 | C++ | .h | 26 | 33.576923 | 108 | 0.769759 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,969 | CPUThread.h | RPCS3_rpcs3/rpcs3/Emu/CPU/CPUThread.h |
#pragma once
#include "../Utilities/Thread.h"
#include "../Utilities/bit_set.h"
#include <vector>
#include <any>
template <typename Derived, typename Base>
concept DerivedFrom = std::is_base_of_v<Base, Derived> &&
std::is_convertible_v<const volatile Derived*, const volatile Base*>;
// Thread state flags
enum class cpu_flag : u32
{
stop, // Thread not running (HLE, initial state)
exit, // Irreversible exit
wait, // Indicates waiting state, set by the thread itself
temp, // Indicates that the thread cannot properly return after next check_state()
pause, // Thread suspended by suspend_all technique
suspend, // Thread suspended
ret, // Callback return requested
again, // Thread must complete the syscall after deserialization
signal, // Thread received a signal (HLE)
memory, // Thread must unlock memory mutex
pending, // Thread has postponed work
pending_recheck, // Thread needs to recheck if there is pending work before ::pending removal
notify, // Flag meant solely to allow atomic notification on state without changing other flags
yield, // Thread is being requested to yield its execution time if it's running
preempt, // Thread is being requested to preempt the execution of all CPU threads
dbg_global_pause, // Emulation paused
dbg_pause, // Thread paused
dbg_step, // Thread forced to pause after one step (one instruction, etc)
__bitset_enum_max
};
// Test stopped state
constexpr bool is_stopped(bs_t<cpu_flag> state)
{
return !!(state & (cpu_flag::stop + cpu_flag::exit + cpu_flag::again));
}
// Test paused state
constexpr bool is_paused(bs_t<cpu_flag> state)
{
return !!(state & (cpu_flag::suspend + cpu_flag::dbg_global_pause + cpu_flag::dbg_pause)) && !is_stopped(state);
}
class cpu_thread
{
public:
u64 block_hash = 0;
protected:
cpu_thread(u32 id);
public:
cpu_thread(const cpu_thread&) = delete;
cpu_thread& operator=(const cpu_thread&) = delete;
virtual ~cpu_thread();
void operator()();
// Self identifier
const u32 id;
// Public thread state
atomic_bs_t<cpu_flag> state{cpu_flag::stop + cpu_flag::wait};
// Process thread state, return true if the checker must return
bool check_state() noexcept;
// Process thread state (pause)
[[nodiscard]] bool test_stopped()
{
if (state)
{
if (check_state())
{
return true;
}
}
return false;
}
// Wrappers
static constexpr bool is_stopped(bs_t<cpu_flag> s)
{
return ::is_stopped(s);
}
static constexpr bool is_paused(bs_t<cpu_flag> s)
{
return ::is_paused(s);
}
bool is_stopped() const
{
return ::is_stopped(state);
}
bool is_paused() const
{
return ::is_paused(state);
}
bool has_pause_flag() const
{
return !!(state & cpu_flag::pause);
}
// Check thread type
u32 id_type() const
{
return id >> 24;
}
thread_class get_class() const
{
return static_cast<thread_class>(id_type()); // Static cast for performance reasons
}
template <DerivedFrom<cpu_thread> T>
T* try_get()
{
if constexpr (std::is_same_v<std::remove_const_t<T>, cpu_thread>)
{
return this;
}
else
{
if (id_type() == (T::id_base >> 24))
{
return static_cast<T*>(this);
}
return nullptr;
}
}
template <DerivedFrom<cpu_thread> T>
const T* try_get() const
{
return const_cast<cpu_thread*>(this)->try_get<const T>();
}
u32 get_pc() const;
u32* get_pc2(); // Last PC before stepping for the debugger (may be null)
cpu_thread* get_next_cpu(); // Access next_cpu member if there is one
void notify();
cpu_thread& operator=(thread_state);
// Add/remove CPU state flags in an atomic operations, notifying if required
void add_remove_flags(bs_t<cpu_flag> to_add, bs_t<cpu_flag> to_remove);
// Thread stats for external observation
static atomic_t<u64> g_threads_created, g_threads_deleted, g_suspend_counter;
// Get thread name (as assigned to named_thread)
std::string get_name() const;
// Get CPU state dump (everything)
virtual void dump_all(std::string&) const;
// Get CPU register dump
virtual void dump_regs(std::string& ret, std::any& custom_data) const;
// Get CPU call stack dump
virtual std::string dump_callstack() const;
// Get CPU call stack list
virtual std::vector<std::pair<u32, u32>> dump_callstack_list() const;
// Get CPU dump of misc information
virtual std::string dump_misc() const;
// Thread entry point function
virtual void cpu_task() = 0;
// Callback for cpu_flag::suspend
virtual void cpu_sleep() {}
// Callback for cpu_flag::pending
virtual void cpu_work() { state -= cpu_flag::pending + cpu_flag::pending_recheck; }
// Callback for cpu_flag::ret
virtual void cpu_return() {}
// Callback for thread_ctrl::wait or RSX wait
virtual void cpu_wait(bs_t<cpu_flag> old);
// Callback for function abortion stats on Emu.Kill()
virtual void cpu_on_stop() {}
// For internal use
struct suspend_work
{
// Task priority
u8 prio;
bool cancel_if_not_suspended;
bool was_posted;
// Size of prefetch list workload
u32 prf_size;
void* const* prf_list;
void* func_ptr;
void* res_buf;
// Type-erased op executor
void (*exec)(void* func, void* res);
// Next object in the linked list
suspend_work* next;
// Internal method
bool push(cpu_thread* _this) noexcept;
};
// Suspend all threads and execute op (may be executed by other thread than caller!)
template <u8 Prio = 0, typename F>
static auto suspend_all(cpu_thread* _this, std::initializer_list<void*> hints, F op)
{
constexpr u8 prio = Prio > 3 ? 3 : Prio;
if constexpr (std::is_void_v<std::invoke_result_t<F>>)
{
suspend_work work{prio, false, false, ::size32(hints), hints.begin(), &op, nullptr, [](void* func, void*)
{
std::invoke(*static_cast<F*>(func));
}};
work.push(_this);
return;
}
else
{
std::invoke_result_t<F> result;
suspend_work work{prio, false, false, ::size32(hints), hints.begin(), &op, &result, [](void* func, void* res_buf)
{
*static_cast<std::invoke_result_t<F>*>(res_buf) = std::invoke(*static_cast<F*>(func));
}};
work.push(_this);
return result;
}
}
template <u8 Prio = 0, typename F>
static suspend_work suspend_post(cpu_thread* /*_this*/, std::initializer_list<void*> hints, F& op)
{
constexpr u8 prio = Prio > 3 ? 3 : Prio;
static_assert(std::is_void_v<std::invoke_result_t<F>>, "cpu_thread::suspend_post only supports void as return type");
return suspend_work{prio, false, true, ::size32(hints), hints.begin(), &op, nullptr, [](void* func, void*)
{
std::invoke(*static_cast<F*>(func));
}};
}
// Push the workload only if threads are being suspended by suspend_all()
template <u8 Prio = 0, typename F>
static bool if_suspended(cpu_thread* _this, std::initializer_list<void*> hints, F op)
{
constexpr u8 prio = Prio > 3 ? 3 : Prio;
static_assert(std::is_void_v<std::invoke_result_t<F>>, "cpu_thread::if_suspended only supports void as return type");
{
suspend_work work{prio, true, false, ::size32(hints), hints.begin(), &op, nullptr, [](void* func, void*)
{
std::invoke(*static_cast<F*>(func));
}};
return work.push(_this);
}
}
// Cleanup thread counting information
static void cleanup() noexcept;
// Send signal to the profiler(s) to flush results
static void flush_profilers() noexcept;
template <DerivedFrom<cpu_thread> T = cpu_thread>
static inline T* get_current() noexcept
{
if (const auto cpu = g_tls_this_thread)
{
return cpu->try_get<T>();
}
return nullptr;
}
private:
static thread_local cpu_thread* g_tls_this_thread;
};
template <DerivedFrom<cpu_thread> T = cpu_thread>
inline T* get_current_cpu_thread() noexcept
{
return cpu_thread::get_current<T>();
}
class ppu_thread;
class spu_thread;
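// Illustrative sketch, not part of the original header: the minimal shape of a
// cpu_thread::suspend_all call. The lambda runs while every cpu_thread is paused and, as the
// comment above warns, possibly on a different thread than the caller; the surrounding
// function and the returned value are illustrative only.
static inline u32 example_count_under_suspension()
{
	cpu_thread* current = get_current_cpu_thread(); // may be nullptr outside a CPU thread
	return cpu_thread::suspend_all<0>(current, {}, []() -> u32
	{
		// every cpu_thread is suspended at this point; inspect shared state here
		return 0;
	});
}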
| 7,716 | C++ | .h | 245 | 28.865306 | 119 | 0.703499 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,970 | CPUDisAsm.h | RPCS3_rpcs3/rpcs3/Emu/CPU/CPUDisAsm.h |
#pragma once
#include <string>
#include "Utilities/StrFmt.h"
enum class cpu_disasm_mode
{
dump,
interpreter,
normal,
compiler_elf,
list, // RSX exclusive
survey_cmd_size, // RSX exclusive
};
class cpu_thread;
class CPUDisAsm
{
protected:
cpu_disasm_mode m_mode{};
const u8* m_offset{};
const u32 m_start_pc;
std::add_pointer_t<const cpu_thread> m_cpu{};
std::shared_ptr<cpu_thread> m_cpu_handle;
u32 m_op = 0;
void format_by_mode()
{
switch (m_mode)
{
case cpu_disasm_mode::dump:
{
last_opcode = fmt::format("\t%08x:\t%02x %02x %02x %02x\t%s\n", dump_pc,
static_cast<u8>(m_op >> 24),
static_cast<u8>(m_op >> 16),
static_cast<u8>(m_op >> 8),
static_cast<u8>(m_op >> 0), last_opcode);
break;
}
case cpu_disasm_mode::interpreter:
{
last_opcode.insert(0, fmt::format("[%08x] %02x %02x %02x %02x: ", dump_pc,
static_cast<u8>(m_op >> 24),
static_cast<u8>(m_op >> 16),
static_cast<u8>(m_op >> 8),
static_cast<u8>(m_op >> 0)));
break;
}
case cpu_disasm_mode::compiler_elf:
{
last_opcode += '\n';
break;
}
case cpu_disasm_mode::normal:
{
break;
}
default: fmt::throw_exception("Unreachable");
}
}
public:
std::string last_opcode{};
u32 dump_pc{};
cpu_disasm_mode change_mode(cpu_disasm_mode mode)
{
return std::exchange(m_mode, mode);
}
const u8* change_ptr(const u8* ptr)
{
return std::exchange(m_offset, ptr);
}
cpu_thread* get_cpu() const
{
return const_cast<cpu_thread*>(m_cpu);
}
void set_cpu_handle(std::shared_ptr<cpu_thread> cpu)
{
m_cpu_handle = std::move(cpu);
if (!m_cpu)
{
m_cpu = m_cpu_handle.get();
}
else
{
AUDIT(m_cpu == m_cpu_handle.get());
}
}
protected:
CPUDisAsm(cpu_disasm_mode mode, const u8* offset, u32 start_pc = 0, const cpu_thread* cpu = nullptr)
: m_mode(mode)
, m_offset(offset - start_pc)
, m_start_pc(start_pc)
, m_cpu(cpu)
{
}
CPUDisAsm& operator=(const CPUDisAsm&) = delete;
virtual u32 DisAsmBranchTarget(s32 /*imm*/);
// TODO: Add builtin fmt helper for best performance
template <typename T, std::enable_if_t<std::is_integral_v<T>, int> = 0>
static std::string SignedHex(T value)
{
const auto v = static_cast<std::make_signed_t<T>>(value);
if (v == smin)
{
// for INTx_MIN
return fmt::format("-0x%x", v);
}
const auto av = std::abs(v);
if (av < 10)
{
// Does not need hex
return fmt::format("%d", v);
}
return fmt::format("%s%s", v < 0 ? "-" : "", av);
}
// Tell the formatting function the minimum number of characters to print for an instruction
// Padding with spaces
int PadOp(std::string_view op = {}, int min_spaces = 0) const
{
return m_mode == cpu_disasm_mode::normal ? (static_cast<int>(op.size()) + min_spaces) : 10;
}
public:
virtual ~CPUDisAsm() = default;
virtual u32 disasm(u32 pc) = 0;
virtual std::pair<const void*, usz> get_memory_span() const = 0;
virtual std::unique_ptr<CPUDisAsm> copy_type_erased() const = 0;
};
| 3,026 | C++ | .h | 123 | 21.560976 | 105 | 0.64571 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,972 | CPUTranslator.h | RPCS3_rpcs3/rpcs3/Emu/CPU/CPUTranslator.h |
#pragma once
#ifdef LLVM_AVAILABLE
#ifdef _MSC_VER
#pragma warning(push, 0)
#else
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wall"
#pragma GCC diagnostic ignored "-Wextra"
#pragma GCC diagnostic ignored "-Wold-style-cast"
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Weffc++"
#pragma GCC diagnostic ignored "-Wmissing-noreturn"
#pragma GCC diagnostic ignored "-Wredundant-decls"
#endif
#include "llvm/IR/LLVMContext.h"
#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/ModRef.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/InlineAsm.h"
#ifdef _MSC_VER
#pragma warning(pop)
#else
#pragma GCC diagnostic pop
#endif
#include "util/types.hpp"
#include "util/sysinfo.hpp"
#include "Utilities/StrFmt.h"
#include "Utilities/BitField.h"
#include "Utilities/JIT.h"
#include "util/v128.hpp"
#include <functional>
#include <unordered_map>
// Helper function
llvm::Value* peek_through_bitcasts(llvm::Value*);
enum class i2 : char
{
};
enum class i4 : char
{
};
template <typename T>
concept LLVMType = (std::is_pointer_v<T>) && (std::is_base_of_v<llvm::Type, std::remove_pointer_t<T>>);
template <typename T>
concept LLVMValue = (std::is_pointer_v<T>) && (std::is_base_of_v<llvm::Value, std::remove_pointer_t<T>>);
template <typename T>
concept DSLValue = requires (T& v)
{
{ v.eval(std::declval<llvm::IRBuilder<>*>()) } -> LLVMValue;
};
template <usz N>
struct get_int_bits
{
};
template <>
struct get_int_bits<1>
{
using utype = bool;
};
template <>
struct get_int_bits<2>
{
using utype = i2;
};
template <>
struct get_int_bits<4>
{
using utype = i4;
};
template <>
struct get_int_bits<8>
{
using utype = u8;
};
template <>
struct get_int_bits<16>
{
using utype = u16;
};
template <>
struct get_int_bits<32>
{
using utype = u32;
};
template <>
struct get_int_bits<64>
{
using utype = u64;
};
template <>
struct get_int_bits<128>
{
using utype = u128;
};
template <usz Bits>
using get_int_vt = typename get_int_bits<Bits>::utype;
template <typename T = void>
struct llvm_value_t
{
static_assert(std::is_same_v<T, void>, "llvm_value_t<> error: unknown type");
using type = void;
using base = llvm_value_t;
static constexpr uint esize = 0;
static constexpr bool is_int = false;
static constexpr bool is_sint = false;
static constexpr bool is_uint = false;
static constexpr bool is_float = false;
static constexpr uint is_array = false;
static constexpr uint is_vector = false;
static constexpr uint is_pointer = false;
static llvm::Type* get_type(llvm::LLVMContext& context)
{
return llvm::Type::getVoidTy(context);
}
llvm::Value* eval(llvm::IRBuilder<>*) const
{
return value;
}
std::tuple<> match(llvm::Value*& value, llvm::Module*) const
{
if (peek_through_bitcasts(value) != peek_through_bitcasts(this->value))
{
value = nullptr;
}
return {};
}
llvm::Value* value;
// llvm_value_t() = default;
// llvm_value_t(llvm::Value* value)
// : value(value)
// {
// }
};
template <>
struct llvm_value_t<bool> : llvm_value_t<void>
{
using type = bool;
using base = llvm_value_t<void>;
using base::base;
static constexpr uint esize = 1;
static constexpr bool is_int = true;
static llvm::Type* get_type(llvm::LLVMContext& context)
{
return llvm::Type::getInt1Ty(context);
}
};
template <>
struct llvm_value_t<i2> : llvm_value_t<void>
{
using type = i2;
using base = llvm_value_t<void>;
using base::base;
static constexpr uint esize = 2;
static constexpr bool is_int = true;
static llvm::Type* get_type(llvm::LLVMContext& context)
{
return llvm::Type::getIntNTy(context, 2);
}
};
template <>
struct llvm_value_t<i4> : llvm_value_t<void>
{
using type = i4;
using base = llvm_value_t<void>;
using base::base;
static constexpr uint esize = 4;
static constexpr bool is_int = true;
static llvm::Type* get_type(llvm::LLVMContext& context)
{
return llvm::Type::getIntNTy(context, 4);
}
};
template <>
struct llvm_value_t<char> : llvm_value_t<void>
{
using type = char;
using base = llvm_value_t<void>;
using base::base;
static constexpr uint esize = 8;
static constexpr bool is_int = true;
static llvm::Type* get_type(llvm::LLVMContext& context)
{
return llvm::Type::getInt8Ty(context);
}
};
template <>
struct llvm_value_t<s8> : llvm_value_t<char>
{
using type = s8;
using base = llvm_value_t<char>;
using base::base;
static constexpr bool is_sint = true;
};
template <>
struct llvm_value_t<u8> : llvm_value_t<char>
{
using type = u8;
using base = llvm_value_t<char>;
using base::base;
static constexpr bool is_uint = true;
};
template <>
struct llvm_value_t<s16> : llvm_value_t<s8>
{
using type = s16;
using base = llvm_value_t<s8>;
using base::base;
static constexpr uint esize = 16;
static llvm::Type* get_type(llvm::LLVMContext& context)
{
return llvm::Type::getInt16Ty(context);
}
};
template <>
struct llvm_value_t<u16> : llvm_value_t<s16>
{
using type = u16;
using base = llvm_value_t<s16>;
using base::base;
static constexpr bool is_sint = false;
static constexpr bool is_uint = true;
};
template <>
struct llvm_value_t<int> : llvm_value_t<s8>
{
using type = int;
using base = llvm_value_t<s8>;
using base::base;
static constexpr uint esize = 32;
static llvm::Type* get_type(llvm::LLVMContext& context)
{
return llvm::Type::getInt32Ty(context);
}
};
template <>
struct llvm_value_t<uint> : llvm_value_t<int>
{
using type = uint;
using base = llvm_value_t<int>;
using base::base;
static constexpr bool is_sint = false;
static constexpr bool is_uint = true;
};
template <>
struct llvm_value_t<long> : llvm_value_t<s8>
{
using type = long;
using base = llvm_value_t<s8>;
using base::base;
static constexpr uint esize = 8 * sizeof(long);
static llvm::Type* get_type(llvm::LLVMContext& context)
{
return llvm::Type::getInt64Ty(context);
}
};
template <>
struct llvm_value_t<ulong> : llvm_value_t<long>
{
using type = ulong;
using base = llvm_value_t<long>;
using base::base;
static constexpr bool is_sint = false;
static constexpr bool is_uint = true;
};
template <>
struct llvm_value_t<llong> : llvm_value_t<s8>
{
using type = llong;
using base = llvm_value_t<s8>;
using base::base;
static constexpr uint esize = 64;
static llvm::Type* get_type(llvm::LLVMContext& context)
{
return llvm::Type::getInt64Ty(context);
}
};
template <>
struct llvm_value_t<ullong> : llvm_value_t<llong>
{
using type = ullong;
using base = llvm_value_t<llong>;
using base::base;
static constexpr bool is_sint = false;
static constexpr bool is_uint = true;
};
template <>
struct llvm_value_t<s128> : llvm_value_t<s8>
{
using type = s128;
using base = llvm_value_t<s8>;
using base::base;
static constexpr uint esize = 128;
static llvm::Type* get_type(llvm::LLVMContext& context)
{
return llvm::Type::getIntNTy(context, 128);
}
};
template <>
struct llvm_value_t<u128> : llvm_value_t<s128>
{
using type = u128;
using base = llvm_value_t<s128>;
using base::base;
static constexpr bool is_sint = false;
static constexpr bool is_uint = true;
};
template <>
struct llvm_value_t<f32> : llvm_value_t<void>
{
using type = f32;
using base = llvm_value_t<void>;
using base::base;
static constexpr uint esize = 32;
static constexpr bool is_float = true;
static llvm::Type* get_type(llvm::LLVMContext& context)
{
return llvm::Type::getFloatTy(context);
}
};
template <>
struct llvm_value_t<f64> : llvm_value_t<void>
{
using type = f64;
using base = llvm_value_t<void>;
using base::base;
static constexpr uint esize = 64;
static constexpr bool is_float = true;
static llvm::Type* get_type(llvm::LLVMContext& context)
{
return llvm::Type::getDoubleTy(context);
}
};
template <typename T>
struct llvm_value_t<T*> : llvm_value_t<T>
{
static_assert(!std::is_void_v<T>, "llvm_value_t<> error: invalid pointer to void type");
using type = T*;
using base = llvm_value_t<T>;
using base::base;
static constexpr uint esize = 64;
static constexpr bool is_int = false;
static constexpr bool is_sint = false;
static constexpr bool is_uint = false;
static constexpr bool is_float = false;
static constexpr uint is_array = false;
static constexpr uint is_vector = false;
static constexpr uint is_pointer = llvm_value_t<T>::is_pointer + 1;
static llvm::Type* get_type(llvm::LLVMContext& context)
{
return llvm_value_t<T>::get_type(context)->getPointerTo();
}
};
// u32[4] : vector of 4 u32 elements
// u32[123][4] : array of 123 u32[4] vectors
// u32[123][1] : array of 123 u32 scalars
template <typename T, uint N>
struct llvm_value_t<T[N]> : llvm_value_t<std::conditional_t<(std::extent_v<T> > 1), T, std::remove_extent_t<T>>>
{
using type = T[N];
using base = llvm_value_t<std::conditional_t<(std::extent_v<T> > 1), T, std::remove_extent_t<T>>>;
using base::base;
static constexpr uint esize = std::is_array_v<T> ? 0 : base::esize;
static constexpr bool is_int = !std::is_array_v<T> && base::is_int;
static constexpr bool is_sint = !std::is_array_v<T> && base::is_sint;
static constexpr bool is_uint = !std::is_array_v<T> && base::is_uint;
static constexpr bool is_float = !std::is_array_v<T> && base::is_float;
static constexpr uint is_array = std::is_array_v<T> ? N : 0;
static constexpr uint is_vector = std::is_array_v<T> ? 0 : N;
static constexpr uint is_pointer = 0;
static llvm::Type* get_type(llvm::LLVMContext& context)
{
if constexpr (std::is_array_v<T>)
{
return llvm::ArrayType::get(base::get_type(context), N);
}
else if constexpr (N > 1)
{
return llvm::VectorType::get(base::get_type(context), N, false);
}
else
{
return base::get_type(context);
}
}
};
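// Illustrative checks, not part of the original header: the array/vector notation described
// in the comment above, restated in terms of the trait members.
static_assert(llvm_value_t<u32[4]>::is_vector == 4 && llvm_value_t<u32[4]>::is_array == 0);
static_assert(llvm_value_t<u32[4]>::esize == 32);
static_assert(llvm_value_t<u32[123][4]>::is_array == 123 && llvm_value_t<u32[123][4]>::is_vector == 0);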
template <typename T>
using llvm_expr_t = std::decay_t<T>;
template <typename T, typename = void>
struct is_llvm_expr
{
};
template <typename T>
struct is_llvm_expr<T, std::void_t<decltype(std::declval<T>().eval(std::declval<llvm::IRBuilder<>*>()))>>
{
using type = typename std::decay_t<T>::type;
};
template <typename T, typename Of, typename = void>
struct is_llvm_expr_of
{
static constexpr bool ok = false;
};
template <typename T, typename Of>
struct is_llvm_expr_of<T, Of, std::void_t<typename is_llvm_expr<T>::type, typename is_llvm_expr<Of>::type>>
{
static constexpr bool ok = std::is_same_v<typename is_llvm_expr<T>::type, typename is_llvm_expr<Of>::type>;
};
template <typename T, typename... Types>
using llvm_common_t = std::enable_if_t<(is_llvm_expr_of<T, Types>::ok && ...), typename is_llvm_expr<T>::type>;
template <typename... Args>
using llvm_match_tuple = decltype(std::tuple_cat(std::declval<llvm_expr_t<Args>&>().match(std::declval<llvm::Value*&>(), nullptr)...));
template <typename T, typename U = llvm_common_t<llvm_value_t<T>>>
struct llvm_match_t
{
using type = T;
llvm::Value* value = nullptr;
explicit operator bool() const
{
return value != nullptr;
}
template <typename... Args>
bool eq(const Args&... args) const
{
llvm::Value* lhs = nullptr;
return value && (lhs = peek_through_bitcasts(value)) && ((lhs == peek_through_bitcasts(args.value)) && ...);
}
llvm::Value* eval(llvm::IRBuilder<>*) const
{
return value;
}
std::tuple<> match(llvm::Value*& value, llvm::Module*) const
{
if (peek_through_bitcasts(value) != peek_through_bitcasts(this->value))
{
value = nullptr;
}
return {};
}
};
template <typename T, typename U = llvm_common_t<llvm_value_t<T>>>
struct llvm_placeholder_t
{
// TODO: placeholder extracting actual constant values (u64, f64, vector, etc)
using type = T;
llvm::Value* eval(llvm::IRBuilder<>*) const
{
return nullptr;
}
std::tuple<llvm_match_t<T>> match(llvm::Value*& value, llvm::Module*) const
{
if (value && value->getType() == llvm_value_t<T>::get_type(value->getContext()))
{
return {{value}};
}
value = nullptr;
return {};
}
};
template <typename T, bool ForceSigned = false>
struct llvm_const_int
{
using type = T;
u64 val;
static constexpr bool is_ok = llvm_value_t<T>::is_int;
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
static_assert(llvm_value_t<T>::is_int, "llvm_const_int<>: invalid type");
return llvm::ConstantInt::get(llvm_value_t<T>::get_type(ir->getContext()), val, ForceSigned || llvm_value_t<T>::is_sint);
}
std::tuple<> match(llvm::Value*& value, llvm::Module*) const
{
if (value && value == llvm::ConstantInt::get(llvm_value_t<T>::get_type(value->getContext()), val, ForceSigned || llvm_value_t<T>::is_sint))
{
return {};
}
value = nullptr;
return {};
}
};
template <typename T>
struct llvm_const_float
{
using type = T;
f64 val;
static constexpr bool is_ok = llvm_value_t<T>::is_float;
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
static_assert(llvm_value_t<T>::is_float, "llvm_const_float<>: invalid type");
return llvm::ConstantFP::get(llvm_value_t<T>::get_type(ir->getContext()), val);
}
std::tuple<> match(llvm::Value*& value, llvm::Module*) const
{
if (value && value == llvm::ConstantFP::get(llvm_value_t<T>::get_type(value->getContext()), val))
{
return {};
}
value = nullptr;
return {};
}
};
template <uint N, typename T>
struct llvm_const_vector
{
using type = T;
T data;
static constexpr bool is_ok = N && llvm_value_t<T>::is_vector == N;
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
static_assert(N && llvm_value_t<T>::is_vector == N, "llvm_const_vector<>: invalid type");
return llvm::ConstantDataVector::get(ir->getContext(), data);
}
std::tuple<> match(llvm::Value*& value, llvm::Module*) const
{
if (value && value == llvm::ConstantDataVector::get(value->getContext(), data))
{
return {};
}
value = nullptr;
return {};
}
};
template <typename A1, typename A2, typename T = llvm_common_t<A1, A2>>
struct llvm_add
{
using type = T;
llvm_expr_t<A1> a1;
llvm_expr_t<A2> a2;
static_assert(llvm_value_t<T>::is_sint || llvm_value_t<T>::is_uint || llvm_value_t<T>::is_float, "llvm_add<>: invalid type");
static constexpr auto opc = llvm_value_t<T>::is_float ? llvm::Instruction::FAdd : llvm::Instruction::Add;
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
const auto v1 = a1.eval(ir);
const auto v2 = a2.eval(ir);
return ir->CreateBinOp(opc, v1, v2);
}
llvm_match_tuple<A1, A2> match(llvm::Value*& value, llvm::Module* _m) const
{
llvm::Value* v1 = {};
llvm::Value* v2 = {};
if (auto i = llvm::dyn_cast_or_null<llvm::BinaryOperator>(value); i && i->getOpcode() == opc)
{
v1 = i->getOperand(0);
v2 = i->getOperand(1);
if (auto r1 = a1.match(v1, _m); v1)
{
if (auto r2 = a2.match(v2, _m); v2)
{
return std::tuple_cat(r1, r2);
}
}
v1 = i->getOperand(0);
v2 = i->getOperand(1);
// Argument order does not matter here, so retry with the operands swapped
if (auto r1 = a1.match(v2, _m); v2)
{
if (auto r2 = a2.match(v1, _m); v1)
{
return std::tuple_cat(r1, r2);
}
}
}
value = nullptr;
return {};
}
};
template <typename T1, typename T2>
inline llvm_add<T1, T2> operator +(T1&& a1, T2&& a2)
{
return {a1, a2};
}
template <typename T1>
inline llvm_add<T1, llvm_const_int<typename is_llvm_expr<T1>::type>> operator +(T1&& a1, u64 c)
{
return {a1, {c}};
}
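// The arithmetic operators only build expression nodes; no IR is emitted until the node is
// evaluated through cpu_translator::eval() further below. Illustrative sketch (comment only;
// 'a' and 'b' are hypothetical DSL values):
//
//   value_t<u32[4]> a = ..., b = ...;
//   auto r = eval(a + b + 1);   // add <4 x i32> a, b; then add of the splatted constant 1
//
// The "+ 1" form uses the llvm_const_int overload above, so the constant adopts the element
// type of the left-hand side.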
template <typename A1, typename A2, typename A3, typename T = llvm_common_t<A1, A2, A3>>
struct llvm_sum
{
using type = T;
llvm_expr_t<A1> a1;
llvm_expr_t<A2> a2;
llvm_expr_t<A3> a3;
static_assert(llvm_value_t<T>::is_sint || llvm_value_t<T>::is_uint, "llvm_sum<>: invalid type");
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
const auto v1 = a1.eval(ir);
const auto v2 = a2.eval(ir);
const auto v3 = a3.eval(ir);
return ir->CreateAdd(ir->CreateAdd(v1, v2), v3);
}
llvm_match_tuple<A1, A2, A3> match(llvm::Value*& value, llvm::Module* _m) const
{
llvm::Value* v1 = {};
llvm::Value* v2 = {};
llvm::Value* v3 = {};
if (auto i = llvm::dyn_cast_or_null<llvm::BinaryOperator>(value); i && i->getOpcode() == llvm::Instruction::Add)
{
v3 = i->getOperand(1);
if (auto r3 = a3.match(v3, _m); v3)
{
i = llvm::dyn_cast<llvm::BinaryOperator>(i->getOperand(0));
if (i && i->getOpcode() == llvm::Instruction::Add)
{
v1 = i->getOperand(0);
v2 = i->getOperand(1);
if (auto r1 = a1.match(v1, _m); v1)
{
if (auto r2 = a2.match(v2, _m); v2)
{
return std::tuple_cat(r1, r2, r3);
}
}
}
}
}
value = nullptr;
return {};
}
};
template <typename T1, typename T2, typename T3>
llvm_sum(T1&& a1, T2&& a2, T3&& a3) -> llvm_sum<T1, T2, T3>;
template <typename A1, typename A2, typename T = llvm_common_t<A1, A2>>
struct llvm_sub
{
using type = T;
llvm_expr_t<A1> a1;
llvm_expr_t<A2> a2;
static_assert(llvm_value_t<T>::is_sint || llvm_value_t<T>::is_uint || llvm_value_t<T>::is_float, "llvm_sub<>: invalid type");
static constexpr auto opc = llvm_value_t<T>::is_float ? llvm::Instruction::FSub : llvm::Instruction::Sub;
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
const auto v1 = a1.eval(ir);
const auto v2 = a2.eval(ir);
return ir->CreateBinOp(opc, v1, v2);
}
llvm_match_tuple<A1, A2> match(llvm::Value*& value, llvm::Module* _m) const
{
llvm::Value* v1 = {};
llvm::Value* v2 = {};
if (auto i = llvm::dyn_cast_or_null<llvm::BinaryOperator>(value); i && i->getOpcode() == opc)
{
v1 = i->getOperand(0);
v2 = i->getOperand(1);
if (auto r1 = a1.match(v1, _m); v1)
{
if (auto r2 = a2.match(v2, _m); v2)
{
return std::tuple_cat(r1, r2);
}
}
}
value = nullptr;
return {};
}
};
template <typename T1, typename T2>
inline llvm_sub<T1, T2> operator -(T1&& a1, T2&& a2)
{
return {a1, a2};
}
template <typename T1>
inline llvm_sub<T1, llvm_const_int<typename is_llvm_expr<T1>::type>> operator -(T1&& a1, u64 c)
{
return {a1, {c}};
}
template <typename T1>
inline llvm_sub<llvm_const_int<typename is_llvm_expr<T1>::type>, T1> operator -(u64 c, T1&& a1)
{
return {{c}, a1};
}
template <typename A1, typename A2, typename T = llvm_common_t<A1, A2>>
struct llvm_mul
{
using type = T;
llvm_expr_t<A1> a1;
llvm_expr_t<A2> a2;
static_assert(llvm_value_t<T>::is_sint || llvm_value_t<T>::is_uint || llvm_value_t<T>::is_float, "llvm_mul<>: invalid type");
static constexpr auto opc = llvm_value_t<T>::is_float ? llvm::Instruction::FMul : llvm::Instruction::Mul;
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
const auto v1 = a1.eval(ir);
const auto v2 = a2.eval(ir);
return ir->CreateBinOp(opc, v1, v2);
}
llvm_match_tuple<A1, A2> match(llvm::Value*& value, llvm::Module* _m) const
{
llvm::Value* v1 = {};
llvm::Value* v2 = {};
if (auto i = llvm::dyn_cast_or_null<llvm::BinaryOperator>(value); i && i->getOpcode() == opc)
{
v1 = i->getOperand(0);
v2 = i->getOperand(1);
if (auto r1 = a1.match(v1, _m); v1)
{
if (auto r2 = a2.match(v2, _m); v2)
{
return std::tuple_cat(r1, r2);
}
}
v1 = i->getOperand(0);
v2 = i->getOperand(1);
// Argument order does not matter here, so retry with the operands swapped
if (auto r1 = a1.match(v2, _m); v2)
{
if (auto r2 = a2.match(v1, _m); v1)
{
return std::tuple_cat(r1, r2);
}
}
}
value = nullptr;
return {};
}
};
template <typename T1, typename T2>
inline llvm_mul<T1, T2> operator *(T1&& a1, T2&& a2)
{
return {a1, a2};
}
template <typename A1, typename A2, typename T = llvm_common_t<A1, A2>>
struct llvm_div
{
using type = T;
llvm_expr_t<A1> a1;
llvm_expr_t<A2> a2;
static_assert(llvm_value_t<T>::is_sint || llvm_value_t<T>::is_uint || llvm_value_t<T>::is_float, "llvm_div<>: invalid type");
static constexpr auto opc =
llvm_value_t<T>::is_float ? llvm::Instruction::FDiv :
llvm_value_t<T>::is_uint ? llvm::Instruction::UDiv : llvm::Instruction::SDiv;
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
const auto v1 = a1.eval(ir);
const auto v2 = a2.eval(ir);
return ir->CreateBinOp(opc, v1, v2);
}
llvm_match_tuple<A1, A2> match(llvm::Value*& value, llvm::Module* _m) const
{
llvm::Value* v1 = {};
llvm::Value* v2 = {};
if (auto i = llvm::dyn_cast_or_null<llvm::BinaryOperator>(value); i && i->getOpcode() == opc)
{
v1 = i->getOperand(0);
v2 = i->getOperand(1);
if (auto r1 = a1.match(v1, _m); v1)
{
if (auto r2 = a2.match(v2, _m); v2)
{
return std::tuple_cat(r1, r2);
}
}
}
value = nullptr;
return {};
}
};
template <typename T1, typename T2>
inline llvm_div<T1, T2> operator /(T1&& a1, T2&& a2)
{
return {a1, a2};
}
inline llvm::Constant* getZeroValueForNegation(llvm::Type* Ty)
{
if (Ty->isFPOrFPVectorTy())
return llvm::ConstantFP::getNegativeZero(Ty);
return llvm::Constant::getNullValue(Ty);
}
template <typename A1, typename T = llvm_common_t<A1>>
struct llvm_neg
{
using type = T;
llvm_expr_t<A1> a1;
static_assert(llvm_value_t<T>::is_sint || llvm_value_t<T>::is_uint || llvm_value_t<T>::is_float, "llvm_neg<>: invalid type");
static constexpr int opc = llvm_value_t<T>::is_float ? +llvm::Instruction::FNeg : +llvm::Instruction::Sub;
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
const auto v1 = a1.eval(ir);
if constexpr (llvm_value_t<T>::is_int)
{
return ir->CreateNeg(v1);
}
if constexpr (llvm_value_t<T>::is_float)
{
return ir->CreateFNeg(v1);
}
// Unreachable: the static_assert above restricts T to integer or floating-point types
return nullptr;
}
llvm_match_tuple<A1> match(llvm::Value*& value, llvm::Module* _m) const
{
llvm::Value* v1 = {};
if constexpr (llvm_value_t<T>::is_float)
{
if (auto i = llvm::dyn_cast_or_null<llvm::UnaryOperator>(value); i && i->getOpcode() == opc)
{
v1 = i->getOperand(0);
if (auto r1 = a1.match(v1, _m); v1)
{
return r1;
}
}
}
if (auto i = llvm::dyn_cast_or_null<llvm::BinaryOperator>(value); i && i->getOpcode() == opc)
{
v1 = i->getOperand(1);
if (i->getOperand(0) == getZeroValueForNegation(v1->getType()))
{
if (auto r1 = a1.match(v1, _m); v1)
{
return r1;
}
}
}
value = nullptr;
return {};
}
};
template <typename T1>
inline llvm_neg<T1> operator -(T1 a1)
{
return {a1};
}
template <typename A1, typename A2, typename T = llvm_common_t<A1, A2>>
struct llvm_shl
{
using type = T;
llvm_expr_t<A1> a1;
llvm_expr_t<A2> a2;
static_assert(llvm_value_t<T>::is_sint || llvm_value_t<T>::is_uint, "llvm_shl<>: invalid type");
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
const auto v1 = a1.eval(ir);
const auto v2 = a2.eval(ir);
return ir->CreateShl(v1, v2);
}
llvm_match_tuple<A1, A2> match(llvm::Value*& value, llvm::Module* _m) const
{
llvm::Value* v1 = {};
llvm::Value* v2 = {};
if (auto i = llvm::dyn_cast_or_null<llvm::BinaryOperator>(value); i && i->getOpcode() == llvm::Instruction::Shl)
{
v1 = i->getOperand(0);
v2 = i->getOperand(1);
if (auto r1 = a1.match(v1, _m); v1)
{
if (auto r2 = a2.match(v2, _m); v2)
{
return std::tuple_cat(r1, r2);
}
}
}
value = nullptr;
return {};
}
};
template <typename T1, typename T2>
inline llvm_shl<T1, T2> operator <<(T1&& a1, T2&& a2)
{
return {a1, a2};
}
template <typename T1>
inline llvm_shl<T1, llvm_const_int<typename is_llvm_expr<T1>::type>> operator <<(T1&& a1, u64 c)
{
return {a1, {c}};
}
template <typename A1, typename A2, typename T = llvm_common_t<A1, A2>>
struct llvm_shr
{
using type = T;
llvm_expr_t<A1> a1;
llvm_expr_t<A2> a2;
static_assert(llvm_value_t<T>::is_sint || llvm_value_t<T>::is_uint, "llvm_shr<>: invalid type");
static constexpr auto opc = llvm_value_t<T>::is_uint ? llvm::Instruction::LShr : llvm::Instruction::AShr;
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
const auto v1 = a1.eval(ir);
const auto v2 = a2.eval(ir);
return ir->CreateBinOp(opc, v1, v2);
}
llvm_match_tuple<A1, A2> match(llvm::Value*& value, llvm::Module* _m) const
{
llvm::Value* v1 = {};
llvm::Value* v2 = {};
if (auto i = llvm::dyn_cast_or_null<llvm::BinaryOperator>(value); i && i->getOpcode() == opc)
{
v1 = i->getOperand(0);
v2 = i->getOperand(1);
if (auto r1 = a1.match(v1, _m); v1)
{
if (auto r2 = a2.match(v2, _m); v2)
{
return std::tuple_cat(r1, r2);
}
}
}
value = nullptr;
return {};
}
};
template <typename T1, typename T2>
inline llvm_shr<T1, T2> operator >>(T1&& a1, T2&& a2)
{
return {a1, a2};
}
template <typename T1>
inline llvm_shr<T1, llvm_const_int<typename is_llvm_expr<T1>::type>> operator >>(T1&& a1, u64 c)
{
return {a1, {c}};
}
template <typename A1, typename A2, typename A3, typename T = llvm_common_t<A1, A2, A3>>
struct llvm_fshl
{
using type = T;
llvm_expr_t<A1> a1;
llvm_expr_t<A2> a2;
llvm_expr_t<A3> a3;
static_assert(llvm_value_t<T>::is_sint || llvm_value_t<T>::is_uint, "llvm_fshl<>: invalid type");
static constexpr bool is_ok = llvm_value_t<T>::is_sint || llvm_value_t<T>::is_uint;
static llvm::Function* get_fshl(llvm::IRBuilder<>* ir)
{
const auto _module = ir->GetInsertBlock()->getParent()->getParent();
return llvm::Intrinsic::getDeclaration(_module, llvm::Intrinsic::fshl, {llvm_value_t<T>::get_type(ir->getContext())});
}
static llvm::Value* fold(llvm::IRBuilder<>* ir, llvm::Value* v1, llvm::Value* v2, llvm::Value* v3)
{
// Compute constant result.
const u64 size = v3->getType()->getScalarSizeInBits();
const auto val = ir->CreateURem(v3, llvm::ConstantInt::get(v3->getType(), size));
const auto shl = ir->CreateShl(v1, val);
const auto shr = ir->CreateLShr(v2, ir->CreateSub(llvm::ConstantInt::get(v3->getType(), size - 1), val));
return ir->CreateOr(shl, ir->CreateLShr(shr, 1));
}
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
const auto v1 = a1.eval(ir);
const auto v2 = a2.eval(ir);
const auto v3 = a3.eval(ir);
if (llvm::isa<llvm::Constant>(v1) && llvm::isa<llvm::Constant>(v2) && llvm::isa<llvm::Constant>(v3))
{
return fold(ir, v1, v2, v3);
}
return ir->CreateCall(get_fshl(ir), {v1, v2, v3});
}
llvm_match_tuple<A1, A2, A3> match(llvm::Value*& value, llvm::Module* _m) const
{
llvm::Value* v1 = {};
llvm::Value* v2 = {};
llvm::Value* v3 = {};
if (auto i = llvm::dyn_cast_or_null<llvm::CallInst>(value); i && i->getIntrinsicID() == llvm::Intrinsic::fshl)
{
v1 = i->getOperand(0);
v2 = i->getOperand(1);
v3 = i->getOperand(2);
if (auto r1 = a1.match(v1, _m); v1)
{
if (auto r2 = a2.match(v2, _m); v2)
{
if (auto r3 = a3.match(v3, _m); v3)
{
return std::tuple_cat(r1, r2, r3);
}
}
}
}
value = nullptr;
return {};
}
};
template <typename A1, typename A2, typename A3, typename T = llvm_common_t<A1, A2, A3>>
struct llvm_fshr
{
using type = T;
llvm_expr_t<A1> a1;
llvm_expr_t<A2> a2;
llvm_expr_t<A3> a3;
static_assert(llvm_value_t<T>::is_sint || llvm_value_t<T>::is_uint, "llvm_fshr<>: invalid type");
static constexpr bool is_ok = llvm_value_t<T>::is_sint || llvm_value_t<T>::is_uint;
static llvm::Function* get_fshr(llvm::IRBuilder<>* ir)
{
const auto _module = ir->GetInsertBlock()->getParent()->getParent();
return llvm::Intrinsic::getDeclaration(_module, llvm::Intrinsic::fshr, {llvm_value_t<T>::get_type(ir->getContext())});
}
static llvm::Value* fold(llvm::IRBuilder<>* ir, llvm::Value* v1, llvm::Value* v2, llvm::Value* v3)
{
// Compute constant result.
const u64 size = v3->getType()->getScalarSizeInBits();
const auto val = ir->CreateURem(v3, llvm::ConstantInt::get(v3->getType(), size));
const auto shr = ir->CreateLShr(v2, val);
const auto shl = ir->CreateShl(v1, ir->CreateSub(llvm::ConstantInt::get(v3->getType(), size - 1), val));
return ir->CreateOr(shr, ir->CreateShl(shl, 1));
}
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
const auto v1 = a1.eval(ir);
const auto v2 = a2.eval(ir);
const auto v3 = a3.eval(ir);
if (llvm::isa<llvm::Constant>(v1) && llvm::isa<llvm::Constant>(v2) && llvm::isa<llvm::Constant>(v3))
{
return fold(ir, v1, v2, v3);
}
return ir->CreateCall(get_fshr(ir), {v1, v2, v3});
}
llvm_match_tuple<A1, A2, A3> match(llvm::Value*& value, llvm::Module* _m) const
{
llvm::Value* v1 = {};
llvm::Value* v2 = {};
llvm::Value* v3 = {};
if (auto i = llvm::dyn_cast_or_null<llvm::CallInst>(value); i && i->getIntrinsicID() == llvm::Intrinsic::fshr)
{
v1 = i->getOperand(0);
v2 = i->getOperand(1);
v3 = i->getOperand(2);
if (auto r1 = a1.match(v1, _m); v1)
{
if (auto r2 = a2.match(v2, _m); v2)
{
if (auto r3 = a3.match(v3, _m); v3)
{
return std::tuple_cat(r1, r2, r3);
}
}
}
}
value = nullptr;
return {};
}
};
template <typename A1, typename A2, typename T = llvm_common_t<A1, A2>>
struct llvm_rol
{
using type = T;
llvm_expr_t<A1> a1;
llvm_expr_t<A2> a2;
static_assert(llvm_value_t<T>::is_sint || llvm_value_t<T>::is_uint, "llvm_rol<>: invalid type");
static constexpr bool is_ok = llvm_value_t<T>::is_sint || llvm_value_t<T>::is_uint;
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
const auto v1 = a1.eval(ir);
const auto v2 = a2.eval(ir);
if (llvm::isa<llvm::Constant>(v1) && llvm::isa<llvm::Constant>(v2))
{
return llvm_fshl<A1, A1, A2>::fold(ir, v1, v1, v2);
}
return ir->CreateCall(llvm_fshl<A1, A1, A2>::get_fshl(ir), {v1, v1, v2});
}
llvm_match_tuple<A1, A2> match(llvm::Value*& value, llvm::Module* _m) const
{
llvm::Value* v1 = {};
llvm::Value* v2 = {};
if (auto i = llvm::dyn_cast_or_null<llvm::CallInst>(value); i && i->getIntrinsicID() == llvm::Intrinsic::fshl)
{
v1 = i->getOperand(0);
v2 = i->getOperand(2);
if (i->getOperand(1) == v1)
{
if (auto r1 = a1.match(v1, _m); v1)
{
if (auto r2 = a2.match(v2, _m); v2)
{
return std::tuple_cat(r1, r2);
}
}
}
}
value = nullptr;
return {};
}
};
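// llvm_rol is a rotate-left expressed as llvm.fshl with both data operands equal; match()
// accepts exactly that shape. Illustrative sketch (comment only), assuming a rol() helper
// elsewhere in this header constructs the node:
//
//   value_t<u32[4]> v = ..., n = ...;
//   auto r = eval(rol(v, n));   // call <4 x i32> @llvm.fshl.v4i32(v, v, n)
//
// With all-constant operands, fold() computes the result with plain shifts instead, so no
// intrinsic call is left in the IR.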
template <typename A1, typename A2, typename T = llvm_common_t<A1, A2>>
struct llvm_and
{
using type = T;
llvm_expr_t<A1> a1;
llvm_expr_t<A2> a2;
static_assert(llvm_value_t<T>::is_int, "llvm_and<>: invalid type");
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
const auto v1 = a1.eval(ir);
const auto v2 = a2.eval(ir);
return ir->CreateAnd(v1, v2);
}
llvm_match_tuple<A1, A2> match(llvm::Value*& value, llvm::Module* _m) const
{
llvm::Value* v1 = {};
llvm::Value* v2 = {};
if (auto i = llvm::dyn_cast_or_null<llvm::BinaryOperator>(value); i && i->getOpcode() == llvm::Instruction::And)
{
v1 = i->getOperand(0);
v2 = i->getOperand(1);
if (auto r1 = a1.match(v1, _m); v1)
{
if (auto r2 = a2.match(v2, _m); v2)
{
return std::tuple_cat(r1, r2);
}
}
}
value = nullptr;
return {};
}
};
template <typename T1, typename T2>
inline llvm_and<T1, T2> operator &(T1&& a1, T2&& a2)
{
return {a1, a2};
}
template <typename T1>
inline llvm_and<T1, llvm_const_int<typename is_llvm_expr<T1>::type>> operator &(T1&& a1, u64 c)
{
return {a1, {c}};
}
template <typename A1, typename A2, typename T = llvm_common_t<A1, A2>>
struct llvm_or
{
using type = T;
llvm_expr_t<A1> a1;
llvm_expr_t<A2> a2;
static_assert(llvm_value_t<T>::is_int, "llvm_or<>: invalid type");
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
const auto v1 = a1.eval(ir);
const auto v2 = a2.eval(ir);
return ir->CreateOr(v1, v2);
}
llvm_match_tuple<A1, A2> match(llvm::Value*& value, llvm::Module* _m) const
{
llvm::Value* v1 = {};
llvm::Value* v2 = {};
if (auto i = llvm::dyn_cast_or_null<llvm::BinaryOperator>(value); i && i->getOpcode() == llvm::Instruction::Or)
{
v1 = i->getOperand(0);
v2 = i->getOperand(1);
if (auto r1 = a1.match(v1, _m); v1)
{
if (auto r2 = a2.match(v2, _m); v2)
{
return std::tuple_cat(r1, r2);
}
}
}
value = nullptr;
return {};
}
};
template <typename T1, typename T2>
inline llvm_or<T1, T2> operator |(T1&& a1, T2&& a2)
{
return {a1, a2};
}
template <typename T1>
inline llvm_or<T1, llvm_const_int<typename is_llvm_expr<T1>::type>> operator |(T1&& a1, u64 c)
{
return {a1, {c}};
}
template <typename A1, typename A2, typename T = llvm_common_t<A1, A2>>
struct llvm_xor
{
using type = T;
llvm_expr_t<A1> a1;
llvm_expr_t<A2> a2;
static_assert(llvm_value_t<T>::is_int, "llvm_xor<>: invalid type");
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
const auto v1 = a1.eval(ir);
const auto v2 = a2.eval(ir);
return ir->CreateXor(v1, v2);
}
llvm_match_tuple<A1, A2> match(llvm::Value*& value, llvm::Module* _m) const
{
llvm::Value* v1 = {};
llvm::Value* v2 = {};
if (auto i = llvm::dyn_cast_or_null<llvm::BinaryOperator>(value); i && i->getOpcode() == llvm::Instruction::Xor)
{
v1 = i->getOperand(0);
v2 = i->getOperand(1);
if (auto r1 = a1.match(v1, _m); v1)
{
if (auto r2 = a2.match(v2, _m); v2)
{
return std::tuple_cat(r1, r2);
}
}
}
value = nullptr;
return {};
}
};
template <typename T1, typename T2>
inline llvm_xor<T1, T2> operator ^(T1&& a1, T2&& a2)
{
return {a1, a2};
}
template <typename T1>
inline llvm_xor<T1, llvm_const_int<typename is_llvm_expr<T1>::type>> operator ^(T1&& a1, u64 c)
{
return {a1, {c}};
}
template <typename T1>
inline llvm_xor<T1, llvm_const_int<typename is_llvm_expr<T1>::type, true>> operator ~(T1&& a1)
{
return {a1, {u64{umax}}};
}
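// Note that operator~ is encoded as "x xor all-ones", the canonical form LLVM itself uses
// for a bitwise NOT, so the same node both emits and matches that pattern. Illustrative
// sketch (comment only):
//
//   auto r = eval(~a);       // xor a, -1
//   auto s = eval(a & ~b);   // and a, (xor b, -1)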
template <typename A1, typename A2, llvm::CmpInst::Predicate UPred, typename T = llvm_common_t<A1, A2>>
struct llvm_cmp
{
using type = std::conditional_t<llvm_value_t<T>::is_vector != 0, bool[llvm_value_t<T>::is_vector], bool>;
static constexpr bool is_float = llvm_value_t<T>::is_float;
llvm_expr_t<A1> a1;
llvm_expr_t<A2> a2;
static_assert(llvm_value_t<T>::is_int || is_float, "llvm_cmp<>: invalid type");
// Convert unsigned comparison predicate to signed if necessary
static constexpr llvm::CmpInst::Predicate pred = llvm_value_t<T>::is_uint ? UPred :
UPred == llvm::ICmpInst::ICMP_UGT ? llvm::ICmpInst::ICMP_SGT :
UPred == llvm::ICmpInst::ICMP_UGE ? llvm::ICmpInst::ICMP_SGE :
UPred == llvm::ICmpInst::ICMP_ULT ? llvm::ICmpInst::ICMP_SLT :
UPred == llvm::ICmpInst::ICMP_ULE ? llvm::ICmpInst::ICMP_SLE : UPred;
static_assert(llvm_value_t<T>::is_sint || llvm_value_t<T>::is_uint || is_float || UPred == llvm::ICmpInst::ICMP_EQ || UPred == llvm::ICmpInst::ICMP_NE, "llvm_cmp<>: invalid operation on sign-undefined type");
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
static_assert(!is_float, "llvm_cmp<>: invalid operation (missing fcmp_ord or fcmp_uno)");
const auto v1 = a1.eval(ir);
const auto v2 = a2.eval(ir);
return ir->CreateICmp(pred, v1, v2);
}
llvm_match_tuple<A1, A2> match(llvm::Value*& value, llvm::Module* _m) const
{
llvm::Value* v1 = {};
llvm::Value* v2 = {};
if (auto i = llvm::dyn_cast_or_null<llvm::ICmpInst>(value); i && i->getPredicate() == pred)
{
v1 = i->getOperand(0);
v2 = i->getOperand(1);
if (auto r1 = a1.match(v1, _m); v1)
{
if (auto r2 = a2.match(v2, _m); v2)
{
return std::tuple_cat(r1, r2);
}
}
}
value = nullptr;
return {};
}
};
template <typename T>
struct is_llvm_cmp : std::bool_constant<false>
{
};
template <typename A1, typename A2, auto UPred, typename T>
struct is_llvm_cmp<llvm_cmp<A1, A2, UPred, T>> : std::bool_constant<true>
{
};
template <typename Cmp, typename T = llvm_common_t<Cmp>>
struct llvm_ord
{
using base = std::decay_t<Cmp>;
using type = typename base::type;
llvm_expr_t<Cmp> cmp;
// Convert comparison predicate to ordered
static constexpr llvm::CmpInst::Predicate pred =
base::pred == llvm::ICmpInst::ICMP_EQ ? llvm::ICmpInst::FCMP_OEQ :
base::pred == llvm::ICmpInst::ICMP_NE ? llvm::ICmpInst::FCMP_ONE :
base::pred == llvm::ICmpInst::ICMP_SGT ? llvm::ICmpInst::FCMP_OGT :
base::pred == llvm::ICmpInst::ICMP_SGE ? llvm::ICmpInst::FCMP_OGE :
base::pred == llvm::ICmpInst::ICMP_SLT ? llvm::ICmpInst::FCMP_OLT :
base::pred == llvm::ICmpInst::ICMP_SLE ? llvm::ICmpInst::FCMP_OLE : base::pred;
static_assert(base::is_float, "llvm_ord<>: invalid type");
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
const auto v1 = cmp.a1.eval(ir);
const auto v2 = cmp.a2.eval(ir);
return ir->CreateFCmp(pred, v1, v2);
}
llvm_match_tuple<Cmp> match(llvm::Value*& value, llvm::Module* _m) const
{
llvm::Value* v1 = {};
llvm::Value* v2 = {};
if (auto i = llvm::dyn_cast_or_null<llvm::FCmpInst>(value); i && i->getPredicate() == pred)
{
v1 = i->getOperand(0);
v2 = i->getOperand(1);
if (auto r1 = cmp.a1.match(v1, _m); v1)
{
if (auto r2 = cmp.a2.match(v2, _m); v2)
{
return std::tuple_cat(r1, r2);
}
}
}
value = nullptr;
return {};
}
};
template <typename T>
llvm_ord(T&&) -> llvm_ord<std::enable_if_t<is_llvm_cmp<std::decay_t<T>>::value, T&&>>;
template <typename Cmp, typename T = llvm_common_t<Cmp>>
struct llvm_uno
{
using base = std::decay_t<Cmp>;
using type = typename base::type;
llvm_expr_t<Cmp> cmp;
// Convert comparison predicate to unordered
static constexpr llvm::CmpInst::Predicate pred =
base::pred == llvm::ICmpInst::ICMP_EQ ? llvm::ICmpInst::FCMP_UEQ :
base::pred == llvm::ICmpInst::ICMP_NE ? llvm::ICmpInst::FCMP_UNE :
base::pred == llvm::ICmpInst::ICMP_SGT ? llvm::ICmpInst::FCMP_UGT :
base::pred == llvm::ICmpInst::ICMP_SGE ? llvm::ICmpInst::FCMP_UGE :
base::pred == llvm::ICmpInst::ICMP_SLT ? llvm::ICmpInst::FCMP_ULT :
base::pred == llvm::ICmpInst::ICMP_SLE ? llvm::ICmpInst::FCMP_ULE : base::pred;
static_assert(base::is_float, "llvm_uno<>: invalid type");
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
const auto v1 = cmp.a1.eval(ir);
const auto v2 = cmp.a2.eval(ir);
return ir->CreateFCmp(pred, v1, v2);
}
llvm_match_tuple<Cmp> match(llvm::Value*& value, llvm::Module* _m) const
{
llvm::Value* v1 = {};
llvm::Value* v2 = {};
if (auto i = llvm::dyn_cast_or_null<llvm::FCmpInst>(value); i && i->getPredicate() == pred)
{
v1 = i->getOperand(0);
v2 = i->getOperand(1);
if (auto r1 = cmp.a1.match(v1, _m); v1)
{
if (auto r2 = cmp.a2.match(v2, _m); v2)
{
return std::tuple_cat(r1, r2);
}
}
}
value = nullptr;
return {};
}
};
template <typename T>
llvm_uno(T&&) -> llvm_uno<std::enable_if_t<is_llvm_cmp<std::decay_t<T>>::value, T&&>>;
template <typename T1, typename T2>
inline llvm_cmp<T1, T2, llvm::ICmpInst::ICMP_EQ> operator ==(T1&& a1, T2&& a2)
{
return {a1, a2};
}
template <typename T1>
inline llvm_cmp<T1, llvm_const_int<typename is_llvm_expr<T1>::type>, llvm::ICmpInst::ICMP_EQ> operator ==(T1&& a1, u64 c)
{
return {a1, {c}};
}
template <typename T1, typename T2>
inline llvm_cmp<T1, T2, llvm::ICmpInst::ICMP_NE> operator !=(T1&& a1, T2&& a2)
{
return {a1, a2};
}
template <typename T1>
inline llvm_cmp<T1, llvm_const_int<typename is_llvm_expr<T1>::type>, llvm::ICmpInst::ICMP_NE> operator !=(T1&& a1, u64 c)
{
return {a1, {c}};
}
template <typename T1, typename T2>
inline llvm_cmp<T1, T2, llvm::ICmpInst::ICMP_UGT> operator >(T1&& a1, T2&& a2)
{
return {a1, a2};
}
template <typename T1>
inline llvm_cmp<T1, llvm_const_int<typename is_llvm_expr<T1>::type>, llvm::ICmpInst::ICMP_UGT> operator >(T1&& a1, u64 c)
{
return {a1, {c}};
}
template <typename T1, typename T2>
inline llvm_cmp<T1, T2, llvm::ICmpInst::ICMP_UGE> operator >=(T1&& a1, T2&& a2)
{
return {a1, a2};
}
template <typename T1>
inline llvm_cmp<T1, llvm_const_int<typename is_llvm_expr<T1>::type>, llvm::ICmpInst::ICMP_UGE> operator >=(T1&& a1, u64 c)
{
return {a1, {c}};
}
template <typename T1, typename T2>
inline llvm_cmp<T1, T2, llvm::ICmpInst::ICMP_ULT> operator <(T1&& a1, T2&& a2)
{
return {a1, a2};
}
template <typename T1>
inline llvm_cmp<T1, llvm_const_int<typename is_llvm_expr<T1>::type>, llvm::ICmpInst::ICMP_ULT> operator <(T1&& a1, u64 c)
{
return {a1, {c}};
}
template <typename T1, typename T2>
inline llvm_cmp<T1, T2, llvm::ICmpInst::ICMP_ULE> operator <=(T1&& a1, T2&& a2)
{
return {a1, a2};
}
template <typename T1>
inline llvm_cmp<T1, llvm_const_int<typename is_llvm_expr<T1>::type>, llvm::ICmpInst::ICMP_ULE> operator <=(T1&& a1, u64 c)
{
return {a1, {c}};
}
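// Comparisons build llvm_cmp nodes typed as bool (or bool[N] for vectors). The operators
// above always use unsigned predicates; llvm_cmp rewrites them to the signed forms for
// signed operand types, and llvm_ord/llvm_uno further rewrite them to ordered/unordered
// floating-point predicates. Illustrative sketch (comment only; fcmp_ord is assumed to be
// a wrapper over llvm_ord defined elsewhere in this header, as hinted by the static_assert
// in llvm_cmp::eval):
//
//   value_t<s32[4]> a = ..., b = ...;
//   auto m = eval(a > b);             // icmp sgt (signed element type -> signed predicate)
//   value_t<f32[4]> x = ..., y = ...;
//   auto o = eval(fcmp_ord(x < y));   // fcmp olt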
template <typename U, typename A1, typename T = llvm_common_t<A1>>
struct llvm_noncast
{
using type = U;
llvm_expr_t<A1> a1;
static_assert(llvm_value_t<T>::is_int, "llvm_noncast<>: invalid type");
static_assert(llvm_value_t<U>::is_int, "llvm_noncast<>: invalid result type");
static_assert(llvm_value_t<T>::esize == llvm_value_t<U>::esize, "llvm_noncast<>: result is resized");
static_assert(llvm_value_t<T>::is_vector == llvm_value_t<U>::is_vector, "llvm_noncast<>: vector element mismatch");
static constexpr bool is_ok =
llvm_value_t<T>::is_int &&
llvm_value_t<U>::is_int &&
llvm_value_t<T>::esize == llvm_value_t<U>::esize &&
llvm_value_t<T>::is_vector == llvm_value_t<U>::is_vector;
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
// No operation required
return a1.eval(ir);
}
llvm_match_tuple<A1> match(llvm::Value*& value, llvm::Module* _m) const
{
if (value)
{
if (auto r1 = a1.match(value, _m); value)
{
return r1;
}
}
value = nullptr;
return {};
}
};
template <typename U, typename A1, typename T = llvm_common_t<A1>>
struct llvm_bitcast
{
using type = U;
llvm_expr_t<A1> a1;
llvm::Module* _module;
static constexpr uint bitsize0 = llvm_value_t<T>::is_vector ? llvm_value_t<T>::is_vector * llvm_value_t<T>::esize : llvm_value_t<T>::esize;
static constexpr uint bitsize1 = llvm_value_t<U>::is_vector ? llvm_value_t<U>::is_vector * llvm_value_t<U>::esize : llvm_value_t<U>::esize;
static_assert(bitsize0 == bitsize1, "llvm_bitcast<>: invalid type (size mismatch)");
static_assert(llvm_value_t<T>::is_int || llvm_value_t<T>::is_float, "llvm_bitcast<>: invalid type");
static_assert(llvm_value_t<U>::is_int || llvm_value_t<U>::is_float, "llvm_bitcast<>: invalid result type");
static constexpr bool is_ok =
bitsize0 && bitsize0 == bitsize1 &&
(llvm_value_t<T>::is_int || llvm_value_t<T>::is_float) &&
(llvm_value_t<U>::is_int || llvm_value_t<U>::is_float);
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
const auto v1 = a1.eval(ir);
const auto rt = llvm_value_t<U>::get_type(ir->getContext());
if constexpr (llvm_value_t<T>::is_int == llvm_value_t<U>::is_int && llvm_value_t<T>::is_vector == llvm_value_t<U>::is_vector)
{
return v1;
}
if (const auto c1 = llvm::dyn_cast<llvm::Constant>(v1))
{
const auto result = llvm::ConstantFoldCastOperand(llvm::Instruction::BitCast, c1, rt, ir->GetInsertBlock()->getParent()->getParent()->getDataLayout());
if (result)
{
return result;
}
}
return ir->CreateBitCast(v1, rt);
}
llvm_match_tuple<A1> match(llvm::Value*& value, llvm::Module* _m) const
{
if constexpr (llvm_value_t<T>::is_int == llvm_value_t<U>::is_int && llvm_value_t<T>::is_vector == llvm_value_t<U>::is_vector)
{
if (value)
{
if (auto r1 = a1.match(value, _m); value)
{
return r1;
}
}
return {};
}
llvm::Value* v1 = {};
if (auto i = llvm::dyn_cast_or_null<llvm::CastInst>(value); i && i->getOpcode() == llvm::Instruction::BitCast)
{
v1 = i->getOperand(0);
if (llvm_value_t<U>::get_type(v1->getContext()) == i->getDestTy())
{
if (auto r1 = a1.match(v1, _m); v1)
{
return r1;
}
}
}
if (auto c = llvm::dyn_cast_or_null<llvm::Constant>(value))
{
const auto target = llvm_value_t<T>::get_type(c->getContext());
// Reverse bitcast on a constant
if (llvm::Value* cv = llvm::ConstantFoldCastOperand(llvm::Instruction::BitCast, c, target, _m->getDataLayout()))
{
if (auto r1 = a1.match(cv, _m); cv)
{
return r1;
}
}
}
value = nullptr;
return {};
}
};
template <typename U, typename A1, typename T = llvm_common_t<A1>>
struct llvm_fpcast
{
using type = U;
static constexpr auto opc =
llvm_value_t<T>::is_sint ? llvm::Instruction::SIToFP :
llvm_value_t<U>::is_sint ? llvm::Instruction::FPToSI :
llvm_value_t<T>::is_int ? llvm::Instruction::UIToFP :
llvm_value_t<U>::is_int ? llvm::Instruction::FPToUI :
llvm_value_t<T>::esize > llvm_value_t<U>::esize ? llvm::Instruction::FPTrunc :
llvm_value_t<T>::esize < llvm_value_t<U>::esize ? llvm::Instruction::FPExt : llvm::Instruction::BitCast;
llvm_expr_t<A1> a1;
static_assert(llvm_value_t<T>::is_float || llvm_value_t<U>::is_float, "llvm_fpcast<>: invalid type(s)");
static_assert(opc != llvm::Instruction::BitCast, "llvm_fpcast<>: possible cast to the same type");
static_assert(llvm_value_t<T>::is_vector == llvm_value_t<U>::is_vector, "llvm_fpcast<>: vector element mismatch");
static constexpr bool is_ok =
(llvm_value_t<T>::is_float || llvm_value_t<U>::is_float) && opc != llvm::Instruction::BitCast &&
llvm_value_t<T>::is_vector == llvm_value_t<U>::is_vector;
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
return ir->CreateCast(opc, a1.eval(ir), llvm_value_t<U>::get_type(ir->getContext()));
}
llvm_match_tuple<A1> match(llvm::Value*& value, llvm::Module* _m) const
{
llvm::Value* v1 = {};
if (auto i = llvm::dyn_cast_or_null<llvm::CastInst>(value); i && i->getOpcode() == opc)
{
v1 = i->getOperand(0);
if (llvm_value_t<U>::get_type(v1->getContext()) == i->getDestTy())
{
if (auto r1 = a1.match(v1, _m); v1)
{
return r1;
}
}
}
value = nullptr;
return {};
}
};
template <typename U, typename A1, typename T = llvm_common_t<A1>>
struct llvm_trunc
{
using type = U;
llvm_expr_t<A1> a1;
static_assert(llvm_value_t<T>::is_int, "llvm_trunc<>: invalid type");
static_assert(llvm_value_t<U>::is_int, "llvm_trunc<>: invalid result type");
static_assert(llvm_value_t<T>::esize > llvm_value_t<U>::esize, "llvm_trunc<>: result is not truncated");
static_assert(llvm_value_t<T>::is_vector == llvm_value_t<U>::is_vector, "llvm_trunc<>: vector element mismatch");
static constexpr bool is_ok =
llvm_value_t<T>::is_int &&
llvm_value_t<U>::is_int &&
llvm_value_t<T>::esize > llvm_value_t<U>::esize &&
llvm_value_t<T>::is_vector == llvm_value_t<U>::is_vector;
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
return ir->CreateTrunc(a1.eval(ir), llvm_value_t<U>::get_type(ir->getContext()));
}
llvm_match_tuple<A1> match(llvm::Value*& value, llvm::Module* _m) const
{
llvm::Value* v1 = {};
if (auto i = llvm::dyn_cast_or_null<llvm::CastInst>(value); i && i->getOpcode() == llvm::Instruction::Trunc)
{
v1 = i->getOperand(0);
if (llvm_value_t<U>::get_type(v1->getContext()) == i->getDestTy())
{
if (auto r1 = a1.match(v1, _m); v1)
{
return r1;
}
}
}
value = nullptr;
return {};
}
};
template <typename U, typename A1, typename T = llvm_common_t<A1>>
struct llvm_sext
{
using type = U;
llvm_expr_t<A1> a1;
static_assert(llvm_value_t<T>::is_int, "llvm_sext<>: invalid type");
static_assert(llvm_value_t<U>::is_sint, "llvm_sext<>: invalid result type");
static_assert(llvm_value_t<T>::esize < llvm_value_t<U>::esize, "llvm_sext<>: result is not extended");
static_assert(llvm_value_t<T>::is_vector == llvm_value_t<U>::is_vector, "llvm_sext<>: vector element mismatch");
static constexpr bool is_ok =
llvm_value_t<T>::is_int &&
llvm_value_t<U>::is_sint &&
llvm_value_t<T>::esize < llvm_value_t<U>::esize &&
llvm_value_t<T>::is_vector == llvm_value_t<U>::is_vector;
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
return ir->CreateSExt(a1.eval(ir), llvm_value_t<U>::get_type(ir->getContext()));
}
llvm_match_tuple<A1> match(llvm::Value*& value, llvm::Module* _m) const
{
llvm::Value* v1 = {};
if (auto i = llvm::dyn_cast_or_null<llvm::CastInst>(value); i && i->getOpcode() == llvm::Instruction::SExt)
{
v1 = i->getOperand(0);
if (llvm_value_t<U>::get_type(v1->getContext()) == i->getDestTy())
{
if (auto r1 = a1.match(v1, _m); v1)
{
return r1;
}
}
}
value = nullptr;
return {};
}
};
template <typename U, typename A1, typename T = llvm_common_t<A1>>
struct llvm_zext
{
using type = U;
llvm_expr_t<A1> a1;
static_assert(llvm_value_t<T>::is_int, "llvm_zext<>: invalid type");
static_assert(llvm_value_t<U>::is_uint, "llvm_zext<>: invalid result type");
static_assert(llvm_value_t<T>::esize < llvm_value_t<U>::esize, "llvm_zext<>: result is not extended");
static_assert(llvm_value_t<T>::is_vector == llvm_value_t<U>::is_vector, "llvm_zext<>: vector element mismatch");
static constexpr bool is_ok =
llvm_value_t<T>::is_int &&
llvm_value_t<U>::is_uint &&
llvm_value_t<T>::esize < llvm_value_t<U>::esize &&
llvm_value_t<T>::is_vector == llvm_value_t<U>::is_vector;
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
return ir->CreateZExt(a1.eval(ir), llvm_value_t<U>::get_type(ir->getContext()));
}
llvm_match_tuple<A1> match(llvm::Value*& value, llvm::Module* _m) const
{
llvm::Value* v1 = {};
if (auto i = llvm::dyn_cast_or_null<llvm::CastInst>(value); i && i->getOpcode() == llvm::Instruction::ZExt)
{
v1 = i->getOperand(0);
if (llvm_value_t<U>::get_type(v1->getContext()) == i->getDestTy())
{
if (auto r1 = a1.match(v1, _m); v1)
{
return r1;
}
}
}
value = nullptr;
return {};
}
};
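// The cast nodes above (llvm_noncast/bitcast/fpcast/trunc/sext/zext) take the destination
// type as an explicit template parameter and validate the conversion at compile time via
// their static_asserts / is_ok flags. Illustrative sketch (comment only), assuming wrapper
// helpers such as zext<T>() and bitcast<T>() are defined elsewhere in this header:
//
//   value_t<u16[8]> h = ...;
//   auto w = eval(zext<u32[8]>(h));      // zext <8 x i16> to <8 x i32>
//   auto f = eval(bitcast<f32[8]>(w));   // bitcast <8 x i32> to <8 x float>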
template <typename A1, typename A2, typename A3, typename T = llvm_common_t<A2, A3>, typename U = llvm_common_t<A1>>
struct llvm_select
{
using type = T;
llvm_expr_t<A1> cond;
llvm_expr_t<A2> a2;
llvm_expr_t<A3> a3;
static_assert(llvm_value_t<U>::esize == 1 && llvm_value_t<U>::is_int, "llvm_select<>: invalid condition type (bool expected)");
static_assert(llvm_value_t<T>::is_vector == llvm_value_t<U>::is_vector, "llvm_select<>: vector element mismatch");
static constexpr bool is_ok =
llvm_value_t<U>::esize == 1 && llvm_value_t<U>::is_int &&
llvm_value_t<T>::is_vector == llvm_value_t<U>::is_vector;
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
return ir->CreateSelect(cond.eval(ir), a2.eval(ir), a3.eval(ir));
}
llvm_match_tuple<A1, A2, A3> match(llvm::Value*& value, llvm::Module* _m) const
{
llvm::Value* v1 = {};
llvm::Value* v2 = {};
llvm::Value* v3 = {};
if (auto i = llvm::dyn_cast_or_null<llvm::SelectInst>(value))
{
v1 = i->getOperand(0);
v2 = i->getOperand(1);
v3 = i->getOperand(2);
if (auto r1 = cond.match(v1, _m); v1)
{
if (auto r2 = a2.match(v2, _m); v2)
{
if (auto r3 = a3.match(v3, _m); v3)
{
return std::tuple_cat(r1, r2, r3);
}
}
}
}
value = nullptr;
return {};
}
};
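// llvm_select is the generic ternary node: the condition must be bool, or bool[N] with the
// same vector width as the results. Illustrative sketch (comment only), assuming a select()
// helper elsewhere in this header forwards to this node:
//
//   auto r = eval(select(a > b, a, b));   // select (icmp ugt a, b), a, b  ==  unsigned max
//
// llvm_min/llvm_max below emit exactly this cmp+select shape, which is also the pattern
// their match() methods look for.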
template <typename A1, typename A2, typename T = llvm_common_t<A1, A2>>
struct llvm_min
{
using type = T;
llvm_expr_t<A1> a1;
llvm_expr_t<A2> a2;
static_assert(llvm_value_t<T>::is_sint || llvm_value_t<T>::is_uint, "llvm_min<>: invalid type");
static constexpr bool is_ok = llvm_value_t<T>::is_sint || llvm_value_t<T>::is_uint;
static constexpr auto pred = llvm_value_t<T>::is_sint ? llvm::ICmpInst::ICMP_SLT : llvm::ICmpInst::ICMP_ULT;
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
const auto v1 = a1.eval(ir);
const auto v2 = a2.eval(ir);
return ir->CreateSelect(ir->CreateICmp(pred, v1, v2), v1, v2);
}
llvm_match_tuple<A1, A2> match(llvm::Value*& value, llvm::Module* _m) const
{
llvm::Value* v1 = {};
llvm::Value* v2 = {};
if (auto i = llvm::dyn_cast_or_null<llvm::SelectInst>(value))
{
v1 = i->getOperand(1);
v2 = i->getOperand(2);
if (auto j = llvm::dyn_cast<llvm::ICmpInst>(i->getOperand(0)); j && j->getPredicate() == pred)
{
if (v1 == j->getOperand(0) && v2 == j->getOperand(1))
{
if (auto r1 = a1.match(v1, _m); v1)
{
if (auto r2 = a2.match(v2, _m); v2)
{
return std::tuple_cat(r1, r2);
}
}
}
}
}
value = nullptr;
return {};
}
};
template <typename A1, typename A2, typename T = llvm_common_t<A1, A2>>
struct llvm_max
{
using type = T;
llvm_expr_t<A1> a1;
llvm_expr_t<A2> a2;
static_assert(llvm_value_t<T>::is_sint || llvm_value_t<T>::is_uint, "llvm_max<>: invalid type");
static constexpr auto pred = llvm_value_t<T>::is_sint ? llvm::ICmpInst::ICMP_SLT : llvm::ICmpInst::ICMP_ULT;
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
const auto v1 = a1.eval(ir);
const auto v2 = a2.eval(ir);
return ir->CreateSelect(ir->CreateICmp(pred, v1, v2), v2, v1);
}
llvm_match_tuple<A1, A2> match(llvm::Value*& value, llvm::Module* _m) const
{
llvm::Value* v1 = {};
llvm::Value* v2 = {};
if (auto i = llvm::dyn_cast_or_null<llvm::SelectInst>(value))
{
v1 = i->getOperand(2);
v2 = i->getOperand(1);
if (auto j = llvm::dyn_cast<llvm::ICmpInst>(i->getOperand(0)); j && j->getPredicate() == pred)
{
if (v1 == j->getOperand(0) && v2 == j->getOperand(1))
{
if (auto r1 = a1.match(v1, _m); v1)
{
if (auto r2 = a2.match(v2, _m); v2)
{
return std::tuple_cat(r1, r2);
}
}
}
}
}
value = nullptr;
return {};
}
};
template <typename A1, typename A2, typename T = llvm_common_t<A1, A2>>
struct llvm_add_sat
{
using type = T;
llvm_expr_t<A1> a1;
llvm_expr_t<A2> a2;
static_assert(llvm_value_t<T>::is_sint || llvm_value_t<T>::is_uint, "llvm_add_sat<>: invalid type");
static constexpr bool is_ok = llvm_value_t<T>::is_sint || llvm_value_t<T>::is_uint;
static constexpr auto intr = llvm_value_t<T>::is_sint ? llvm::Intrinsic::sadd_sat : llvm::Intrinsic::uadd_sat;
static llvm::Function* get_add_sat(llvm::IRBuilder<>* ir)
{
const auto _module = ir->GetInsertBlock()->getParent()->getParent();
return llvm::Intrinsic::getDeclaration(_module, intr, {llvm_value_t<T>::get_type(ir->getContext())});
}
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
const auto v1 = a1.eval(ir);
const auto v2 = a2.eval(ir);
if (llvm::isa<llvm::Constant>(v1) && llvm::isa<llvm::Constant>(v2))
{
const auto sum = ir->CreateAdd(v1, v2);
if constexpr (llvm_value_t<T>::is_sint)
{
const auto max = llvm::ConstantInt::get(v1->getType(), llvm::APInt::getSignedMaxValue(llvm_value_t<T>::esize));
const auto sat = ir->CreateXor(ir->CreateAShr(v1, llvm_value_t<T>::esize - 1), max); // Max -> min if v1 < 0
const auto ovf = ir->CreateAnd(ir->CreateXor(v2, sum), ir->CreateNot(ir->CreateXor(v1, v2))); // Get overflow
return ir->CreateSelect(ir->CreateICmpSLT(ovf, llvm::ConstantInt::get(v1->getType(), 0)), sat, sum);
}
if constexpr (llvm_value_t<T>::is_uint)
{
const auto max = llvm::ConstantInt::get(v1->getType(), llvm::APInt::getMaxValue(llvm_value_t<T>::esize));
return ir->CreateSelect(ir->CreateICmpULT(sum, v1), max, sum);
}
}
return ir->CreateCall(get_add_sat(ir), {v1, v2});
}
llvm_match_tuple<A1, A2> match(llvm::Value*& value, llvm::Module* _m) const
{
llvm::Value* v1 = {};
llvm::Value* v2 = {};
if (auto i = llvm::dyn_cast_or_null<llvm::CallInst>(value); i && i->getIntrinsicID() == intr)
{
v1 = i->getOperand(0);
v2 = i->getOperand(1);
if (auto r1 = a1.match(v1, _m); v1)
{
if (auto r2 = a2.match(v2, _m); v2)
{
return std::tuple_cat(r1, r2);
}
}
v1 = i->getOperand(0);
v2 = i->getOperand(1);
// Argument order does not matter here, so retry with the operands swapped
if (auto r1 = a1.match(v2, _m); v2)
{
if (auto r2 = a2.match(v1, _m); v1)
{
return std::tuple_cat(r1, r2);
}
}
}
value = nullptr;
return {};
}
};
template <typename A1, typename A2, typename T = llvm_common_t<A1, A2>>
struct llvm_sub_sat
{
using type = T;
llvm_expr_t<A1> a1;
llvm_expr_t<A2> a2;
static_assert(llvm_value_t<T>::is_sint || llvm_value_t<T>::is_uint, "llvm_sub_sat<>: invalid type");
static constexpr bool is_ok = llvm_value_t<T>::is_sint || llvm_value_t<T>::is_uint;
static constexpr auto intr = llvm_value_t<T>::is_sint ? llvm::Intrinsic::ssub_sat : llvm::Intrinsic::usub_sat;
static llvm::Function* get_sub_sat(llvm::IRBuilder<>* ir)
{
const auto _module = ir->GetInsertBlock()->getParent()->getParent();
return llvm::Intrinsic::getDeclaration(_module, intr, {llvm_value_t<T>::get_type(ir->getContext())});
}
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
const auto v1 = a1.eval(ir);
const auto v2 = a2.eval(ir);
if (llvm::isa<llvm::Constant>(v1) && llvm::isa<llvm::Constant>(v2))
{
const auto dif = ir->CreateSub(v1, v2);
if constexpr (llvm_value_t<T>::is_sint)
{
const auto max = llvm::ConstantInt::get(v1->getType(), llvm::APInt::getSignedMaxValue(llvm_value_t<T>::esize));
const auto sat = ir->CreateXor(ir->CreateAShr(v1, llvm_value_t<T>::esize - 1), max); // Max -> min if v1 < 0
const auto ovf = ir->CreateAnd(ir->CreateXor(v1, dif), ir->CreateXor(v1, v2)); // Get overflow (subtraction)
return ir->CreateSelect(ir->CreateICmpSLT(ovf, llvm::ConstantInt::get(v1->getType(), 0)), sat, dif);
}
if constexpr (llvm_value_t<T>::is_uint)
{
return ir->CreateSelect(ir->CreateICmpULT(v1, v2), llvm::ConstantInt::get(v1->getType(), 0), dif);
}
}
return ir->CreateCall(get_sub_sat(ir), {v1, v2});
}
llvm_match_tuple<A1, A2> match(llvm::Value*& value, llvm::Module* _m) const
{
llvm::Value* v1 = {};
llvm::Value* v2 = {};
if (auto i = llvm::dyn_cast_or_null<llvm::CallInst>(value); i && i->getIntrinsicID() == intr)
{
v1 = i->getOperand(0);
v2 = i->getOperand(1);
if (auto r1 = a1.match(v1, _m); v1)
{
if (auto r2 = a2.match(v2, _m); v2)
{
return std::tuple_cat(r1, r2);
}
}
}
value = nullptr;
return {};
}
};
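// llvm_add_sat/llvm_sub_sat normally lower to the llvm.[su]add.sat / llvm.[su]sub.sat
// intrinsics; the manual cmp/select expansion in eval() is only taken when every operand is
// a constant, so the result can be folded immediately. Illustrative sketch (comment only),
// assuming add_sat()/sub_sat() helpers elsewhere construct these nodes:
//
//   value_t<u8[16]> a = ..., b = ...;
//   auto r = eval(add_sat(a, b));   // call <16 x i8> @llvm.uadd.sat.v16i8(a, b)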
template <typename A1, typename I2, typename T = llvm_common_t<A1>, typename U = llvm_common_t<I2>>
struct llvm_extract
{
using type = std::remove_extent_t<T>;
llvm_expr_t<A1> a1;
llvm_expr_t<I2> i2;
static_assert(llvm_value_t<T>::is_vector, "llvm_extract<>: invalid type");
static_assert(llvm_value_t<U>::is_int && !llvm_value_t<U>::is_vector, "llvm_extract<>: invalid index type");
static constexpr bool is_ok = llvm_value_t<T>::is_vector &&
llvm_value_t<U>::is_int && !llvm_value_t<U>::is_vector;
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
const auto v1 = a1.eval(ir);
const auto v2 = i2.eval(ir);
return ir->CreateExtractElement(v1, v2);
}
llvm_match_tuple<A1, I2> match(llvm::Value*& value, llvm::Module* _m) const
{
llvm::Value* v1 = {};
llvm::Value* v2 = {};
if (auto i = llvm::dyn_cast_or_null<llvm::ExtractElementInst>(value))
{
v1 = i->getOperand(0);
v2 = i->getOperand(1);
if (auto r1 = a1.match(v1, _m); v1)
{
if (auto r2 = i2.match(v2, _m); v2)
{
return std::tuple_cat(r1, r2);
}
}
}
value = nullptr;
return {};
}
};
template <typename A1, typename I2, typename A3, typename T = llvm_common_t<A1>, typename U = llvm_common_t<I2>, typename V = llvm_common_t<A3>>
struct llvm_insert
{
using type = T;
llvm_expr_t<A1> a1;
llvm_expr_t<I2> i2;
llvm_expr_t<A3> a3;
static_assert(llvm_value_t<T>::is_vector, "llvm_insert<>: invalid type");
static_assert(llvm_value_t<U>::is_int && !llvm_value_t<U>::is_vector, "llvm_insert<>: invalid index type");
static_assert(std::is_same_v<V, std::remove_extent_t<T>>, "llvm_insert<>: invalid element type");
static constexpr bool is_ok = llvm_extract<A1, I2>::is_ok &&
std::is_same_v<V, std::remove_extent_t<T>>;
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
const auto v1 = a1.eval(ir);
const auto v2 = i2.eval(ir);
const auto v3 = a3.eval(ir);
return ir->CreateInsertElement(v1, v3, v2);
}
llvm_match_tuple<A1, I2, A3> match(llvm::Value*& value, llvm::Module* _m) const
{
llvm::Value* v1 = {};
llvm::Value* v2 = {};
llvm::Value* v3 = {};
if (auto i = llvm::dyn_cast_or_null<llvm::InsertElementInst>(value))
{
v1 = i->getOperand(0);
v2 = i->getOperand(2);
v3 = i->getOperand(1);
if (auto r1 = a1.match(v1, _m); v1)
{
if (auto r2 = i2.match(v2, _m); v2)
{
if (auto r3 = a3.match(v3, _m); v3)
{
return std::tuple_cat(r1, r2, r3);
}
}
}
}
value = nullptr;
return {};
}
};
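// Element access: llvm_extract/llvm_insert wrap extractelement/insertelement. Note that
// llvm_insert::eval() receives the inserted value as its third expression but passes it as
// the second argument of CreateInsertElement, matching the IR operand order (vec, elt, idx).
// Illustrative sketch (comment only), assuming extract()/insert() helpers with this
// argument order exist elsewhere in this header:
//
//   value_t<u32[4]> v = ...;
//   auto x = eval(extract(v, 2));          // extractelement <4 x i32> v, 2
//   auto w = eval(insert(v, 2, x * x));    // insertelement <4 x i32> v, (mul x, x), 2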
template <typename U, typename A1, typename T = llvm_common_t<A1>>
struct llvm_splat
{
using type = U;
llvm_expr_t<A1> a1;
static_assert(!llvm_value_t<T>::is_vector, "llvm_splat<>: invalid type");
static_assert(llvm_value_t<U>::is_vector, "llvm_splat<>: invalid result type");
static_assert(std::is_same_v<T, std::remove_extent_t<U>>, "llvm_splat<>: incompatible splat type");
static constexpr bool is_ok =
!llvm_value_t<T>::is_vector &&
llvm_value_t<U>::is_vector &&
std::is_same_v<T, std::remove_extent_t<U>>;
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
const auto v1 = a1.eval(ir);
return ir->CreateVectorSplat(llvm_value_t<U>::is_vector, v1);
}
llvm_match_tuple<A1> match(llvm::Value*& value, llvm::Module* _m) const
{
llvm::Value* v1 = {};
if (auto i = llvm::dyn_cast_or_null<llvm::ShuffleVectorInst>(value))
{
if (llvm::isa<llvm::ConstantAggregateZero>(i->getOperand(1)) || llvm::isa<llvm::UndefValue>(i->getOperand(1)))
{
static constexpr int zero_array[llvm_value_t<U>::is_vector]{};
if (auto j = llvm::dyn_cast<llvm::InsertElementInst>(i->getOperand(0)); j && i->getShuffleMask().equals(zero_array))
{
if (llvm::cast<llvm::ConstantInt>(j->getOperand(2))->isZero())
{
v1 = j->getOperand(1);
if (auto r1 = a1.match(v1, _m); v1)
{
return r1;
}
}
}
}
}
value = nullptr;
return {};
}
};
template <uint N, typename A1, typename T = llvm_common_t<A1>>
struct llvm_zshuffle
{
using type = std::remove_extent_t<T>[N];
llvm_expr_t<A1> a1;
int index_array[N];
static_assert(llvm_value_t<T>::is_vector, "llvm_zshuffle<>: invalid type");
static constexpr bool is_ok = llvm_value_t<T>::is_vector != 0;
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
const auto v1 = a1.eval(ir);
return ir->CreateShuffleVector(v1, llvm::ConstantAggregateZero::get(v1->getType()), index_array);
}
llvm_match_tuple<A1> match(llvm::Value*& value, llvm::Module* _m) const
{
llvm::Value* v1 = {};
if (auto i = llvm::dyn_cast_or_null<llvm::ShuffleVectorInst>(value))
{
v1 = i->getOperand(0);
if (auto z = llvm::dyn_cast<llvm::ConstantAggregateZero>(i->getOperand(1)); z && z->getType() == v1->getType())
{
if (i->getShuffleMask().equals(index_array))
{
if (auto r1 = a1.match(v1, _m); v1)
{
return r1;
}
}
}
}
value = nullptr;
return {};
}
};
template <uint N, typename A1, typename A2, typename T = llvm_common_t<A1, A2>>
struct llvm_shuffle2
{
using type = std::remove_extent_t<T>[N];
llvm_expr_t<A1> a1;
llvm_expr_t<A2> a2;
int index_array[N];
static_assert(llvm_value_t<T>::is_vector, "llvm_shuffle2<>: invalid type");
static constexpr bool is_ok = llvm_value_t<T>::is_vector != 0;
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
const auto v1 = a1.eval(ir);
const auto v2 = a2.eval(ir);
return ir->CreateShuffleVector(v1, v2, index_array);
}
llvm_match_tuple<A1, A2> match(llvm::Value*& value, llvm::Module* _m) const
{
llvm::Value* v1 = {};
llvm::Value* v2 = {};
if (auto i = llvm::dyn_cast_or_null<llvm::ShuffleVectorInst>(value))
{
v1 = i->getOperand(0);
v2 = i->getOperand(1);
if (v1->getType() == v2->getType() && v1->getType() == llvm_value_t<T>::get_type(v1->getContext()))
{
if (i->getShuffleMask().equals(index_array))
{
if (auto r1 = a1.match(v1, _m); v1)
{
if (auto r2 = a2.match(v2, _m); v2)
{
return std::tuple_cat(r1, r2);
}
}
}
}
}
value = nullptr;
return {};
}
};
template <typename A1, typename T = llvm_common_t<A1>>
struct llvm_ctlz
{
using type = T;
llvm_expr_t<A1> a1;
static_assert(llvm_value_t<T>::is_sint || llvm_value_t<T>::is_uint, "llvm_ctlz<>: invalid type");
static constexpr bool is_ok = llvm_value_t<T>::is_sint || llvm_value_t<T>::is_uint;
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
llvm::Value* v = a1.eval(ir);
if (llvm::isa<llvm::Constant>(v))
{
return llvm::ConstantFoldInstruction(ir->CreateIntrinsic(llvm::Intrinsic::ctlz, {v->getType()}, {v, ir->getFalse()}), llvm::DataLayout(""));
}
return ir->CreateIntrinsic(llvm::Intrinsic::ctlz, {v->getType()}, {v, ir->getFalse()});
}
llvm_match_tuple<A1> match(llvm::Value*& value, llvm::Module* _m) const
{
llvm::Value* v1 = {};
if (auto i = llvm::dyn_cast_or_null<llvm::CallInst>(value); i && i->getIntrinsicID() == llvm::Intrinsic::ctlz)
{
v1 = i->getOperand(0);
if (i->getOperand(2) == llvm::ConstantInt::getFalse(value->getContext()))
{
if (auto r1 = a1.match(v1, _m); v1)
{
return r1;
}
}
}
value = nullptr;
return {};
}
};
template <typename A1, typename T = llvm_common_t<A1>>
struct llvm_ctpop
{
using type = T;
llvm_expr_t<A1> a1;
static_assert(llvm_value_t<T>::is_sint || llvm_value_t<T>::is_uint, "llvm_ctpop<>: invalid type");
static constexpr bool is_ok = llvm_value_t<T>::is_sint || llvm_value_t<T>::is_uint;
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
llvm::Value* v = a1.eval(ir);
if (llvm::isa<llvm::Constant>(v))
{
return llvm::ConstantFoldInstruction(ir->CreateUnaryIntrinsic(llvm::Intrinsic::ctpop, v), llvm::DataLayout(""));
}
return ir->CreateUnaryIntrinsic(llvm::Intrinsic::ctpop, v);
}
llvm_match_tuple<A1> match(llvm::Value*& value, llvm::Module* _m) const
{
llvm::Value* v1 = {};
if (auto i = llvm::dyn_cast_or_null<llvm::CallInst>(value); i && i->getIntrinsicID() == llvm::Intrinsic::ctpop)
{
v1 = i->getOperand(0);
if (auto r1 = a1.match(v1, _m); v1)
{
return r1;
}
}
value = nullptr;
return {};
}
};
template <typename A1, typename A2, typename T = llvm_common_t<A1, A2>>
struct llvm_avg
{
using type = T;
llvm_expr_t<A1> a1;
llvm_expr_t<A2> a2;
static_assert(llvm_value_t<T>::is_sint || llvm_value_t<T>::is_uint, "llvm_avg<>: invalid type");
static constexpr bool is_ok = llvm_value_t<T>::is_sint || llvm_value_t<T>::is_uint;
static constexpr auto cast_op = llvm_value_t<T>::is_sint ? llvm::Instruction::SExt : llvm::Instruction::ZExt;
static llvm::Type* cast_dst_type(llvm::LLVMContext& context)
{
llvm::Type* cast_to = llvm::IntegerType::get(context, llvm_value_t<T>::esize * 2);
if constexpr (llvm_value_t<T>::is_vector != 0)
cast_to = llvm::VectorType::get(cast_to, llvm_value_t<T>::is_vector, false);
return cast_to;
}
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
const auto v1 = a1.eval(ir);
const auto v2 = a2.eval(ir);
const auto dty = cast_dst_type(ir->getContext());
const auto axt = ir->CreateCast(cast_op, v1, dty);
const auto bxt = ir->CreateCast(cast_op, v2, dty);
const auto cxt = llvm::ConstantInt::get(dty, 1, false);
const auto abc = ir->CreateAdd(ir->CreateAdd(axt, bxt), cxt);
return ir->CreateTrunc(ir->CreateLShr(abc, 1), llvm_value_t<T>::get_type(ir->getContext()));
}
llvm_match_tuple<A1, A2> match(llvm::Value*& value, llvm::Module* _m) const
{
llvm::Value* v1 = {};
llvm::Value* v2 = {};
// Only dereference 'value' after the null check implied by dyn_cast_or_null
if (auto i = llvm::dyn_cast_or_null<llvm::CastInst>(value); i && i->getOpcode() == llvm::Instruction::Trunc && i->getSrcTy() == cast_dst_type(value->getContext()))
{
const auto dty = i->getSrcTy();
const auto cxt = llvm::ConstantInt::get(dty, 1, false);
if (auto j = llvm::dyn_cast_or_null<llvm::BinaryOperator>(i->getOperand(0)); j && j->getOpcode() == llvm::Instruction::LShr && j->getOperand(1) == cxt)
{
if (j = llvm::dyn_cast_or_null<llvm::BinaryOperator>(j->getOperand(0)); j && j->getOpcode() == llvm::Instruction::Add && j->getOperand(1) == cxt)
{
if (j = llvm::dyn_cast_or_null<llvm::BinaryOperator>(j->getOperand(0)); j && j->getOpcode() == llvm::Instruction::Add)
{
auto a = llvm::dyn_cast_or_null<llvm::CastInst>(j->getOperand(0));
auto b = llvm::dyn_cast_or_null<llvm::CastInst>(j->getOperand(1));
if (a && b && a->getOpcode() == cast_op && b->getOpcode() == cast_op)
{
v1 = a->getOperand(0);
v2 = b->getOperand(0);
if (auto r1 = a1.match(v1, _m); v1)
{
if (auto r2 = a2.match(v2, _m); v2)
{
return std::tuple_cat(r1, r2);
}
}
}
}
}
}
}
value = nullptr;
return {};
}
};
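// llvm_avg computes a rounded average without intermediate overflow: both operands are
// widened to twice the element size, summed together with +1, shifted right by one and
// truncated back, i.e. trunc((ext(a) + ext(b) + 1) >> 1). For example, with u8 operands
// 250 and 251 the result is (250 + 251 + 1) >> 1 = 251, whereas an 8-bit addition alone
// would have wrapped.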
template <typename A1, typename T = llvm_common_t<A1>>
struct llvm_fsqrt
{
using type = T;
llvm_expr_t<A1> a1;
static_assert(llvm_value_t<T>::is_float, "llvm_fsqrt<>: invalid type");
static constexpr bool is_ok = llvm_value_t<T>::is_float;
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
llvm::Value* v = a1.eval(ir);
if (llvm::isa<llvm::Constant>(v))
{
if (auto c = llvm::ConstantFoldInstruction(ir->CreateUnaryIntrinsic(llvm::Intrinsic::sqrt, v), llvm::DataLayout("")))
{
// Constant folding can fail in some cases (e.g. a negative constant); fall through to the intrinsic call below
return c;
}
}
return ir->CreateUnaryIntrinsic(llvm::Intrinsic::sqrt, v);
}
llvm_match_tuple<A1> match(llvm::Value*& value, llvm::Module* _m) const
{
llvm::Value* v1 = {};
if (auto i = llvm::dyn_cast_or_null<llvm::CallInst>(value); i && i->getIntrinsicID() == llvm::Intrinsic::sqrt)
{
v1 = i->getOperand(0);
if (auto r1 = a1.match(v1, _m); v1)
{
return r1;
}
}
value = nullptr;
return {};
}
};
template <typename A1, typename T = llvm_common_t<A1>>
struct llvm_fabs
{
using type = T;
llvm_expr_t<A1> a1;
static_assert(llvm_value_t<T>::is_float, "llvm_fabs<>: invalid type");
static constexpr bool is_ok = llvm_value_t<T>::is_float;
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
llvm::Value* v = a1.eval(ir);
if (llvm::isa<llvm::Constant>(v))
{
return llvm::ConstantFoldInstruction(ir->CreateUnaryIntrinsic(llvm::Intrinsic::fabs, v), llvm::DataLayout(""));
}
return ir->CreateUnaryIntrinsic(llvm::Intrinsic::fabs, v);
}
llvm_match_tuple<A1> match(llvm::Value*& value, llvm::Module* _m) const
{
llvm::Value* v1 = {};
if (auto i = llvm::dyn_cast_or_null<llvm::CallInst>(value); i && i->getIntrinsicID() == llvm::Intrinsic::fabs)
{
v1 = i->getOperand(0);
if (auto r1 = a1.match(v1, _m); v1)
{
return r1;
}
}
value = nullptr;
return {};
}
};
template <typename A1, typename A2, typename A3, typename T = llvm_common_t<A1, A2, A3>>
struct llvm_fmuladd
{
using type = T;
llvm_expr_t<A1> a1;
llvm_expr_t<A2> a2;
llvm_expr_t<A3> a3;
bool strict_fma;
static_assert(llvm_value_t<T>::is_float, "llvm_fmuladd<>: invalid type");
static constexpr bool is_ok = llvm_value_t<T>::is_float;
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
llvm::Value* v1 = a1.eval(ir);
llvm::Value* v2 = a2.eval(ir);
llvm::Value* v3 = a3.eval(ir);
if (llvm::isa<llvm::Constant>(v1) && llvm::isa<llvm::Constant>(v2) && llvm::isa<llvm::Constant>(v3))
{
return llvm::ConstantFoldInstruction(ir->CreateIntrinsic(llvm::Intrinsic::fma, {v1->getType()}, {v1, v2, v3}), llvm::DataLayout(""));
}
return ir->CreateIntrinsic(strict_fma ? llvm::Intrinsic::fma : llvm::Intrinsic::fmuladd, {v1->getType()}, {v1, v2, v3});
}
llvm_match_tuple<A1, A2, A3> match(llvm::Value*& value, llvm::Module* _m) const
{
llvm::Value* v1 = {};
llvm::Value* v2 = {};
llvm::Value* v3 = {};
if (auto i = llvm::dyn_cast_or_null<llvm::CallInst>(value); i && i->getIntrinsicID() == (strict_fma ? llvm::Intrinsic::fma : llvm::Intrinsic::fmuladd))
{
v1 = i->getOperand(0);
v2 = i->getOperand(1);
v3 = i->getOperand(2);
if (auto r1 = a1.match(v1, _m); v1)
{
if (auto r2 = a2.match(v2, _m); v2)
{
if (auto r3 = a3.match(v3, _m); v3)
{
return std::tuple_cat(r1, r2, r3);
}
}
}
v1 = i->getOperand(0);
v2 = i->getOperand(1);
v3 = i->getOperand(2);
// Multiplication is commutative, so retry with the first two operands swapped
if (auto r1 = a1.match(v2, _m); v2)
{
if (auto r2 = a2.match(v1, _m); v1)
{
if (auto r3 = a3.match(v3, _m); v3)
{
return std::tuple_cat(r1, r2, r3);
}
}
}
}
value = nullptr;
return {};
}
};
template <typename RT, typename... A>
struct llvm_calli
{
using type = RT;
llvm::StringRef iname;
std::tuple<llvm_expr_t<A>...> a;
std::array<usz, std::max<usz>(sizeof...(A), 1)> order_equality_hint = []()
{
std::array<usz, std::max<usz>(sizeof...(A), 1)> r{};
for (usz i = 0; i < r.size(); i++)
{
r[i] = i;
}
return r;
}();
llvm::Value*(*c)(llvm::Value**, llvm::IRBuilder<>*){};
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
return eval(ir, std::make_index_sequence<sizeof...(A)>());
}
template <usz... I>
llvm::Value* eval(llvm::IRBuilder<>* ir, std::index_sequence<I...>) const
{
llvm::Value* v[std::max<usz>(sizeof...(A), 1)]{std::get<I>(a).eval(ir)...};
if (c && (llvm::isa<llvm::Constant>(v[I]) || ...))
{
if (llvm::Value* r = c(v, ir))
{
return r;
}
}
const auto _rty = llvm_value_t<RT>::get_type(ir->getContext());
const auto type = llvm::FunctionType::get(_rty, {v[I]->getType()...}, false);
const auto func = llvm::cast<llvm::Function>(ir->GetInsertBlock()->getParent()->getParent()->getOrInsertFunction(iname, type).getCallee());
return ir->CreateCall(func, v);
}
template <typename F>
llvm_calli& if_const(F func)
{
c = +func;
return *this;
}
template <typename... Args> requires (sizeof...(Args) == sizeof...(A))
llvm_calli& set_order_equality_hint(Args... args)
{
order_equality_hint = {static_cast<usz>(args)...};
return *this;
}
llvm_match_tuple<A...> match(llvm::Value*& value, llvm::Module* _m) const
{
return match(value, _m, std::make_index_sequence<sizeof...(A)>());
}
template <usz... I>
llvm_match_tuple<A...> match(llvm::Value*& value, llvm::Module* _m, std::index_sequence<I...>) const
{
llvm::Value* v[sizeof...(A)]{};
if (auto i = llvm::dyn_cast_or_null<llvm::CallInst>(value))
{
if (auto cf = i->getCalledFunction(); cf && cf->getName() == iname)
{
((v[I] = i->getOperand(I)), ...);
std::tuple<decltype(std::get<I>(a).match(v[I], _m))...> r;
if (((std::get<I>(r) = std::get<I>(a).match(v[I], _m), v[I]) && ...))
{
return std::tuple_cat(std::get<I>(r)...);
}
if constexpr (sizeof...(A) >= 2)
{
if (order_equality_hint[0] == order_equality_hint[1])
{
// Test if it works with the first pair swapped
((v[I <= 1 ? I ^ 1 : I] = i->getOperand(I)), ...);
if (((std::get<I>(r) = std::get<I>(a).match(v[I], _m), v[I]) && ...))
{
return std::tuple_cat(std::get<I>(r)...);
}
}
}
}
}
value = nullptr;
return {};
}
};
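// llvm_calli represents a call to a named helper function taking DSL expressions as
// arguments: if_const() installs an optional constant-folding callback invoked when any
// argument is a constant, and set_order_equality_hint() marks argument positions that
// match() may try in either order. Illustrative sketch (comment only); "spu_example_op"
// is a hypothetical function name which the translator would still have to register:
//
//   template <typename T, typename U>
//   static auto example_op(T&& a, U&& b)
//   {
//       return llvm_calli<u32[4], T, U>{"spu_example_op", {std::forward<T>(a), std::forward<U>(b)}}
//           .set_order_equality_hint(1, 1);   // operands are interchangeable when matching
//   }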
class translator_pass
{
public:
translator_pass() = default;
virtual ~translator_pass() {}
virtual void run(llvm::IRBuilder<>* irb, llvm::Function& func) = 0;
virtual void reset() = 0;
};
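// translator_pass is the interface for custom IR transforms run by the owning translator
// before final compilation (see register_transform_pass below). A minimal do-nothing
// sketch (comment only, hypothetical class name):
//
//   class nop_pass final : public translator_pass
//   {
//   public:
//       void run(llvm::IRBuilder<>*, llvm::Function&) override {}
//       void reset() override {}
//   };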
class cpu_translator
{
protected:
cpu_translator(llvm::Module* _module, bool is_be);
// LLVM context
std::reference_wrapper<llvm::LLVMContext> m_context;
// Module to which all generated code is output
llvm::Module* m_module;
// Execution engine from JIT instance
llvm::ExecutionEngine* m_engine{};
// Endianness, affects vector element numbering (TODO)
bool m_is_be;
// Allow PSHUFB intrinsic
bool m_use_ssse3 = true;
// Allow FMA
bool m_use_fma = false;
// Allow AVX
bool m_use_avx = false;
// Allow Skylake-X tier AVX-512
bool m_use_avx512 = false;
// Allow VNNI
bool m_use_vnni = false;
// Allow GFNI
bool m_use_gfni = false;
// Allow Ice Lake tier AVX-512
bool m_use_avx512_icl = false;
// IR builder
llvm::IRBuilder<>* m_ir = nullptr;
// Customized transformation passes. Technically the intrinsics replacement belongs here.
std::vector<std::unique_ptr<translator_pass>> m_transform_passes;
void initialize(llvm::LLVMContext& context, llvm::ExecutionEngine& engine);
// Run intrinsics replacement pass
void replace_intrinsics(llvm::Function&);
public:
// Register a transformation pass to be run before final compilation by llvm
void register_transform_pass(std::unique_ptr<translator_pass>& pass);
// Delete all transform passes
void clear_transforms();
// Reset internal state of all passes to evict caches and such. Use when resetting a JIT.
void reset_transforms();
// Convert a C++ type to an LLVM type (TODO: remove)
template <typename T>
llvm::Type* GetType()
{
return llvm_value_t<T>::get_type(m_context);
}
template <typename T>
llvm::Type* get_type()
{
return llvm_value_t<T>::get_type(m_context);
}
template <typename R, typename... Args>
llvm::FunctionType* get_ftype()
{
return llvm::FunctionType::get(get_type<R>(), {get_type<Args>()...}, false);
}
template <typename T>
using value_t = llvm_value_t<T>;
template <typename T>
value_t<T> value(llvm::Value* value)
{
if (!value || value->getType() != get_type<T>())
{
fmt::throw_exception("cpu_translator::value<>(): invalid value type");
}
value_t<T> result;
result.value = value;
return result;
}
template <typename T>
auto eval(T&& expr)
{
value_t<typename std::decay_t<T>::type> result;
result.value = expr.eval(m_ir);
return result;
}
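	// Editor's note: a minimal usage sketch (not part of the original header).
	// Inside a derived translator, with value_t<u32[4]> operands a and b:
	//   const auto sum = eval(a + b);            // materializes the add at the current insert point
	//   const auto sat = eval(add_sat(a, b));    // DSL wrapper lowered to the saturating-add helper
	//   const auto hi  = eval(select(a > b, a, b));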
// Call external function: provide name and function pointer
template <typename RetT = void, typename RT, typename... FArgs, LLVMValue... Args>
llvm::CallInst* call(std::string_view lame, RT(*_func)(FArgs...), Args... args)
{
static_assert(sizeof...(FArgs) == sizeof...(Args), "spu_llvm_recompiler::call(): unexpected arg number");
const auto type = llvm::FunctionType::get(get_type<std::conditional_t<std::is_void_v<RetT>, RT, RetT>>(), {args->getType()...}, false);
const auto func = llvm::cast<llvm::Function>(m_module->getOrInsertFunction({lame.data(), lame.size()}, type).getCallee());
#ifdef _WIN32
func->setCallingConv(llvm::CallingConv::Win64);
#endif
m_engine->updateGlobalMapping({lame.data(), lame.size()}, reinterpret_cast<uptr>(_func));
const auto inst = m_ir->CreateCall(func, {args...});
inst->setTailCallKind(llvm::CallInst::TCK_NoTail);
#ifdef _WIN32
inst->setCallingConv(llvm::CallingConv::Win64);
#endif
return inst;
}
template <typename RT, typename... FArgs, DSLValue... Args> requires (sizeof...(Args) != 0)
auto call(std::string_view name, RT(*_func)(FArgs...), Args&&... args)
{
llvm_value_t<RT> r;
r.value = call(name, _func, std::forward<Args>(args).eval(m_ir)...);
return r;
}
template <typename RT, DSLValue... Args>
auto callf(llvm::Function* func, Args&&... args)
{
llvm_value_t<RT> r;
r.value = m_ir->CreateCall(func, {std::forward<Args>(args).eval(m_ir)...});
return r;
}
// Bitcast with immediate constant folding
llvm::Value* bitcast(llvm::Value* val, llvm::Type* type) const;
template <typename T>
llvm::Value* bitcast(llvm::Value* val)
{
return bitcast(val, get_type<T>());
}
template <typename T>
static llvm_placeholder_t<T> match()
{
return {};
}
template <typename T, typename = llvm_common_t<T>>
static auto match_expr(llvm::Value* v, llvm::Module* _m, T&& expr)
{
auto r = expr.match(v, _m);
return std::tuple_cat(std::make_tuple(v != nullptr), r);
}
template <typename T, typename U, typename = llvm_common_t<T, U>>
auto match_expr(T&& arg, U&& expr) -> decltype(std::tuple_cat(std::make_tuple(false), expr.match(std::declval<llvm::Value*&>(), nullptr)))
{
auto v = arg.eval(m_ir);
auto r = expr.match(v, m_module);
return std::tuple_cat(std::make_tuple(v != nullptr), r);
}
template <typename... Types, typename F>
bool match_for(F&& pred)
{
// Execute pred(.) for each type until one of them returns true
return (pred(llvm_placeholder_t<Types>{}) || ...);
}
template <typename T, typename F>
struct expr_t
{
using type = llvm_common_t<T>;
T a;
F match;
llvm::Value* eval(llvm::IRBuilder<>* ir) const
{
return a.eval(ir);
}
};
template <typename T, typename F>
static auto expr(T&& expr, F matcher)
{
return expr_t<T, F>{std::forward<T>(expr), std::move(matcher)};
}
template <typename T, typename = std::enable_if_t<is_llvm_cmp<std::decay_t<T>>::value>>
static auto fcmp_ord(T&& cmp_expr)
{
return llvm_ord{std::forward<T>(cmp_expr)};
}
template <typename T, typename = std::enable_if_t<is_llvm_cmp<std::decay_t<T>>::value>>
static auto fcmp_uno(T&& cmp_expr)
{
return llvm_uno{std::forward<T>(cmp_expr)};
}
template <typename U, typename T, typename = std::enable_if_t<llvm_noncast<U, T>::is_ok>>
static auto noncast(T&& expr)
{
return llvm_noncast<U, T>{std::forward<T>(expr)};
}
template <typename U, typename T, typename = std::enable_if_t<llvm_bitcast<U, T>::is_ok>>
static auto bitcast(T&& expr)
{
return llvm_bitcast<U, T>{std::forward<T>(expr)};
}
template <typename U, typename T, typename = std::enable_if_t<llvm_fpcast<U, T>::is_ok>>
static auto fpcast(T&& expr)
{
return llvm_fpcast<U, T>{std::forward<T>(expr)};
}
template <typename U, typename T, typename = std::enable_if_t<llvm_trunc<U, T>::is_ok>>
static auto trunc(T&& expr)
{
return llvm_trunc<U, T>{std::forward<T>(expr)};
}
template <typename U, typename T, typename = std::enable_if_t<llvm_sext<U, T>::is_ok>>
static auto sext(T&& expr)
{
return llvm_sext<U, T>{std::forward<T>(expr)};
}
template <typename U, typename T, typename = std::enable_if_t<llvm_zext<U, T>::is_ok>>
static auto zext(T&& expr)
{
return llvm_zext<U, T>{std::forward<T>(expr)};
}
template <typename T, typename U, typename V, typename = std::enable_if_t<llvm_select<T, U, V>::is_ok>>
static auto select(T&& c, U&& a, V&& b)
{
return llvm_select<T, U, V>{std::forward<T>(c), std::forward<U>(a), std::forward<V>(b)};
}
template <typename T, typename U, typename = std::enable_if_t<llvm_min<T, U>::is_ok>>
static auto min(T&& a, U&& b)
{
return llvm_min<T, U>{std::forward<T>(a), std::forward<U>(b)};
}
template <typename T, typename U, typename = std::enable_if_t<llvm_min<T, U>::is_ok>>
static auto max(T&& a, U&& b)
{
return llvm_max<T, U>{std::forward<T>(a), std::forward<U>(b)};
}
template <typename T, typename U, typename V, typename = std::enable_if_t<llvm_fshl<T, U, V>::is_ok>>
static auto fshl(T&& a, U&& b, V&& c)
{
return llvm_fshl<T, U, V>{std::forward<T>(a), std::forward<U>(b), std::forward<V>(c)};
}
template <typename T, typename U, typename V, typename = std::enable_if_t<llvm_fshr<T, U, V>::is_ok>>
static auto fshr(T&& a, U&& b, V&& c)
{
return llvm_fshr<T, U, V>{std::forward<T>(a), std::forward<U>(b), std::forward<V>(c)};
}
template <typename T, typename U, typename = std::enable_if_t<llvm_rol<T, U>::is_ok>>
static auto rol(T&& a, U&& b)
{
return llvm_rol<T, U>{std::forward<T>(a), std::forward<U>(b)};
}
template <typename T, typename U, typename = std::enable_if_t<llvm_add_sat<T, U>::is_ok>>
static auto add_sat(T&& a, U&& b)
{
return llvm_add_sat<T, U>{std::forward<T>(a), std::forward<U>(b)};
}
template <typename T, typename U, typename = std::enable_if_t<llvm_sub_sat<T, U>::is_ok>>
static auto sub_sat(T&& a, U&& b)
{
return llvm_sub_sat<T, U>{std::forward<T>(a), std::forward<U>(b)};
}
template <typename T, typename U, typename = std::enable_if_t<llvm_extract<T, U>::is_ok>>
static auto extract(T&& v, U&& i)
{
return llvm_extract<T, U>{std::forward<T>(v), std::forward<U>(i)};
}
template <typename T, typename = std::enable_if_t<llvm_extract<T, llvm_const_int<u32>>::is_ok>>
static auto extract(T&& v, u32 i)
{
return llvm_extract<T, llvm_const_int<u32>>{std::forward<T>(v), llvm_const_int<u32>{i}};
}
template <typename T, typename U, typename V, typename = std::enable_if_t<llvm_insert<T, U, V>::is_ok>>
static auto insert(T&& v, U&& i, V&& e)
{
return llvm_insert<T, U, V>{std::forward<T>(v), std::forward<U>(i), std::forward<V>(e)};
}
template <typename T, typename V, typename = std::enable_if_t<llvm_insert<T, llvm_const_int<u32>, V>::is_ok>>
static auto insert(T&& v, u32 i, V&& e)
{
return llvm_insert<T, llvm_const_int<u32>, V>{std::forward<T>(v), llvm_const_int<u32>{i}, std::forward<V>(e)};
}
template <typename T, typename = std::enable_if_t<llvm_const_int<T>::is_ok>>
static auto splat(u64 c)
{
return llvm_const_int<T>{c};
}
template <typename T, typename = std::enable_if_t<llvm_const_float<T>::is_ok>>
static auto fsplat(f64 c)
{
return llvm_const_float<T>{c};
}
template <typename T, typename U, typename = std::enable_if_t<llvm_splat<T, U>::is_ok>>
static auto vsplat(U&& v)
{
return llvm_splat<T, U>{std::forward<U>(v)};
}
template <typename T, typename... Args, typename = std::enable_if_t<llvm_const_vector<sizeof...(Args), T>::is_ok>>
static auto build(Args... args)
{
return llvm_const_vector<sizeof...(Args), T>{static_cast<std::remove_extent_t<T>>(args)...};
}
template <typename T, typename... Args, typename = std::enable_if_t<llvm_zshuffle<sizeof...(Args), T>::is_ok>>
static auto zshuffle(T&& v, Args... indices)
{
return llvm_zshuffle<sizeof...(Args), T>{std::forward<T>(v), {static_cast<int>(indices)...}};
}
template <typename T, typename U, typename... Args, typename = std::enable_if_t<llvm_shuffle2<sizeof...(Args), T, U>::is_ok>>
static auto shuffle2(T&& v1, U&& v2, Args... indices)
{
return llvm_shuffle2<sizeof...(Args), T, U>{std::forward<T>(v1), std::forward<U>(v2), {static_cast<int>(indices)...}};
}
template <typename T, typename = std::enable_if_t<llvm_ctlz<T>::is_ok>>
static auto ctlz(T&& a)
{
return llvm_ctlz<T>{std::forward<T>(a)};
}
template <typename T, typename = std::enable_if_t<llvm_ctpop<T>::is_ok>>
static auto ctpop(T&& a)
{
return llvm_ctpop<T>{std::forward<T>(a)};
}
// Average: (a + b + 1) >> 1
template <typename T, typename U, typename = std::enable_if_t<llvm_avg<T, U>::is_ok>>
static auto avg(T&& a, U&& b)
{
return llvm_avg<T, U>{std::forward<T>(a), std::forward<U>(b)};
}
template <typename T, typename = std::enable_if_t<llvm_fsqrt<T>::is_ok>>
static auto fsqrt(T&& a)
{
return llvm_fsqrt<T>{std::forward<T>(a)};
}
template <typename T, typename = std::enable_if_t<llvm_fabs<T>::is_ok>>
static auto fabs(T&& a)
{
return llvm_fabs<T>{std::forward<T>(a)};
}
// Optionally opportunistic hardware FMA, can be used if results are identical for all possible input values
template <typename T, typename U, typename V, typename = std::enable_if_t<llvm_fmuladd<T, U, V>::is_ok>>
static auto fmuladd(T&& a, U&& b, V&& c, bool strict_fma)
{
return llvm_fmuladd<T, U, V>{std::forward<T>(a), std::forward<U>(b), std::forward<V>(c), strict_fma};
}
// Opportunistic hardware FMA, can be used if results are identical for all possible input values
template <typename T, typename U, typename V, typename = std::enable_if_t<llvm_fmuladd<T, U, V>::is_ok>>
auto fmuladd(T&& a, U&& b, V&& c)
{
return llvm_fmuladd<T, U, V>{std::forward<T>(a), std::forward<U>(b), std::forward<V>(c), m_use_fma};
}
// Absolute difference
template <typename T, typename U, typename CT = llvm_common_t<T, U>>
static auto absd(T&& a, U&& b)
{
return expr(max(a, b) - min(a, b), [](llvm::Value*& value, llvm::Module* _m) -> llvm_match_tuple<T, U>
{
static const auto M = match<CT>();
if (auto [ok, _max, _min] = match_expr(value, _m, M - M); ok)
{
if (auto [ok1, a, b] = match_expr(_max.value, _m, max(M, M)); ok1 && !a.eq(b))
{
if (auto [ok2, c, d] = match_expr(_min.value, _m, min(M, M)); ok2 && !c.eq(d))
{
if ((a.eq(c) && b.eq(d)) || (a.eq(d) && b.eq(c)))
{
if (auto r1 = llvm_expr_t<T>{}.match(a.value, _m); a.eq())
{
if (auto r2 = llvm_expr_t<U>{}.match(b.value, _m); b.eq())
{
return std::tuple_cat(r1, r2);
}
}
}
}
}
}
value = nullptr;
return {};
});
}
// Infinite-precision shift left
template <typename T, typename U, typename CT = llvm_common_t<T, U>>
auto inf_shl(T&& a, U&& b)
{
static constexpr u32 esz = llvm_value_t<CT>::esize;
return expr(select(b < esz, a << b, splat<CT>(0)), [](llvm::Value*& value, llvm::Module* _m) -> llvm_match_tuple<T, U>
{
static const auto M = match<CT>();
if (auto [ok, b, a, b2] = match_expr(value, _m, select(M < esz, M << M, splat<CT>(0))); ok && b.eq(b2))
{
if (auto r1 = llvm_expr_t<T>{}.match(a.value, _m); a.eq())
{
if (auto r2 = llvm_expr_t<U>{}.match(b.value, _m); b.eq())
{
return std::tuple_cat(r1, r2);
}
}
}
value = nullptr;
return {};
});
}
// Infinite-precision logical shift right (unsigned)
template <typename T, typename U, typename CT = llvm_common_t<T, U>>
auto inf_lshr(T&& a, U&& b)
{
static constexpr u32 esz = llvm_value_t<CT>::esize;
return expr(select(b < esz, a >> b, splat<CT>(0)), [](llvm::Value*& value, llvm::Module* _m) -> llvm_match_tuple<T, U>
{
static const auto M = match<CT>();
if (auto [ok, b, a, b2] = match_expr(value, _m, select(M < esz, M >> M, splat<CT>(0))); ok && b.eq(b2))
{
if (auto r1 = llvm_expr_t<T>{}.match(a.value, _m); a.eq())
{
if (auto r2 = llvm_expr_t<U>{}.match(b.value, _m); b.eq())
{
return std::tuple_cat(r1, r2);
}
}
}
value = nullptr;
return {};
});
}
// Infinite-precision arithmetic shift right (signed)
template <typename T, typename U, typename CT = llvm_common_t<T, U>>
auto inf_ashr(T&& a, U&& b)
{
static constexpr u32 esz = llvm_value_t<CT>::esize;
return expr(a >> select(b > (esz - 1), splat<CT>(esz - 1), b), [](llvm::Value*& value, llvm::Module* _m) -> llvm_match_tuple<T, U>
{
static const auto M = match<CT>();
if (auto [ok, a, b, b2] = match_expr(value, _m, M >> select(M > (esz - 1), splat<CT>(esz - 1), M)); ok && b.eq(b2))
{
if (auto r1 = llvm_expr_t<T>{}.match(a.value, _m); a.eq())
{
if (auto r2 = llvm_expr_t<U>{}.match(b.value, _m); b.eq())
{
return std::tuple_cat(r1, r2);
}
}
}
value = nullptr;
return {};
});
}
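	// Editor's note: an illustrative summary (not part of the original header).
	// For 32-bit elements (esz == 32) the three helpers above reduce to:
	//   inf_shl(a, b)  ->  b < 32 ? a << b : 0
	//   inf_lshr(a, b) ->  b < 32 ? a >> b : 0          (logical)
	//   inf_ashr(a, b) ->  a >> (b > 31 ? 31 : b)       (arithmetic)
	// unlike plain LLVM shifts, whose result is poison once the amount reaches the element size.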
template <typename... Types>
llvm::Function* get_intrinsic(llvm::Intrinsic::ID id)
{
const auto _module = m_ir->GetInsertBlock()->getParent()->getParent();
return llvm::Intrinsic::getDeclaration(_module, id, {get_type<Types>()...});
}
template <typename T1, typename T2>
value_t<u8[16]> gf2p8affineqb(T1 a, T2 b, u8 c)
{
value_t<u8[16]> result;
const auto data0 = a.eval(m_ir);
const auto data1 = b.eval(m_ir);
const auto immediate = (llvm_const_int<u8>{c});
const auto imm8 = immediate.eval(m_ir);
result.value = m_ir->CreateCall(get_intrinsic(llvm::Intrinsic::x86_vgf2p8affineqb_128), {data0, data1, imm8});
return result;
}
template <typename T1, typename T2, typename T3>
value_t<u32[4]> vpdpbusd(T1 a, T2 b, T3 c)
{
value_t<u32[4]> result;
const auto data0 = a.eval(m_ir);
const auto data1 = b.eval(m_ir);
const auto data2 = c.eval(m_ir);
result.value = m_ir->CreateCall(get_intrinsic(llvm::Intrinsic::x86_avx512_vpdpbusd_128), {data0, data1, data2});
return result;
}
template <typename T1, typename T2>
value_t<u8[16]> vpermb(T1 a, T2 b)
{
value_t<u8[16]> result;
const auto data0 = a.eval(m_ir);
const auto index = b.eval(m_ir);
const auto zeros = llvm::ConstantAggregateZero::get(get_type<u8[16]>());
if (auto c = llvm::dyn_cast<llvm::Constant>(index))
{
// Convert VPERMB index back to LLVM vector shuffle mask
v128 mask{};
const auto cv = llvm::dyn_cast<llvm::ConstantDataVector>(c);
if (cv)
{
for (u32 i = 0; i < 16; i++)
{
const u64 b = cv->getElementAsInteger(i);
mask._u8[i] = b & 0xf;
}
}
if (cv || llvm::isa<llvm::ConstantAggregateZero>(c))
{
result.value = llvm::ConstantDataVector::get(m_context, llvm::ArrayRef(reinterpret_cast<const u8*>(&mask), 16));
result.value = m_ir->CreateZExt(result.value, get_type<u32[16]>());
result.value = m_ir->CreateShuffleVector(data0, zeros, result.value);
return result;
}
}
result.value = m_ir->CreateCall(get_intrinsic(llvm::Intrinsic::x86_avx512_permvar_qi_128), {data0, index});
return result;
}
template <typename T1, typename T2, typename T3>
value_t<u8[16]> vperm2b(T1 a, T2 b, T3 c)
{
if (!utils::has_fast_vperm2b())
{
return vperm2b256to128(a, b, c);
}
value_t<u8[16]> result;
const auto data0 = a.eval(m_ir);
const auto data1 = b.eval(m_ir);
const auto index = c.eval(m_ir);
if (auto c = llvm::dyn_cast<llvm::Constant>(index))
{
// Convert VPERM2B index back to LLVM vector shuffle mask
v128 mask{};
const auto cv = llvm::dyn_cast<llvm::ConstantDataVector>(c);
if (cv)
{
for (u32 i = 0; i < 16; i++)
{
const u64 b = cv->getElementAsInteger(i);
mask._u8[i] = b & 0x1f;
}
}
if (cv || llvm::isa<llvm::ConstantAggregateZero>(c))
{
result.value = llvm::ConstantDataVector::get(m_context, llvm::ArrayRef(reinterpret_cast<const u8*>(&mask), 16));
result.value = m_ir->CreateZExt(result.value, get_type<u32[16]>());
result.value = m_ir->CreateShuffleVector(data0, data1, result.value);
return result;
}
}
result.value = m_ir->CreateCall(get_intrinsic(llvm::Intrinsic::x86_avx512_vpermi2var_qi_128), {data0, index, data1});
return result;
}
// Emulate the behavior of VPERM2B by using a 256 bit wide VPERMB
template <typename T1, typename T2, typename T3>
value_t<u8[16]> vperm2b256to128(T1 a, T2 b, T3 c)
{
value_t<u8[16]> result;
const auto data0 = a.eval(m_ir);
const auto data1 = b.eval(m_ir);
const auto index = c.eval(m_ir);
		// May be slower than the non-constant path?
if (auto c = llvm::dyn_cast<llvm::Constant>(index))
{
// Convert VPERM2B index back to LLVM vector shuffle mask
v128 mask{};
const auto cv = llvm::dyn_cast<llvm::ConstantDataVector>(c);
if (cv)
{
for (u32 i = 0; i < 16; i++)
{
const u64 b = cv->getElementAsInteger(i);
mask._u8[i] = b & 0x1f;
}
}
if (cv || llvm::isa<llvm::ConstantAggregateZero>(c))
{
result.value = llvm::ConstantDataVector::get(m_context, llvm::ArrayRef(reinterpret_cast<const u8*>(&mask), 16));
result.value = m_ir->CreateZExt(result.value, get_type<u32[16]>());
result.value = m_ir->CreateShuffleVector(data0, data1, result.value);
return result;
}
}
const auto zeroes = llvm::ConstantAggregateZero::get(get_type<u8[16]>());
const auto zeroes32 = llvm::ConstantAggregateZero::get(get_type<u8[32]>());
value_t<u8[32]> intermediate;
value_t<u8[32]> shuffle;
value_t<u8[32]> shuffleindex;
u8 mask32[32] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31};
u8 mask16[16] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
// insert the second source operand into the same vector as the first source operand and expand to 256 bit width
shuffle.value = llvm::ConstantDataVector::get(m_context, llvm::ArrayRef(reinterpret_cast<const u8*>(&mask32), 32));
shuffle.value = m_ir->CreateZExt(shuffle.value, get_type<u32[32]>());
intermediate.value = m_ir->CreateShuffleVector(data0, data1, shuffle.value);
// expand the shuffle index to 256 bits with zeroes
shuffleindex.value = m_ir->CreateShuffleVector(index, zeroes, shuffle.value);
// permute
intermediate.value = m_ir->CreateCall(get_intrinsic(llvm::Intrinsic::x86_avx512_permvar_qi_256), {intermediate.value, shuffleindex.value});
// convert the 256 bit vector back to 128 bits
result.value = llvm::ConstantDataVector::get(m_context, llvm::ArrayRef(reinterpret_cast<const u8*>(&mask16), 16));
result.value = m_ir->CreateZExt(result.value, get_type<u32[16]>());
result.value = m_ir->CreateShuffleVector(intermediate.value, zeroes32, result.value);
return result;
}
template <typename T1, typename T2, typename T3>
value_t<f32[4]> vfixupimmps(T1 a, T2 b, T3 c, u8 d, u8 e)
{
value_t<f32[4]> result;
const auto data0 = a.eval(m_ir);
const auto data1 = b.eval(m_ir);
const auto data2 = c.eval(m_ir);
const auto immediate = (llvm_const_int<u32>{d});
const auto imm32 = immediate.eval(m_ir);
const auto immediate2 = (llvm_const_int<u8>{e});
const auto imm8 = immediate2.eval(m_ir);
result.value = m_ir->CreateCall(get_intrinsic(llvm::Intrinsic::x86_avx512_mask_fixupimm_ps_128), {data0, data1, data2, imm32, imm8});
return result;
}
llvm::Value* load_const(llvm::GlobalVariable* g, llvm::Value* i, llvm::Type* type = nullptr)
{
return m_ir->CreateLoad(type ? type : g->getValueType(), m_ir->CreateGEP(g->getValueType(), g, {m_ir->getInt64(0), m_ir->CreateZExtOrTrunc(i, get_type<u64>())}));
}
template <typename T>
llvm::Value* load_const(llvm::GlobalVariable* g, llvm::Value* i)
{
return load_const(g, i, get_type<T>());
}
template <typename T, typename I> requires requires () { std::declval<I>().eval(std::declval<llvm::IRBuilder<>*>()); }
value_t<T> load_const(llvm::GlobalVariable* g, I i)
{
value_t<T> result;
result.value = load_const<T>(g, i.eval(m_ir));
return result;
}
template <typename T>
llvm::GlobalVariable* make_local_variable(T initializing_value)
{
return new llvm::GlobalVariable(*m_module, get_type<T>(), false, llvm::GlobalVariable::PrivateLinkage, llvm::ConstantInt::get(get_type<T>(), initializing_value));
}
template <typename R = v128>
std::pair<bool, R> get_const_vector(llvm::Value*, u32 pos, u32 = __builtin_LINE());
template <typename T = v128>
llvm::Constant* make_const_vector(T, llvm::Type*, u32 = __builtin_LINE());
template <typename T>
llvm::KnownBits get_known_bits(T a)
{
return llvm::computeKnownBits(a.eval(m_ir), m_module->getDataLayout());
}
template <typename T>
llvm::KnownBits kbc(T value)
{
return llvm::KnownBits::makeConstant(llvm::APInt(sizeof(T) * 8, u64(value)));
}
private:
// Custom intrinsic table
std::unordered_map<std::string_view, std::function<llvm::Value*(llvm::CallInst*)>> m_intrinsics;
public:
// Call custom intrinsic by name
template <typename RT, typename... Args>
llvm::CallInst* _calli(std::string_view name, Args... args)
{
const auto type = llvm::FunctionType::get(get_type<RT>(), {args->getType()...}, false);
const auto func = llvm::cast<llvm::Function>(m_module->getOrInsertFunction({name.data(), name.size()}, type).getCallee());
return m_ir->CreateCall(func, {args...});
}
// Initialize custom intrinsic
template <typename F>
void register_intrinsic(std::string_view name, F replace_with)
{
if constexpr (std::is_same_v<std::invoke_result_t<F, llvm::CallInst*>, llvm::Value*>)
{
m_intrinsics.try_emplace(name, replace_with);
}
else
{
m_intrinsics.try_emplace(name, [=, this](llvm::CallInst* ci)
{
return replace_with(ci).eval(m_ir);
});
}
}
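	// Editor's sketch: hypothetical registration of a replacement for the custom
	// "x86_pshufb" intrinsic emitted by pshufb() below (not part of the original header):
	//   register_intrinsic("x86_pshufb", [this](llvm::CallInst* ci) -> llvm::Value*
	//   {
	//       const auto data = ci->getOperand(0);
	//       const auto index = ci->getOperand(1);
	//       return build_target_specific_shuffle(data, index); // hypothetical helper
	//   });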
// Finalize processing
void run_transforms(llvm::Function&);
	// Erase store instructions of the provided values
void erase_stores(llvm::ArrayRef<llvm::Value*> args);
template <typename... Args>
void erase_stores(Args... args)
{
erase_stores({args.value...});
}
template <typename T, typename U>
static auto pshufb(T&& a, U&& b)
{
return llvm_calli<u8[16], T, U>{"x86_pshufb", {std::forward<T>(a), std::forward<U>(b)}}.if_const([](llvm::Value* args[], llvm::IRBuilder<>* ir) -> llvm::Value*
{
const auto zeros = llvm::ConstantAggregateZero::get(llvm_value_t<u8[16]>::get_type(ir->getContext()));
if (auto c = llvm::dyn_cast<llvm::Constant>(args[1]))
{
// Convert PSHUFB index back to LLVM vector shuffle mask
v128 mask{};
const auto cv = llvm::dyn_cast<llvm::ConstantDataVector>(c);
if (cv)
{
for (u32 i = 0; i < 16; i++)
{
const u64 b = cv->getElementAsInteger(i);
mask._u8[i] = b < 128 ? b % 16 : 16;
}
}
if (cv || llvm::isa<llvm::ConstantAggregateZero>(c))
{
llvm::Value* r = nullptr;
r = llvm::ConstantDataVector::get(ir->getContext(), llvm::ArrayRef(reinterpret_cast<const u8*>(&mask), 16));
r = ir->CreateZExt(r, llvm_value_t<u32[16]>::get_type(ir->getContext()));
r = ir->CreateShuffleVector(args[0], zeros, r);
return r;
}
}
return nullptr;
});
}
// (m << 3) >= 0 ? a : b
template <typename T, typename U, typename V>
static auto select_by_bit4(T&& m, U&& a, V&& b)
{
return llvm_calli<u8[16], T, U, V>{"any_select_by_bit4", {std::forward<T>(m), std::forward<U>(a), std::forward<V>(b)}};
}
template <typename T, typename = std::enable_if_t<std::is_same_v<llvm_common_t<T>, f32[4]>>>
static auto fre(T&& a)
{
#if defined(ARCH_X64)
return llvm_calli<f32[4], T>{"llvm.x86.sse.rcp.ps", {std::forward<T>(a)}};
#elif defined(ARCH_ARM64)
return llvm_calli<f32[4], T>{"llvm.aarch64.neon.frecpe.v4f32", {std::forward<T>(a)}};
#endif
}
template <typename T, typename = std::enable_if_t<std::is_same_v<llvm_common_t<T>, f32[4]>>>
static auto frsqe(T&& a)
{
#if defined(ARCH_X64)
return llvm_calli<f32[4], T>{"llvm.x86.sse.rsqrt.ps", {std::forward<T>(a)}};
#elif defined(ARCH_ARM64)
return llvm_calli<f32[4], T>{"llvm.aarch64.neon.frsqrte.v4f32", {std::forward<T>(a)}};
#endif
}
template <typename T, typename U, typename = std::enable_if_t<std::is_same_v<llvm_common_t<T, U>, f32[4]>>>
static auto fmax(T&& a, U&& b)
{
#if defined(ARCH_X64)
return llvm_calli<f32[4], T, U>{"llvm.x86.sse.max.ps", {std::forward<T>(a), std::forward<U>(b)}};
#elif defined(ARCH_ARM64)
return llvm_calli<f32[4], T, U>{"llvm.aarch64.neon.fmax.v4f32", {std::forward<T>(a), std::forward<U>(b)}};
#endif
}
template <typename T, typename U, typename = std::enable_if_t<std::is_same_v<llvm_common_t<T, U>, f32[4]>>>
static auto fmin(T&& a, U&& b)
{
#if defined(ARCH_X64)
return llvm_calli<f32[4], T, U>{"llvm.x86.sse.min.ps", {std::forward<T>(a), std::forward<U>(b)}};
#elif defined(ARCH_ARM64)
return llvm_calli<f32[4], T, U>{"llvm.aarch64.neon.fmin.v4f32", {std::forward<T>(a), std::forward<U>(b)}};
#endif
}
template <typename T, typename U, typename = std::enable_if_t<std::is_same_v<llvm_common_t<T, U>, u8[16]>>>
static auto vdbpsadbw(T&& a, U&& b, u8 c)
{
return llvm_calli<u16[8], T, U, llvm_const_int<u32>>{"llvm.x86.avx512.dbpsadbw.128", {std::forward<T>(a), std::forward<U>(b), llvm_const_int<u32>{c}}};
}
template <typename T, typename U, typename = std::enable_if_t<std::is_same_v<llvm_common_t<T, U>, f32[4]>>>
static auto vrangeps(T&& a, U&& b, u8 c, u8 d)
{
return llvm_calli<f32[4], T, U, llvm_const_int<u32>, T, llvm_const_int<u8>>{"llvm.x86.avx512.mask.range.ps.128", {std::forward<T>(a), std::forward<U>(b), llvm_const_int<u32>{c}, std::forward<T>(a), llvm_const_int<u8>{d}}};
}
};
// Format llvm::TypeSize
template <>
struct fmt_unveil<llvm::TypeSize, void>
{
using type = usz;
static inline usz get(const llvm::TypeSize& arg)
{
return arg;
}
};
// Inline assembly wrappers.
// TODO: Move these to proper location and replace macros with templates
static inline
llvm::InlineAsm* compile_inline_asm(
llvm::Type* returnType,
llvm::ArrayRef<llvm::Type*> argTypes,
const std::string& code,
const std::string& constraints)
{
const auto callSig = llvm::FunctionType::get(returnType, argTypes, false);
return llvm::InlineAsm::get(callSig, code, constraints, true, false);
}
// Helper for ASM generation with dynamic number of arguments
static inline
llvm::CallInst* llvm_asm(
llvm::IRBuilder<>* irb,
const std::string& asm_,
llvm::ArrayRef<llvm::Value*> args,
const std::string& constraints,
llvm::LLVMContext& context)
{
llvm::ArrayRef<llvm::Type*> types_ref = std::nullopt;
std::vector<llvm::Type*> types;
types.reserve(args.size());
if (!args.empty())
{
for (const auto& arg : args)
{
types.push_back(arg->getType());
}
types_ref = types;
}
auto return_type = llvm::Type::getVoidTy(context);
auto callee = compile_inline_asm(return_type, types_ref, asm_, constraints);
auto c = irb->CreateCall(callee, args);
c->addFnAttr(llvm::Attribute::AlwaysInline);
return c;
}
#define LLVM_ASM(asm_, args, constraints, irb, ctx)\
llvm_asm(irb, asm_, args, constraints, ctx)
// Helper for ASM generation with 0 args
#define LLVM_ASM_VOID(asm_, irb, ctx)\
llvm_asm(irb, asm_, {}, "", ctx)
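// Editor's sketch: illustrative use of the wrappers above (not part of the
// original header), assuming a live IRBuilder* irb and LLVMContext ctx:
//   LLVM_ASM_VOID("nop", irb, ctx);                       // emit a raw nop
//   LLVM_ASM("str $0, [$1]", args, "r,r", irb, ctx);      // args = {value, address}
// Both expand to llvm_asm(), which builds a void inline-asm callee for the given
// constraint string and marks the resulting call AlwaysInline.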
#endif
| 104,181 | C++ | .h | 3,229 | 29.33323 | 224 | 0.649552 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,973 | Hypervisor.h | RPCS3_rpcs3/rpcs3/Emu/CPU/Hypervisor.h |
#pragma once
#include <util/types.hpp>
namespace rpcs3
{
#if defined(ARCH_X64)
union hypervisor_context_t
{
u64 regs[1];
struct
{
u64 rsp;
} x86;
};
static_assert(sizeof(hypervisor_context_t) == 8);
#else
union alignas(16) hypervisor_context_t
{
u64 regs[16];
struct
{
u64 pc;
u64 sp;
u64 x18;
u64 x19;
u64 x20;
u64 x21;
u64 x22;
u64 x23;
u64 x24;
u64 x25;
u64 x26;
u64 x27;
u64 x28;
u64 x29;
u64 x30;
// x0-x17 unused
} aarch64;
};
#endif
}
| 568 | C++ | .h | 40 | 10.05 | 51 | 0.589147 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,974 | AArch64Common.h | RPCS3_rpcs3/rpcs3/Emu/CPU/Backends/AArch64/AArch64Common.h |
#pragma once
#include <util/types.hpp>
#include "../../CPUTranslator.h"
namespace aarch64
{
enum gpr : s32
{
x0 = 0,
x1, x2, x3, x4, x5, x6, x7, x8, x9,
x10, x11, x12, x13, x14, x15, x16, x17, x18, x19,
x20, x21, x22, x23, x24, x25, x26, x27, x28, x29, x30
};
enum spr : s32
{
xzr = 0,
pc,
sp
};
static const char* gpr_names[] =
{
"x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9",
"x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19",
"x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x29", "x30"
};
static const char* spr_names[] =
{
"xzr", "pc", "sp"
};
static const char* spr_asm_names[] =
{
"xzr", ".", "sp"
};
std::string get_cpu_brand();
}
| 853 | C++ | .h | 34 | 19.176471 | 83 | 0.449507 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,975 | AArch64ASM.h | RPCS3_rpcs3/rpcs3/Emu/CPU/Backends/AArch64/AArch64ASM.h |
#pragma once
#include "AArch64Common.h"
namespace aarch64
{
// Micro-assembler
class UASM
{
public:
enum ArgType
{
Register = 0,
SRegister,
Immediate,
LLVMInt,
LLVMPtr,
LLVMReg
};
struct Arg
{
ArgType type;
union
{
llvm::Value* value;
gpr reg;
spr sreg;
s64 imm;
};
std::string to_string(int* id = nullptr) const;
};
protected:
struct compiled_instruction_t
{
std::string asm_;
std::vector<std::string> constraints;
std::vector<llvm::Value*> args;
};
std::vector<compiled_instruction_t> m_instructions;
void emit0(const char* inst);
void emit1(const char* inst, const Arg& arg0, const std::vector<gpr>& clobbered);
void emit2(const char* inst, const Arg& arg0, const Arg& arg1, const std::vector<gpr>& clobbered);
void emit3(const char* inst, const Arg& arg0, const Arg& arg1, const Arg& arg2, const std::vector<gpr>& clobbered);
void emit4(const char* inst, const Arg& arg0, const Arg& arg1, const Arg& arg2, const Arg& arg3, const std::vector<gpr>& clobbered);
void embed_args(compiled_instruction_t& instruction, const std::vector<Arg>& args, const std::vector<gpr>& clobbered);
public:
UASM() = default;
// Convenience wrappers
static Arg Int(llvm::Value* v);
static Arg Imm(s64 v);
static Arg Reg(gpr reg);
static Arg Reg(spr reg);
static Arg Ptr(llvm::Value* v);
static Arg Var(llvm::Value* v);
void mov(gpr dst, gpr src);
void mov(gpr dst, const Arg& src);
void movnt(gpr dst, const Arg& src);
void adr(gpr dst, const Arg& src);
void str(gpr src, gpr base, const Arg& offset);
void str(gpr src, spr base, const Arg& offset);
void str(const Arg& src, gpr base, const Arg& offset);
void str(const Arg& src, spr base, const Arg& offset);
void ldr(gpr dst, gpr base, const Arg& offset);
void ldr(gpr dst, spr base, const Arg& offset);
void stp(gpr src0, gpr src1, gpr base, const Arg& offset);
void stp(gpr src0, gpr src1, spr base, const Arg& offset);
void ldp(gpr dst0, gpr dst1, gpr base, const Arg& offset);
void ldp(gpr dst0, gpr dst1, spr base, const Arg& offset);
void add(spr dst, spr src0, const Arg& src1);
void add(gpr dst, gpr src0, const Arg& src1);
void sub(spr dst, spr src0, const Arg& src1);
void sub(gpr dst, gpr src0, const Arg& src1);
void b(const Arg& target);
void br(gpr target);
void br(const Arg& target);
void ret();
void nop(const std::vector<Arg>& refs = {});
void brk(int mark = 0);
void append(const UASM& other);
void prepend(const UASM& other);
void insert(llvm::IRBuilder<>* irb, llvm::LLVMContext& ctx) const;
};
using ASMBlock = UASM;
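	// Editor's sketch: illustrative usage (not part of the original header),
	// assuming an llvm::IRBuilder* irb and llvm::LLVMContext ctx:
	//   UASM block;
	//   block.mov(x19, x20);                    // x19 <- x20
	//   block.ldr(x0, x19, UASM::Imm(0x10));    // x0 <- [x19 + 0x10]
	//   block.insert(irb, ctx);                 // emitted as inline asm at the current insert point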
}
| 3,167 | C++ | .h | 82 | 29.182927 | 140 | 0.576835 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,976 | AArch64Signal.h | RPCS3_rpcs3/rpcs3/Emu/CPU/Backends/AArch64/AArch64Signal.h |
#pragma once
#include <util/types.hpp>
#ifndef _WIN32
#include <sys/ucontext.h>
#else
using ucontext_t = void;
#endif
namespace aarch64
{
	// Some renamed kernel definitions; we don't need to include kernel headers directly
#pragma pack(push, 1)
#if defined(__linux__)
struct aarch64_cpu_ctx_block
{
u32 magic;
u32 size;
};
struct aarch64_esr_ctx
{
aarch64_cpu_ctx_block head;
u64 esr; // Exception syndrome register
};
#elif defined(__APPLE__)
struct aarch64_exception_state
{
u64 FAR; // Fault address reg
u32 ESR; // Exception syndrome reg (ESR_EL1)
u32 exception_id;
};
struct aarch64_darwin_mcontext64
{
aarch64_exception_state es;
// Other states we don't care about follow this field
};
#endif
#pragma pack(pop)
// Fault reason
enum class fault_reason
{
undefined = 0,
data_read,
data_write,
instruction_execute,
illegal_instruction,
breakpoint
};
fault_reason decode_fault_reason(const ucontext_t* uctx);
}
| 1,130 | C++ | .h | 48 | 18.354167 | 88 | 0.634669 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,977 | AArch64JIT.h | RPCS3_rpcs3/rpcs3/Emu/CPU/Backends/AArch64/AArch64JIT.h |
#pragma once
#ifndef ARCH_ARM64
#error "You have included an arm-only header"
#endif
#include "AArch64Common.h"
#include <unordered_set>
namespace aarch64
{
class UASM;
using ASMBlock = UASM;
// On non-x86 architectures GHC runs stackless. SP is treated as a pointer to scratchpad memory.
// This pass keeps this behavior intact while preserving the expectations of the host's C++ ABI.
class GHC_frame_preservation_pass : public translator_pass
{
public:
struct function_info_t
{
u32 instruction_count;
u32 num_external_calls;
u32 stack_frame_size; // Guessing this properly is critical for vector-heavy functions where spilling is a lot more common
bool clobbers_x30;
bool is_leaf;
};
struct instruction_info_t
{
bool is_call_inst; // Is a function call. This includes a branch to external code.
bool preserve_stack; // Preserve the stack around this call.
bool is_returning; // This instruction "returns" to the next instruction (typically just llvm::CallInst*)
bool callee_is_GHC; // The other function is GHC
bool is_tail_call; // Tail call. Assume it is an exit/terminator.
bool is_indirect; // Indirect call. Target is the first operand.
llvm::Function* callee; // Callee if any
std::string callee_name; // Name of the callee.
};
struct config_t
{
bool debug_info = false; // Record debug information
bool use_stack_frames = true; // Allocate a stack frame for each function. The gateway can alternatively manage a global stack to use as scratch.
bool optimize = true; // Optimize instructions when possible. Set to false when debugging.
u32 hypervisor_context_offset = 0; // Offset within the "thread" object where we can find the hypervisor context (registers configured at gateway).
std::function<bool(const std::string&)> exclusion_callback; // [Optional] Callback run on each function before transform. Return "true" to exclude from frame processing.
std::vector<std::pair<std::string, gpr>> base_register_lookup; // [Optional] Function lookup table to determine the location of the "thread" context.
std::vector<std::string> faux_function_list; // [Optional] List of faux block names to treat as untrusted - typically fake functions representing codecaves.
};
protected:
std::unordered_set<std::string> m_visited_functions;
config_t m_config;
void force_tail_call_terminators(llvm::Function& f);
function_info_t preprocess_function(const llvm::Function& f);
instruction_info_t decode_instruction(const llvm::Function& f, const llvm::Instruction* i);
bool is_ret_instruction(const llvm::Instruction* i);
bool is_inlined_call(const llvm::CallInst* ci);
bool is_faux_function(const std::string& function_name);
gpr get_base_register_for_call(const std::string& callee_name, gpr default_reg = gpr::x19);
void process_leaf_function(llvm::IRBuilder<>* irb, llvm::Function& f);
llvm::BasicBlock::iterator patch_tail_call(
llvm::IRBuilder<>* irb,
llvm::Function& f,
llvm::BasicBlock::iterator where,
const instruction_info_t& instruction_info,
const function_info_t& function_info,
const ASMBlock& frame_epilogue);
public:
GHC_frame_preservation_pass(const config_t& configuration);
~GHC_frame_preservation_pass() = default;
void run(llvm::IRBuilder<>* irb, llvm::Function& f) override;
void reset() override;
};
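	// Editor's sketch: hypothetical wiring of the pass into a translator derived
	// from cpu_translator (not part of the original header; the offset below is made up):
	//   GHC_frame_preservation_pass::config_t cfg{};
	//   cfg.hypervisor_context_offset = 0x1000;   // hypothetical offset of hypervisor_context_t in the thread object
	//   std::unique_ptr<translator_pass> pass = std::make_unique<GHC_frame_preservation_pass>(cfg);
	//   translator.register_transform_pass(pass); // consumed before final compilation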
}
| 3,879 | C++ | .h | 69 | 47.043478 | 186 | 0.649604 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,978 | vm_ptr.h | RPCS3_rpcs3/rpcs3/Emu/Memory/vm_ptr.h |
#pragma once
#include "util/types.hpp"
#include "util/to_endian.hpp"
#include "Utilities/StrFmt.h"
#include "vm.h"
class ppu_thread;
struct ppu_func_opd_t;
namespace vm
{
template <typename T, typename AT>
class _ref_base;
// Enables comparison between comparable types of pointers
template<typename T1, typename T2>
concept PtrComparable = requires (T1* t1, T2* t2) { t1 == t2; };
template <typename T, typename AT>
class _ptr_base
{
AT m_addr;
static_assert(!std::is_pointer_v<T>, "vm::_ptr_base<> error: invalid type (pointer)");
static_assert(!std::is_reference_v<T>, "vm::_ptr_base<> error: invalid type (reference)");
public:
using type = T;
using addr_type = std::remove_cv_t<AT>;
ENABLE_BITWISE_SERIALIZATION;
_ptr_base() = default;
_ptr_base(vm::addr_t addr) noexcept
: m_addr(addr)
{
}
addr_type addr() const
{
return m_addr;
}
void set(addr_type addr)
{
this->m_addr = addr;
}
static _ptr_base make(addr_type addr)
{
_ptr_base result;
result.m_addr = addr;
return result;
}
// Enable only the conversions which are originally possible between pointer types
template <typename T2, typename AT2> requires (std::is_convertible_v<T*, T2*>)
operator _ptr_base<T2, AT2>() const noexcept
{
return vm::cast(m_addr);
}
explicit operator bool() const noexcept
{
return m_addr != 0u;
}
// Get vm pointer to a struct member
template <typename MT, typename T2> requires PtrComparable<T, T2>
_ptr_base<MT, u32> ptr(MT T2::*const mptr) const
{
return vm::cast(vm::cast(m_addr) + offset32(mptr));
}
// Get vm pointer to a struct member with array subscription
template <typename MT, typename T2, typename ET = std::remove_extent_t<MT>> requires PtrComparable<T, T2>
_ptr_base<ET, u32> ptr(MT T2::*const mptr, u32 index) const
{
return vm::cast(vm::cast(m_addr) + offset32(mptr) + u32{sizeof(ET)} * index);
}
// Get vm reference to a struct member
template <typename MT, typename T2> requires PtrComparable<T, T2> && (!std::is_void_v<T>)
_ref_base<MT, u32> ref(MT T2::*const mptr) const
{
return vm::cast(vm::cast(m_addr) + offset32(mptr));
}
// Get vm reference to a struct member with array subscription
template <typename MT, typename T2, typename ET = std::remove_extent_t<MT>> requires PtrComparable<T, T2> && (!std::is_void_v<T>)
_ref_base<ET, u32> ref(MT T2::*const mptr, u32 index) const
{
return vm::cast(vm::cast(m_addr) + offset32(mptr) + u32{sizeof(ET)} * index);
}
// Get vm reference
_ref_base<T, u32> ref() const requires (!std::is_void_v<T>)
{
return vm::cast(m_addr);
}
template <bool Strict = false>
T* get_ptr() const
{
if constexpr (Strict)
{
AUDIT(m_addr);
}
return static_cast<T*>(vm::base(vm::cast(m_addr)));
}
T* operator ->() const requires (!std::is_void_v<T>)
{
return get_ptr<true>();
}
std::add_lvalue_reference_t<T> operator *() const requires (!std::is_void_v<T>)
{
return *get_ptr<true>();
}
std::add_lvalue_reference_t<T> operator [](u32 index) const requires (!std::is_void_v<T>)
{
AUDIT(m_addr);
return *static_cast<T*>(vm::base(vm::cast(m_addr) + u32{sizeof(T)} * index));
}
// Test address for arbitrary alignment: (addr & (align - 1)) == 0
bool aligned(u32 align = alignof(T)) const
{
return (m_addr & (align - 1)) == 0u;
}
// Get type size
static constexpr u32 size() noexcept requires (!std::is_void_v<T>)
{
return sizeof(T);
}
// Get type alignment
static constexpr u32 align() noexcept requires (!std::is_void_v<T>)
{
return alignof(T);
}
_ptr_base<T, u32> operator +() const
{
return vm::cast(m_addr);
}
_ptr_base<T, u32> operator +(u32 count) const requires (!std::is_void_v<T>)
{
return vm::cast(vm::cast(m_addr) + count * size());
}
_ptr_base<T, u32> operator -(u32 count) const requires (!std::is_void_v<T>)
{
return vm::cast(vm::cast(m_addr) - count * size());
}
friend _ptr_base<T, u32> operator +(u32 count, const _ptr_base& ptr) requires (!std::is_void_v<T>)
{
return vm::cast(vm::cast(ptr.m_addr) + count * size());
}
// Pointer difference operator
template <typename T2, typename AT2> requires (std::is_object_v<T2> && std::is_same_v<std::decay_t<T>, std::decay_t<T2>>)
s32 operator -(const _ptr_base<T2, AT2>& right) const
{
return static_cast<s32>(vm::cast(m_addr) - vm::cast(right.m_addr)) / size();
}
_ptr_base operator ++(int) requires (!std::is_void_v<T>)
{
_ptr_base result = *this;
m_addr = vm::cast(m_addr) + size();
return result;
}
_ptr_base& operator ++() requires (!std::is_void_v<T>)
{
m_addr = vm::cast(m_addr) + size();
return *this;
}
_ptr_base operator --(int) requires (!std::is_void_v<T>)
{
_ptr_base result = *this;
m_addr = vm::cast(m_addr) - size();
return result;
}
_ptr_base& operator --() requires (!std::is_void_v<T>)
{
m_addr = vm::cast(m_addr) - size();
return *this;
}
_ptr_base& operator +=(s32 count) requires (!std::is_void_v<T>)
{
m_addr = vm::cast(m_addr) + count * size();
return *this;
}
_ptr_base& operator -=(s32 count) requires (!std::is_void_v<T>)
{
m_addr = vm::cast(m_addr) - count * size();
return *this;
}
std::pair<bool, std::conditional_t<std::is_void_v<T>, char, std::remove_const_t<T>>> try_read() const requires (std::is_copy_constructible_v<T>)
{
alignas(sizeof(T) >= 16 ? 16 : 8) char buf[sizeof(T)]{};
const bool ok = vm::try_access(vm::cast(m_addr), buf, sizeof(T), false);
return { ok, std::bit_cast<decltype(try_read().second)>(buf) };
}
bool try_read(std::conditional_t<std::is_void_v<T>, char, std::remove_const_t<T>>& out) const requires (!std::is_void_v<T>)
{
return vm::try_access(vm::cast(m_addr), std::addressof(out), sizeof(T), false);
}
bool try_write(const std::conditional_t<std::is_void_v<T>, char, T>& _in) const requires (!std::is_void_v<T>)
{
return vm::try_access(vm::cast(m_addr), const_cast<T*>(std::addressof(_in)), sizeof(T), true);
}
// Don't use
auto& raw()
{
return m_addr;
}
};
template<typename AT, typename RT, typename... T>
class _ptr_base<RT(T...), AT>
{
AT m_addr;
public:
using addr_type = std::remove_cv_t<AT>;
ENABLE_BITWISE_SERIALIZATION;
_ptr_base() = default;
_ptr_base(vm::addr_t addr)
: m_addr(addr)
{
}
addr_type addr() const
{
return m_addr;
}
void set(addr_type addr)
{
m_addr = addr;
}
static _ptr_base make(addr_type addr)
{
_ptr_base result;
result.m_addr = addr;
return result;
}
// Conversion to another function pointer
template<typename AT2>
operator _ptr_base<RT(T...), AT2>() const
{
return vm::cast(m_addr);
}
explicit operator bool() const
{
return m_addr != 0u;
}
_ptr_base<RT(T...), u32> operator +() const
{
return vm::cast(m_addr);
}
// Don't use
auto& raw()
{
return m_addr;
}
// Callback; defined in PPUCallback.h, passing context is mandatory
RT operator()(ppu_thread& ppu, T... args) const;
const ppu_func_opd_t& opd() const;
};
template<typename AT, typename RT, typename... T>
class _ptr_base<RT(*)(T...), AT>
{
static_assert(!sizeof(AT), "vm::_ptr_base<> error: use RT(T...) format for functions instead of RT(*)(T...)");
};
// Native endianness pointer to LE data
template<typename T, typename AT = u32> using ptrl = _ptr_base<to_le_t<T>, AT>;
template<typename T, typename AT = u32> using cptrl = ptrl<const T, AT>;
// Native endianness pointer to BE data
template<typename T, typename AT = u32> using ptrb = _ptr_base<to_be_t<T>, AT>;
template<typename T, typename AT = u32> using cptrb = ptrb<const T, AT>;
// BE pointer to LE data
template<typename T, typename AT = u32> using bptrl = _ptr_base<to_le_t<T>, to_be_t<AT>>;
// BE pointer to BE data
template<typename T, typename AT = u32> using bptrb = _ptr_base<to_be_t<T>, to_be_t<AT>>;
// LE pointer to LE data
template<typename T, typename AT = u32> using lptrl = _ptr_base<to_le_t<T>, to_le_t<AT>>;
// LE pointer to BE data
template<typename T, typename AT = u32> using lptrb = _ptr_base<to_be_t<T>, to_le_t<AT>>;
inline namespace ps3_
{
// Default pointer type for PS3 HLE functions (Native endianness pointer to BE data)
template<typename T, typename AT = u32> using ptr = ptrb<T, AT>;
template<typename T, typename AT = u32> using cptr = ptr<const T, AT>;
// Default pointer to pointer type for PS3 HLE functions (Native endianness pointer to BE pointer to BE data)
template<typename T, typename AT = u32, typename AT2 = u32> using pptr = ptr<ptr<T, AT2>, AT>;
template<typename T, typename AT = u32, typename AT2 = u32> using cpptr = pptr<const T, AT, AT2>;
// Default pointer type for PS3 HLE structures (BE pointer to BE data)
template<typename T, typename AT = u32> using bptr = bptrb<T, AT>;
template<typename T, typename AT = u32> using bcptr = bptr<const T, AT>;
// Default pointer to pointer type for PS3 HLE structures (BE pointer to BE pointer to BE data)
template<typename T, typename AT = u32, typename AT2 = u32> using bpptr = bptr<ptr<T, AT2>, AT>;
template<typename T, typename AT = u32, typename AT2 = u32> using bcpptr = bpptr<const T, AT, AT2>;
// Perform static_cast (for example, vm::ptr<void> to vm::ptr<char>)
template<typename CT, typename T, typename AT, typename = decltype(static_cast<to_be_t<CT>*>(std::declval<T*>()))>
inline _ptr_base<to_be_t<CT>, u32> static_ptr_cast(const _ptr_base<T, AT>& other)
{
return vm::cast(other.addr());
}
// Perform const_cast (for example, vm::cptr<char> to vm::ptr<char>)
template<typename CT, typename T, typename AT, typename = decltype(const_cast<to_be_t<CT>*>(std::declval<T*>()))>
inline _ptr_base<to_be_t<CT>, u32> const_ptr_cast(const _ptr_base<T, AT>& other)
{
return vm::cast(other.addr());
}
// Perform reinterpret cast
template <typename CT, typename T, typename AT, typename = decltype(reinterpret_cast<to_be_t<CT>*>(std::declval<T*>()))>
inline _ptr_base<to_be_t<CT>, u32> unsafe_ptr_cast(const _ptr_base<T, AT>& other)
{
return vm::cast(other.addr());
}
}
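	// Editor's sketch: illustrative HLE usage of the aliases above (not part of the
	// original header), where addr is a guest address received as a function argument:
	//   vm::ptr<u32> p = vm::cast(addr);   // native-endian pointer to big-endian u32
	//   if (p && p.aligned())
	//   {
	//       const u32 v = *p;              // dereference reads (and byte-swaps) guest memory
	//       p[1] = v + 1;                  // subscripting advances in sizeof(u32) strides
	//   }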
struct null_t
{
template<typename T, typename AT>
operator _ptr_base<T, AT>() const
{
return _ptr_base<T, AT>{};
}
template<typename T, typename AT>
constexpr bool operator ==(const _ptr_base<T, AT>& ptr) const
{
return !ptr;
}
template<typename T, typename AT>
constexpr bool operator <(const _ptr_base<T, AT>& ptr) const
{
return 0 < ptr.addr();
}
};
// Null pointer convertible to any vm::ptr* type
constexpr null_t null{};
}
template<typename T1, typename AT1, typename T2, typename AT2> requires vm::PtrComparable<T1, T2>
inline bool operator ==(const vm::_ptr_base<T1, AT1>& left, const vm::_ptr_base<T2, AT2>& right)
{
return left.addr() == right.addr();
}
template<typename T1, typename AT1, typename T2, typename AT2> requires vm::PtrComparable<T1, T2>
inline bool operator <(const vm::_ptr_base<T1, AT1>& left, const vm::_ptr_base<T2, AT2>& right)
{
return left.addr() < right.addr();
}
template<typename T1, typename AT1, typename T2, typename AT2> requires vm::PtrComparable<T1, T2>
inline bool operator <=(const vm::_ptr_base<T1, AT1>& left, const vm::_ptr_base<T2, AT2>& right)
{
return left.addr() <= right.addr();
}
template<typename T1, typename AT1, typename T2, typename AT2> requires vm::PtrComparable<T1, T2>
inline bool operator >(const vm::_ptr_base<T1, AT1>& left, const vm::_ptr_base<T2, AT2>& right)
{
return left.addr() > right.addr();
}
template<typename T1, typename AT1, typename T2, typename AT2> requires vm::PtrComparable<T1, T2>
inline bool operator >=(const vm::_ptr_base<T1, AT1>& left, const vm::_ptr_base<T2, AT2>& right)
{
return left.addr() >= right.addr();
}
// Change AT endianness to BE/LE
template<typename T, typename AT, bool Se>
struct to_se<vm::_ptr_base<T, AT>, Se>
{
using type = vm::_ptr_base<T, typename to_se<AT, Se>::type>;
};
// Format pointer
template<typename T, typename AT>
struct fmt_unveil<vm::_ptr_base<T, AT>, void>
{
using type = vm::_ptr_base<T, u32>; // Use only T, ignoring AT
static inline auto get(const vm::_ptr_base<T, AT>& arg)
{
return fmt_unveil<AT>::get(arg.addr());
}
};
template <>
struct fmt_class_string<vm::_ptr_base<const void, u32>, void>
{
static void format(std::string& out, u64 arg);
};
template <typename T>
struct fmt_class_string<vm::_ptr_base<T, u32>, void> : fmt_class_string<vm::_ptr_base<const void, u32>, void>
{
// Classify all pointers as const void*
};
template <>
struct fmt_class_string<vm::_ptr_base<const char, u32>, void>
{
static void format(std::string& out, u64 arg);
};
template <>
struct fmt_class_string<vm::_ptr_base<char, u32>, void> : fmt_class_string<vm::_ptr_base<const char, u32>>
{
// Classify char* as const char*
};
template <usz Size>
struct fmt_class_string<vm::_ptr_base<const char[Size], u32>, void> : fmt_class_string<vm::_ptr_base<const char, u32>>
{
// Classify const char[] as const char*
};
template <usz Size>
struct fmt_class_string<vm::_ptr_base<char[Size], u32>, void> : fmt_class_string<vm::_ptr_base<const char, u32>>
{
// Classify char[] as const char*
};
| 13,288 | C++ | .h | 386 | 31.502591 | 146 | 0.666615 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,979 | vm_var.h | RPCS3_rpcs3/rpcs3/Emu/Memory/vm_var.h |
#pragma once
#include "vm_ptr.h"
#include "util/to_endian.hpp"
namespace vm
{
template <memory_location_t Location = vm::main>
struct page_allocator
{
static inline std::pair<vm::addr_t, u32> alloc(u32 size, u32 align)
{
return {vm::cast(vm::alloc(size, Location, std::max<u32>(align, 0x10000))), size};
}
static inline void dealloc(u32 addr, u32 size) noexcept
{
ensure(vm::dealloc(addr, Location) >= size);
}
};
template <typename T>
struct stack_allocator
{
static inline std::pair<vm::addr_t, u32> alloc(u32 size, u32 align)
{
return T::stack_push(size, align);
}
static inline void dealloc(u32 addr, u32 size) noexcept
{
T::stack_pop_verbose(addr, size);
}
};
// General variable base class
template <typename T, typename A>
class _var_base final : public _ptr_base<T, const u32>
{
using pointer = _ptr_base<T, const u32>;
const u32 m_mem_size;
_var_base(std::pair<vm::addr_t, u32> alloc_info)
: pointer(alloc_info.first)
, m_mem_size(alloc_info.second)
{
}
public:
// Unmoveable object
_var_base(const _var_base&) = delete;
_var_base& operator=(const _var_base&) = delete;
		using enable_bitcopy = std::false_type; // Disable bitcopy inheritance
_var_base()
: _var_base(A::alloc(sizeof(T), alignof(T)))
{
}
_var_base(const T& right)
: _var_base()
{
std::memcpy(pointer::get_ptr(), &right, sizeof(T));
}
~_var_base()
{
if (pointer::addr())
{
A::dealloc(pointer::addr(), m_mem_size);
}
}
};
// Dynamic length array variable specialization
template <typename T, typename A>
class _var_base<T[], A> final : public _ptr_base<T, const u32>
{
using pointer = _ptr_base<T, const u32>;
const u32 m_mem_size;
const u32 m_size;
_var_base(u32 count, std::pair<vm::addr_t, u32> alloc_info)
: pointer(alloc_info.first)
, m_mem_size(alloc_info.second)
, m_size(u32{sizeof(T)} * count)
{
}
public:
_var_base(const _var_base&) = delete;
_var_base& operator=(const _var_base&) = delete;
		using enable_bitcopy = std::false_type; // Disable bitcopy inheritance
_var_base(u32 count)
: _var_base(count, A::alloc(u32{sizeof(T)} * count, alignof(T)))
{
}
// Initialize via the iterator
template <typename I>
_var_base(u32 count, I&& it)
: _var_base(count)
{
std::copy_n(std::forward<I>(it), count, pointer::get_ptr());
}
~_var_base()
{
if (pointer::addr())
{
A::dealloc(pointer::addr(), m_mem_size);
}
}
// Remove operator ->
T* operator->() const = delete;
u32 get_count() const
{
return m_size / u32{sizeof(T)};
}
auto begin() const
{
return *this + 0;
}
auto end() const
{
return *this + get_count();
}
};
// LE variable
template <typename T, typename A>
using varl = _var_base<to_le_t<T>, A>;
// BE variable
template <typename T, typename A>
using varb = _var_base<to_be_t<T>, A>;
inline namespace ps3_
{
// BE variable
template <typename T, typename A = stack_allocator<ppu_thread>>
using var = varb<T, A>;
// Make BE variable initialized from value
template <typename T, typename A = stack_allocator<ppu_thread>>
[[nodiscard]] auto make_var(const T& value)
{
return (varb<T, A>(value));
}
// Make char[] variable initialized from std::string
template <typename A = stack_allocator<ppu_thread>>
[[nodiscard]] auto make_str(const std::string& str)
{
return (_var_base<char[], A>(size32(str) + 1, str.c_str()));
}
// Global HLE variable
template <typename T, uint Count = 1>
struct gvar final : ptr<T>
{
static constexpr u32 alloc_size{sizeof(T) * Count};
static constexpr u32 alloc_align{std::max<u32>(alignof(T), 16)};
};
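		// Editor's sketch: illustrative allocation of guest-visible temporaries inside
		// an HLE function running on a ppu_thread (not part of the original header):
		//   vm::var<u32> count(0);                   // 4 bytes on the guest stack, freed at scope exit
		//   auto path = vm::make_str("/dev_hdd0");   // NUL-terminated char[] in guest memory
		//   // both objects convert implicitly to the vm::ptr / vm::cptr parameters of HLE calls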
} // namespace ps3_
} // namespace vm
| 3,769 | C++ | .h | 144 | 22.916667 | 85 | 0.654414 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,980 | vm_ref.h | RPCS3_rpcs3/rpcs3/Emu/Memory/vm_ref.h |
#pragma once
#include <type_traits>
#include "vm.h"
#include "util/to_endian.hpp"
#ifndef _MSC_VER
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Weffc++"
#endif
namespace vm
{
template <typename T, typename AT>
class _ptr_base;
template <typename T, typename AT>
class _ref_base
{
AT m_addr;
static_assert(!std::is_pointer_v<T>, "vm::_ref_base<> error: invalid type (pointer)");
static_assert(!std::is_reference_v<T>, "vm::_ref_base<> error: invalid type (reference)");
static_assert(!std::is_function_v<T>, "vm::_ref_base<> error: invalid type (function)");
static_assert(!std::is_void_v<T>, "vm::_ref_base<> error: invalid type (void)");
public:
using type = T;
using addr_type = std::remove_cv_t<AT>;
_ref_base(const _ref_base&) = default;
_ref_base(vm::addr_t addr)
: m_addr(addr)
{
}
addr_type addr() const
{
return m_addr;
}
T& get_ref() const
{
return *static_cast<T*>(vm::base(vm::cast(m_addr)));
}
// convert to vm pointer
vm::_ptr_base<T, u32> ptr() const
{
return vm::cast(m_addr);
}
operator std::common_type_t<T>() const
{
return get_ref();
}
operator T&() const
{
return get_ref();
}
T& operator =(const _ref_base& right)
{
return get_ref() = right.get_ref();
}
T& operator =(const std::common_type_t<T>& right) const
{
return get_ref() = right;
}
decltype(auto) operator ++(int) const
{
return get_ref()++;
}
decltype(auto) operator ++() const
{
return ++get_ref();
}
decltype(auto) operator --(int) const
{
return get_ref()--;
}
decltype(auto) operator --() const
{
return --get_ref();
}
template<typename T2>
decltype(auto) operator +=(const T2& right)
{
return get_ref() += right;
}
template<typename T2>
decltype(auto) operator -=(const T2& right)
{
return get_ref() -= right;
}
template<typename T2>
decltype(auto) operator *=(const T2& right)
{
return get_ref() *= right;
}
template<typename T2>
decltype(auto) operator /=(const T2& right)
{
return get_ref() /= right;
}
template<typename T2>
decltype(auto) operator %=(const T2& right)
{
return get_ref() %= right;
}
template<typename T2>
decltype(auto) operator &=(const T2& right)
{
return get_ref() &= right;
}
template<typename T2>
decltype(auto) operator |=(const T2& right)
{
return get_ref() |= right;
}
template<typename T2>
decltype(auto) operator ^=(const T2& right)
{
return get_ref() ^= right;
}
template<typename T2>
decltype(auto) operator <<=(const T2& right)
{
return get_ref() <<= right;
}
template<typename T2>
decltype(auto) operator >>=(const T2& right)
{
return get_ref() >>= right;
}
};
// Native endianness reference to LE data
template<typename T, typename AT = u32> using refl = _ref_base<to_le_t<T>, AT>;
// Native endianness reference to BE data
template<typename T, typename AT = u32> using refb = _ref_base<to_be_t<T>, AT>;
// BE reference to LE data
template<typename T, typename AT = u32> using brefl = _ref_base<to_le_t<T>, to_be_t<AT>>;
// BE reference to BE data
template<typename T, typename AT = u32> using brefb = _ref_base<to_be_t<T>, to_be_t<AT>>;
// LE reference to LE data
template<typename T, typename AT = u32> using lrefl = _ref_base<to_le_t<T>, to_le_t<AT>>;
// LE reference to BE data
template<typename T, typename AT = u32> using lrefb = _ref_base<to_be_t<T>, to_le_t<AT>>;
inline namespace ps3_
{
// default reference for PS3 HLE functions (Native endianness reference to BE data)
template<typename T, typename AT = u32> using ref = refb<T, AT>;
// default reference for PS3 HLE structures (BE reference to BE data)
template<typename T, typename AT = u32> using bref = brefb<T, AT>;
}
}
#ifndef _MSC_VER
#pragma GCC diagnostic pop
#endif
// Change AT endianness to BE/LE
template<typename T, typename AT, bool Se>
struct to_se<vm::_ref_base<T, AT>, Se>
{
using type = vm::_ref_base<T, typename to_se<AT, Se>::type>;
};
// Forbid formatting
template<typename T, typename AT>
struct fmt_unveil<vm::_ref_base<T, AT>, void>
{
static_assert(!sizeof(T), "vm::_ref_base<>: ambiguous format argument");
};
| 4,242 | C++ | .h | 159 | 23.735849 | 92 | 0.664028 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,981 | vm_reservation.h | RPCS3_rpcs3/rpcs3/Emu/Memory/vm_reservation.h |
#pragma once
#include "vm.h"
#include "vm_locking.h"
#include "util/atomic.hpp"
#include "util/tsc.hpp"
#include <functional>
extern bool g_use_rtm;
extern u64 g_rtm_tx_limit2;
#ifdef _MSC_VER
extern "C"
{
u32 _xbegin();
void _xend();
}
#endif
namespace vm
{
enum : u64
{
rsrv_lock_mask = 127,
rsrv_unique_lock = 64,
rsrv_putunc_flag = 32,
};
// Get reservation status for further atomic update: last update timestamp
inline atomic_t<u64>& reservation_acquire(u32 addr)
{
// Access reservation info: stamp and the lock bit
return *reinterpret_cast<atomic_t<u64>*>(g_reservations + (addr & 0xff80) / 2);
}
// Update reservation status
void reservation_update(u32 addr);
struct reservation_waiter_t
{
u32 wait_flag = 0;
u8 waiters_count = 0;
u8 waiters_index = 0;
};
static inline std::pair<atomic_t<reservation_waiter_t>*, atomic_t<reservation_waiter_t>*> reservation_notifier(u32 raddr)
{
extern std::array<atomic_t<reservation_waiter_t>, 512> g_resrv_waiters_count;
// Storage-efficient method to distinguish different nearby addresses (the common case)
const usz index = std::popcount(raddr & -512) + ((raddr / 128) % 4) * 32;
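// Worked example (illustrative): raddr = 0x00010280 -> popcount(0x00010200) = 2 and
// ((0x00010280 / 128) % 4) * 32 = 32, so index = 34 and the waiter slots used are
// g_resrv_waiters_count[136..139].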
auto& waiter = g_resrv_waiters_count[index * 4];
return { &g_resrv_waiters_count[index * 4 + waiter.load().waiters_index % 4], &waiter };
}
static inline u32 reservation_notifier_count(u32 raddr)
{
return reservation_notifier(raddr).first->load().waiters_count;
}
static inline void reservation_notifier_end_wait(atomic_t<reservation_waiter_t>& waiter)
{
waiter.atomic_op([](reservation_waiter_t& value)
{
if (value.waiters_count-- == 1)
{
value.wait_flag = 0;
}
});
}
static inline atomic_t<reservation_waiter_t>* reservation_notifier_begin_wait(u32 raddr, u64 rtime)
{
atomic_t<reservation_waiter_t>& waiter = *reservation_notifier(raddr).first;
waiter.atomic_op([](reservation_waiter_t& value)
{
value.wait_flag = 1;
value.waiters_count++;
});
if ((reservation_acquire(raddr) & -128) != rtime)
{
reservation_notifier_end_wait(waiter);
return nullptr;
}
return &waiter;
}
static inline atomic_t<u32>* reservation_notifier_notify(u32 raddr, bool postpone = false)
{
const auto notifiers = reservation_notifier(raddr);
if (notifiers.first->load().wait_flag)
{
if (notifiers.first == notifiers.second)
{
if (!notifiers.first->fetch_op([](reservation_waiter_t& value)
{
if (value.waiters_index == 0)
{
value.wait_flag = 0;
value.waiters_count = 0;
value.waiters_index++;
return true;
}
return false;
}).second)
{
return nullptr;
}
}
else
{
u8 old_index = static_cast<u8>(notifiers.first - notifiers.second);
if (!atomic_storage<u8>::compare_exchange(notifiers.second->raw().waiters_index, old_index, (old_index + 1) % 4))
{
return nullptr;
}
notifiers.first->release(reservation_waiter_t{});
}
if (postpone)
{
return utils::bless<atomic_t<u32>>(¬ifiers.first->raw().wait_flag);
}
utils::bless<atomic_t<u32>>(¬ifiers.first->raw().wait_flag)->notify_all();
}
return nullptr;
}
u64 reservation_lock_internal(u32, atomic_t<u64>&);
void reservation_shared_lock_internal(atomic_t<u64>&);
inline bool reservation_try_lock(atomic_t<u64>& res, u64 rtime)
{
if (res.compare_and_swap_test(rtime, rtime | rsrv_unique_lock)) [[likely]]
{
return true;
}
return false;
}
inline std::pair<atomic_t<u64>&, u64> reservation_lock(u32 addr)
{
auto res = &vm::reservation_acquire(addr);
auto rtime = res->load();
if (rtime & 127 || !reservation_try_lock(*res, rtime)) [[unlikely]]
{
static atomic_t<u64> no_lock{};
rtime = reservation_lock_internal(addr, *res);
if (rtime == umax)
{
res = &no_lock;
}
}
return {*res, rtime};
}
// TODO: remove and make it external
void reservation_op_internal(u32 addr, std::function<bool()> func);
template <bool Ack = false, typename CPU, typename T, typename AT = u32, typename F>
inline SAFE_BUFFERS(auto) reservation_op(CPU& cpu, _ptr_base<T, AT> ptr, F op)
{
// Atomic operation will be performed on aligned 128 bytes of data, so the data size and alignment must comply
static_assert(sizeof(T) <= 128 && alignof(T) == sizeof(T), "vm::reservation_op: unsupported type");
static_assert(std::is_trivially_copyable_v<T>, "vm::reservation_op: not trivially copyable (optimization)");
// Use "super" pointer to prevent access violation handling during atomic op
const auto sptr = vm::get_super_ptr<T>(static_cast<u32>(ptr.addr()));
// Prefetch some data
//_m_prefetchw(sptr);
//_m_prefetchw(reinterpret_cast<char*>(sptr) + 64);
// Use 128-byte aligned addr
const u32 addr = static_cast<u32>(ptr.addr()) & -128;
auto& res = vm::reservation_acquire(addr);
//_m_prefetchw(&res);
#if defined(ARCH_X64)
if (g_use_rtm)
{
// Stage 1: single optimistic transaction attempt
unsigned status = -1;
u64 _old = 0;
auto stamp0 = utils::get_tsc(), stamp1 = stamp0, stamp2 = stamp0;
#ifndef _MSC_VER
__asm__ goto ("xbegin %l[stage2];" ::: "memory" : stage2);
#else
status = _xbegin();
if (status == umax)
#endif
{
if (res & rsrv_unique_lock)
{
#ifndef _MSC_VER
__asm__ volatile ("xend; mov $-1, %%eax;" ::: "memory");
#else
_xend();
#endif
goto stage2;
}
if constexpr (std::is_void_v<std::invoke_result_t<F, T&>>)
{
std::invoke(op, *sptr);
res += 128;
#ifndef _MSC_VER
__asm__ volatile ("xend;" ::: "memory");
#else
_xend();
#endif
if constexpr (Ack)
res.notify_all();
return;
}
else
{
if (auto result = std::invoke(op, *sptr))
{
res += 128;
#ifndef _MSC_VER
__asm__ volatile ("xend;" ::: "memory");
#else
_xend();
#endif
if constexpr (Ack)
res.notify_all();
return result;
}
else
{
#ifndef _MSC_VER
__asm__ volatile ("xend;" ::: "memory");
#else
_xend();
#endif
return result;
}
}
}
stage2:
#ifndef _MSC_VER
__asm__ volatile ("mov %%eax, %0;" : "=r" (status) :: "memory");
#endif
stamp1 = utils::get_tsc();
// Stage 2: try to lock reservation first
_old = res.fetch_add(1);
// Compute stamps excluding memory touch
stamp2 = utils::get_tsc() - (stamp1 - stamp0);
// Start lightened transaction
for (; !(_old & vm::rsrv_unique_lock) && stamp2 - stamp0 <= g_rtm_tx_limit2; stamp2 = utils::get_tsc())
{
if (cpu.has_pause_flag())
{
break;
}
#ifndef _MSC_VER
__asm__ goto ("xbegin %l[retry];" ::: "memory" : retry);
#else
status = _xbegin();
if (status != umax) [[unlikely]]
{
goto retry;
}
#endif
if constexpr (std::is_void_v<std::invoke_result_t<F, T&>>)
{
std::invoke(op, *sptr);
#ifndef _MSC_VER
__asm__ volatile ("xend;" ::: "memory");
#else
_xend();
#endif
res += 127;
if (Ack)
res.notify_all();
return;
}
else
{
if (auto result = std::invoke(op, *sptr))
{
#ifndef _MSC_VER
__asm__ volatile ("xend;" ::: "memory");
#else
_xend();
#endif
res += 127;
if (Ack)
res.notify_all();
return result;
}
else
{
#ifndef _MSC_VER
__asm__ volatile ("xend;" ::: "memory");
#else
_xend();
#endif
return result;
}
}
retry:
#ifndef _MSC_VER
__asm__ volatile ("mov %%eax, %0;" : "=r" (status) :: "memory");
#endif
if (!status)
{
break;
}
}
// Stage 3: all failed, heavyweight fallback (see comments at the bottom)
if constexpr (std::is_void_v<std::invoke_result_t<F, T&>>)
{
vm::reservation_op_internal(addr, [&]
{
std::invoke(op, *sptr);
return true;
});
if constexpr (Ack)
res.notify_all();
return;
}
else
{
auto result = std::invoke_result_t<F, T&>();
vm::reservation_op_internal(addr, [&]
{
if ((result = std::invoke(op, *sptr)))
{
return true;
}
else
{
return false;
}
});
if (Ack && result)
res.notify_all();
return result;
}
}
#else
static_cast<void>(cpu);
#endif /* ARCH_X64 */
// Lock reservation and perform heavyweight lock
reservation_shared_lock_internal(res);
if constexpr (std::is_void_v<std::invoke_result_t<F, T&>>)
{
{
vm::writer_lock lock(addr);
std::invoke(op, *sptr);
res += 127;
}
if constexpr (Ack)
res.notify_all();
return;
}
else
{
auto result = std::invoke_result_t<F, T&>();
{
vm::writer_lock lock(addr);
if ((result = std::invoke(op, *sptr)))
{
res += 127;
}
else
{
res -= 1;
}
}
if (Ack && result)
res.notify_all();
return result;
}
}
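// Illustrative usage sketch (hypothetical helper): how a caller holding a vm pointer to a
// naturally aligned guest u64 (e.g. vm::ptr<u64> from vm_ptr.h) could bump it with full
// reservation semantics and wake waiters. `Cpu` stands for a cpu_thread-derived object
// exposing has_pause_flag(); `Ptr` stands for the vm pointer type.
template <typename Cpu, typename Ptr>
inline void example_reservation_increment(Cpu& cpu, Ptr counter_ptr)
{
vm::reservation_op<true>(cpu, counter_ptr, [](be_t<u64>& value)
{
value += 1; // applied atomically with respect to other reservation users of the 128-byte block
});
}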
// For internal usage
[[noreturn]] void reservation_escape_internal();
// Read memory value in pseudo-atomic manner
template <typename CPU, typename T, typename AT = u32, typename F>
inline SAFE_BUFFERS(auto) peek_op(CPU&& cpu, _ptr_base<T, AT> ptr, F op)
{
// Atomic operation will be performed on aligned 128 bytes of data, so the data size and alignment must comply
static_assert(sizeof(T) <= 128 && alignof(T) == sizeof(T), "vm::peek_op: unsupported type");
// Use 128-byte aligned addr
const u32 addr = static_cast<u32>(ptr.addr()) & -128;
while (true)
{
if constexpr (std::is_class_v<std::remove_cvref_t<CPU>>)
{
if (cpu.test_stopped())
{
reservation_escape_internal();
}
}
const u64 rtime = vm::reservation_acquire(addr);
if (rtime & 127)
{
continue;
}
// Observe data non-atomically and make sure no reservation updates were made
if constexpr (std::is_void_v<std::invoke_result_t<F, const T&>>)
{
std::invoke(op, *ptr);
if (rtime == vm::reservation_acquire(addr))
{
return;
}
}
else
{
auto res = std::invoke(op, *ptr);
if (rtime == vm::reservation_acquire(addr))
{
return res;
}
}
}
}
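// Illustrative usage sketch (hypothetical helper): take a tear-free snapshot of a guest value.
// `Cpu` is a cpu_thread-like object (peek_op skips the stop check for non-class arguments);
// `Ptr` is a vm pointer to a naturally aligned, trivially copyable type of at most 128 bytes.
template <typename Cpu, typename Ptr>
inline auto example_consistent_read(Cpu&& cpu, Ptr ptr)
{
return vm::peek_op(cpu, ptr, [](const auto& value)
{
return value; // the copy is only returned if no reservation update raced with the read
});
}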
template <bool Ack = false, typename T, typename F>
inline SAFE_BUFFERS(auto) light_op(T& data, F op)
{
// Optimized real ptr -> vm ptr conversion, simply UB if out of range
const u32 addr = static_cast<u32>(reinterpret_cast<const u8*>(&data) - g_base_addr);
// Use "super" pointer to prevent access violation handling during atomic op
const auto sptr = vm::get_super_ptr<T>(addr);
// "Lock" reservation
auto& res = vm::reservation_acquire(addr);
auto [_old, _ok] = res.fetch_op([&](u64& r)
{
if (r & vm::rsrv_unique_lock)
{
return false;
}
r += 1;
return true;
});
if (!_ok) [[unlikely]]
{
vm::reservation_shared_lock_internal(res);
}
if constexpr (std::is_void_v<std::invoke_result_t<F, T&>>)
{
std::invoke(op, *sptr);
res += 127;
if constexpr (Ack)
{
res.notify_all();
}
}
else
{
auto result = std::invoke(op, *sptr);
res += 127;
if constexpr (Ack)
{
res.notify_all();
}
return result;
}
}
template <bool Ack = false, typename T, typename F>
inline SAFE_BUFFERS(auto) atomic_op(T& data, F op)
{
return light_op<Ack, T>(data, [&](T& data)
{
return data.atomic_op(op);
});
}
template <bool Ack = false, typename T, typename F>
inline SAFE_BUFFERS(auto) fetch_op(T& data, F op)
{
return light_op<Ack, T>(data, [&](T& data)
{
return data.fetch_op(op);
});
}
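// Illustrative usage sketch (hypothetical helper): vm::atomic_op simply runs the object's own
// atomic_op while the surrounding 128-byte reservation block is locked and bumped.
// `GuestAtomic` stands for an atomic_t-style object residing in guest memory.
template <typename GuestAtomic>
inline void example_guest_set_bits(GuestAtomic& obj, u64 mask)
{
vm::atomic_op<true>(obj, [&](auto& value)
{
value |= mask; // executed under the reservation lock; Ack=true notifies waiters afterwards
});
}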
} // namespace vm
| 11,473 | C++ | .h | 455 | 21.279121 | 122 | 0.626931 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 5,982 | vm_locking.h | RPCS3_rpcs3/rpcs3/Emu/Memory/vm_locking.h |
#pragma once
#include "vm.h"
#include "Emu/RSX/rsx_utils.h"
class cpu_thread;
class shared_mutex;
namespace vm
{
extern thread_local atomic_t<cpu_thread*>* g_tls_locked;
enum range_lock_flags : u64
{
/* flags (3 bits, W + R + Reserved) */
range_writable = 4ull << 61,
range_readable = 2ull << 61,
range_reserved = 1ull << 61,
range_full_mask = 7ull << 61,
/* flag combinations with special meaning */
range_locked = 4ull << 61, // R+W as well, but being exclusively accessed (size extends addr)
range_allocation = 0, // Allocation, no safe access, g_shmem may change at ANY location
range_pos = 61,
range_bits = 3,
};
extern atomic_t<u64, 64> g_range_lock_bits[2];
extern atomic_t<u64> g_shmem[];
// Register reader
void passive_lock(cpu_thread& cpu);
// Register range lock for further use
atomic_t<u64, 64>* alloc_range_lock();
void range_lock_internal(atomic_t<u64, 64>* range_lock, u32 begin, u32 size);
// Lock memory range ignoring memory protection (Size!=0 also implies aligned begin)
template <uint Size = 0>
FORCE_INLINE void range_lock(atomic_t<u64, 64>* range_lock, u32 begin, u32 _size)
{
if constexpr (Size == 0)
{
if (begin >> 28 == rsx::constants::local_mem_base >> 28)
{
return;
}
}
// Optimistic locking.
// Note that we store the range we will be accessing, without any clamping.
range_lock->store(begin | (u64{_size} << 32));
// Old-style conditional constexpr
const u32 size = Size ? Size : _size;
if (Size == 1 || (begin % 4096 + size % 4096) / 4096 == 0 ? !vm::check_addr(begin) : !vm::check_addr(begin, vm::page_readable, size))
{
range_lock->release(0);
range_lock_internal(range_lock, begin, _size);
return;
}
#ifndef _MSC_VER
__asm__(""); // Tiny barrier
#endif
if (!g_range_lock_bits[1]) [[likely]]
{
return;
}
// Fallback to slow path
range_lock_internal(range_lock, begin, size);
}
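// Illustrative usage sketch (hypothetical helper): typical lifecycle of a range lock around a
// guest memory access. The lock object itself comes from alloc_range_lock() and is reused by
// the caller; only the access pattern is sketched here.
inline u8 example_locked_read(atomic_t<u64, 64>* lock, u32 vm_addr, u32 size)
{
range_lock(lock, vm_addr, size); // falls back to the slow path only if the range is contested
const u8 value = *static_cast<const u8*>(base(vm_addr)); // any guest access goes here
lock->release(0); // drop the optimistic lock once the access is complete
return value;
}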
// Release it
void free_range_lock(atomic_t<u64, 64>*) noexcept;
// Unregister reader
void passive_unlock(cpu_thread& cpu);
// Optimization (set cpu_flag::memory)
bool temporary_unlock(cpu_thread& cpu) noexcept;
void temporary_unlock() noexcept;
struct writer_lock final
{
atomic_t<u64, 64>* range_lock;
writer_lock(const writer_lock&) = delete;
writer_lock& operator=(const writer_lock&) = delete;
writer_lock() noexcept;
writer_lock(u32 addr, atomic_t<u64, 64>* range_lock = nullptr, u32 size = 128, u64 flags = range_locked) noexcept;
~writer_lock() noexcept;
};
} // namespace vm
| 2,542 | C++ | .h | 77 | 30.116883 | 135 | 0.684836 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 5,983 | vm.h | RPCS3_rpcs3/rpcs3/Emu/Memory/vm.h |
#pragma once
#include <memory>
#include <map>
#include "util/types.hpp"
#include "util/atomic.hpp"
#include "util/auto_typemap.hpp"
#include "Utilities/StrFmt.h"
#include "util/to_endian.hpp"
namespace utils
{
class shm;
class address_range;
}
namespace vm
{
extern u8* const g_base_addr;
extern u8* const g_sudo_addr;
extern u8* const g_exec_addr;
extern u8* const g_stat_addr;
extern u8* const g_free_addr;
extern u8 g_reservations[65536 / 128 * 64];
struct writer_lock;
enum memory_location_t : uint
{
main,
user64k,
user1m,
rsx_context,
video,
stack,
spu,
memory_location_max,
any = 0xffffffff,
};
enum page_info_t : u8
{
page_readable = (1 << 0),
page_writable = (1 << 1),
page_executable = (1 << 2),
page_fault_notification = (1 << 3),
page_no_reservations = (1 << 4),
page_64k_size = (1 << 5),
page_1m_size = (1 << 6),
page_allocated = (1 << 7),
};
// Address type
enum addr_t : u32 {};
// Page information
using memory_page = atomic_t<u8>;
// Change memory protection of specified memory region
bool page_protect(u32 addr, u32 size, u8 flags_test = 0, u8 flags_set = 0, u8 flags_clear = 0);
// Check flags for specified memory range (unsafe)
bool check_addr(u32 addr, u8 flags, u32 size);
template <u32 Size = 1>
bool check_addr(u32 addr, u8 flags = page_readable)
{
extern std::array<memory_page, 0x100000000 / 4096> g_pages;
if (Size - 1 >= 4095u || Size & (Size - 1) || addr % Size)
{
// TODO
return check_addr(addr, flags, Size);
}
return !(~g_pages[addr / 4096] & (flags | page_allocated));
}
// Read string in a safe manner (page aware); returns true if the string was null-terminated
bool read_string(u32 addr, u32 max_size, std::string& out_string, bool check_pages = true) noexcept;
// Search and map memory in specified memory location (min alignment is 0x10000)
u32 alloc(u32 size, memory_location_t location, u32 align = 0x10000);
// Map memory at specified address (in optionally specified memory location)
bool falloc(u32 addr, u32 size, memory_location_t location = any, const std::shared_ptr<utils::shm>* src = nullptr);
// Unmap memory at specified address (in optionally specified memory location), return size
u32 dealloc(u32 addr, memory_location_t location = any, const std::shared_ptr<utils::shm>* src = nullptr);
// utils::memory_lock wrapper for locking sudo memory
void lock_sudo(u32 addr, u32 size);
enum block_flags_3
{
page_size_4k = 0x100, // SYS_MEMORY_PAGE_SIZE_4K
page_size_64k = 0x200, // SYS_MEMORY_PAGE_SIZE_64K
page_size_1m = 0x400, // SYS_MEMORY_PAGE_SIZE_1M
page_size_mask = 0xF00, // SYS_MEMORY_PAGE_SIZE_MASK
stack_guarded = 0x10,
preallocated = 0x20, // nonshareable
bf0_0x1 = 0x1, // TODO: document
bf0_0x2 = 0x2, // TODO: document
bf0_mask = bf0_0x1 | bf0_0x2,
};
enum alloc_flags
{
alloc_hidden = 0x1000,
alloc_unwritable = 0x2000,
alloc_executable = 0x4000,
alloc_prot_mask = alloc_hidden | alloc_unwritable | alloc_executable,
};
// Object that handles memory allocations inside specific constant bounds ("location")
class block_t final
{
auto_typemap<block_t> m;
// Common mapped region for special cases
std::shared_ptr<utils::shm> m_common;
atomic_t<u64> m_id = 0;
bool try_alloc(u32 addr, u64 bflags, u32 size, std::shared_ptr<utils::shm>&&) const;
// Unmap block
bool unmap(std::vector<std::pair<u64, u64>>* unmapped = nullptr);
friend bool _unmap_block(const std::shared_ptr<block_t>&, std::vector<std::pair<u64, u64>>* unmapped);
public:
block_t(u32 addr, u32 size, u64 flags);
~block_t();
public:
const u32 addr; // Start address
const u32 size; // Total size
const u64 flags; // Byte 0xF000: block_flags_3
// Byte 0x0F00: block_flags_2_page_size (SYS_MEMORY_PAGE_SIZE_*)
// Byte 0x00F0: block_flags_1
// Byte 0x000F: block_flags_0
// Search and map memory (min alignment is 0x10000)
u32 alloc(u32 size, const std::shared_ptr<utils::shm>* = nullptr, u32 align = 0x10000, u64 flags = 0);
// Try to map memory at fixed location
bool falloc(u32 addr, u32 size, const std::shared_ptr<utils::shm>* = nullptr, u64 flags = 0);
// Unmap memory at specified location previously returned by alloc(), return size
u32 dealloc(u32 addr, const std::shared_ptr<utils::shm>* = nullptr) const;
// Get memory at specified address (if size = 0, addr assumed exact)
std::pair<u32, std::shared_ptr<utils::shm>> peek(u32 addr, u32 size = 0) const;
// Get allocated memory count
u32 used();
// Internal
u32 imp_used(const vm::writer_lock&) const;
// Returns 0 if invalid, non-zero unique id if valid
u64 is_valid() const
{
return m_id;
}
// Serialization helper for shared memory
void get_shared_memory(std::vector<std::pair<utils::shm*, u32>>& shared);
// Returns sample address for shared memory, 0 on failure
u32 get_shm_addr(const std::shared_ptr<utils::shm>& shared);
// Serialization
void save(utils::serial& ar, std::map<utils::shm*, usz>& shared);
block_t(utils::serial& ar, std::vector<std::shared_ptr<utils::shm>>& shared);
};
// Create new memory block with specified parameters and return it
std::shared_ptr<block_t> map(u32 addr, u32 size, u64 flags = 0);
// Create new memory block at an arbitrary position with specified alignment
std::shared_ptr<block_t> find_map(u32 size, u32 align, u64 flags = 0);
// Delete existing memory block with specified start address, .first=its ptr, .second=success
std::pair<std::shared_ptr<block_t>, bool> unmap(u32 addr, bool must_be_empty = false, const std::shared_ptr<block_t>* ptr = nullptr);
// Get memory block associated with optionally specified memory location or optionally specified address
std::shared_ptr<block_t> get(memory_location_t location, u32 addr = 0);
// Allocate segment at specified location; does nothing if it already exists
std::shared_ptr<block_t> reserve_map(memory_location_t location, u32 addr, u32 area_size, u64 flags = page_size_64k);
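// Illustrative usage sketch (hypothetical helper): reserve a fresh block somewhere in the address
// space and carve a 64k-aligned allocation out of it. Error handling is reduced to returning 0.
inline u32 example_alloc_in_new_block(u32 block_size, u32 alloc_size)
{
const std::shared_ptr<block_t> blk = find_map(block_size, 0x10000, page_size_64k);
return blk ? blk->alloc(alloc_size) : 0;
}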
// Get PS3 virtual memory address from the provided pointer (nullptr or pointer from outside is always converted to 0)
// Super memory is allowed as well
inline std::pair<vm::addr_t, bool> try_get_addr(const void* real_ptr)
{
const std::make_unsigned_t<std::ptrdiff_t> diff = static_cast<const u8*>(real_ptr) - g_base_addr;
if (diff <= u64{u32{umax}} * 2 + 1)
{
return {vm::addr_t{static_cast<u32>(diff)}, true};
}
return {};
}
// Unsafely convert a host ptr to a PS3 VM address (truncates to 32 bits, assuming a 4GiB-aligned base)
inline vm::addr_t get_addr(const void* ptr)
{
return vm::addr_t{static_cast<u32>(uptr(ptr))};
}
template<typename T> requires (std::is_integral_v<decltype(+T{})> && (sizeof(+T{}) > 4 || std::is_signed_v<decltype(+T{})>))
vm::addr_t cast(const T& addr, std::source_location src_loc = std::source_location::current())
{
return vm::addr_t{::narrow<u32>(+addr, src_loc)};
}
template<typename T> requires (std::is_integral_v<decltype(+T{})> && (sizeof(+T{}) <= 4 && !std::is_signed_v<decltype(+T{})>))
vm::addr_t cast(const T& addr, u32 = 0, u32 = 0, const char* = nullptr, const char* = nullptr)
{
return vm::addr_t{static_cast<u32>(+addr)};
}
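// Illustrative sketch (hypothetical helper): the two cast overloads differ only in validation.
// A 64-bit (or signed) value goes through ::narrow and fails loudly if it does not fit in 32
// bits, while an unsigned 32-bit value passes through unchecked.
inline addr_t example_cast_checked(u64 wide_addr)
{
return cast(wide_addr); // reports an error if wide_addr does not fit in 32 bits
}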
// Convert specified PS3/PSV virtual memory address to a pointer for common access
template <typename T> requires (std::is_integral_v<decltype(+T{})>)
inline void* base(T addr)
{
return g_base_addr + static_cast<u32>(vm::cast(addr));
}
inline const u8& read8(u32 addr)
{
return g_base_addr[addr];
}
inline void write8(u32 addr, u8 value)
{
g_base_addr[addr] = value;
}
// Read or write virtual memory in a safe manner, returns false on failure
bool try_access(u32 addr, void* ptr, u32 size, bool is_write);
inline namespace ps3_
{
// Convert specified PS3 address to a pointer of specified (possibly converted to BE) type
template <typename T, typename U> inline to_be_t<T>* _ptr(const U& addr)
{
return static_cast<to_be_t<T>*>(base(addr));
}
// Convert specified PS3 address to a reference of specified (possibly converted to BE) type
template <typename T, typename U> inline to_be_t<T>& _ref(const U& addr)
{
return *static_cast<to_be_t<T>*>(base(addr));
}
// Access memory bypassing memory protection
template <typename T = u8>
inline to_be_t<T>* get_super_ptr(u32 addr)
{
return reinterpret_cast<to_be_t<T>*>(g_sudo_addr + addr);
}
inline const be_t<u16>& read16(u32 addr)
{
return _ref<u16>(addr);
}
inline void write16(u32 addr, be_t<u16> value)
{
_ref<u16>(addr) = value;
}
inline const be_t<u32>& read32(u32 addr)
{
return _ref<u32>(addr);
}
inline void write32(u32 addr, be_t<u32> value)
{
_ref<u32>(addr) = value;
}
inline const be_t<u64>& read64(u32 addr)
{
return _ref<u64>(addr);
}
inline void write64(u32 addr, be_t<u64> value)
{
_ref<u64>(addr) = value;
}
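// Illustrative sketch (hypothetical helper): round-trip a 32-bit value through guest memory using
// the helpers above. The be_t conversions store the bytes big-endian regardless of host order.
inline bool example_roundtrip_u32(u32 addr, u32 value)
{
write32(addr, value); // implicit u32 -> be_t<u32> conversion on store
return read32(addr) == value; // converted back to native endianness for the comparison
}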
void init();
}
void close();
void load(utils::serial& ar);
void save(utils::serial& ar);
// Returns sample address for shared memory, 0 on failure (wraps block_t::get_shm_addr)
u32 get_shm_addr(const std::shared_ptr<utils::shm>& shared);
template <typename T, typename AT>
class _ptr_base;
template <typename T, typename AT>
class _ref_base;
}
| 9,344 | C++ | .h | 245 | 35.240816 | 134 | 0.69345 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 5,984 | rsx_vertex_data.h | RPCS3_rpcs3/rpcs3/Emu/RSX/rsx_vertex_data.h |
#pragma once
#include "gcm_enums.h"
#include "rsx_decode.h"
#include "Common/simple_array.hpp"
#include "util/types.hpp"
namespace rsx
{
struct data_array_format_info
{
private:
u8 index;
std::array<u32, 0x10000 / 4>& registers;
auto decode_reg() const
{
const rsx::registers_decoder<NV4097_SET_VERTEX_DATA_ARRAY_FORMAT>::decoded_type
decoded_value(registers[NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + index]);
return decoded_value;
}
public:
data_array_format_info(int id, std::array<u32, 0x10000 / 4>& r)
: index(id)
, registers(r)
{
}
u32 offset() const
{
return registers[NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + index];
}
u8 stride() const
{
return decode_reg().stride();
}
u8 size() const
{
return decode_reg().size();
}
u16 frequency() const
{
return decode_reg().frequency();
}
vertex_base_type type() const
{
return decode_reg().type();
}
};
struct push_buffer_vertex_info
{
u32 attr = 0;
u32 size = 0;
vertex_base_type type = vertex_base_type::f;
u32 vertex_count = 0;
u32 dword_count = 0;
rsx::simple_array<u32> data;
push_buffer_vertex_info() = default;
~push_buffer_vertex_info() = default;
u8 get_vertex_size_in_dwords() const;
u32 get_vertex_id() const;
void clear();
void set_vertex_data(u32 attribute_id, u32 vertex_id, u32 sub_index, vertex_base_type type, u32 size, u32 arg);
void pad_to(u32 required_vertex_count, bool skip_last);
};
struct register_vertex_data_info
{
u16 frequency = 0;
u8 stride = 0;
u8 size = 0;
vertex_base_type type = vertex_base_type::f;
register_vertex_data_info() = default;
std::array<u32, 4> data{};
};
}
| 1,628 | C++ | .h | 71 | 20.746479 | 112 | 0.71475 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 5,985 | RSXFIFO.h | RPCS3_rpcs3/rpcs3/Emu/RSX/RSXFIFO.h |
#pragma once
#include "util/types.hpp"
#include "Emu/RSX/gcm_enums.h"
#include <span>
struct RsxDmaControl;
namespace rsx
{
class thread;
struct rsx_iomap_table;
namespace FIFO
{
enum internal_commands : u32
{
FIFO_NOP = 0xBABEF1F4,
FIFO_EMPTY = 0xDEADF1F0,
FIFO_BUSY = 0xBABEF1F0,
FIFO_ERROR = 0xDEADBEEF,
FIFO_PACKET_BEGIN = 0xF1F0,
FIFO_DISABLED_COMMAND = 0xF1F4,
FIFO_DRAW_BARRIER = 0xF1F8,
};
enum flatten_op : u32
{
NOTHING = 0,
EMIT_END = 1,
EMIT_BARRIER = 2
};
enum class state : u8
{
running = 0,
empty = 1, // PUT == GET
spinning = 2, // Puller continuously jumps to self addr (synchronization technique)
nop = 3, // Puller is processing a NOP command
lock_wait = 4, // Puller is processing a lock acquire
paused = 5, // Puller is paused externally
};
enum class interrupt_hint : u8
{
conditional_render_eval = 1,
zcull_sync = 2
};
struct register_pair
{
u32 reg;
u32 value;
void set(u32 reg, u32 val)
{
this->reg = reg;
this->value = val;
}
};
class flattening_helper
{
enum register_props : u8
{
none = 0,
skip_on_match = 1,
always_ignore = 2
};
enum optimization_hint : u8
{
unknown,
load_low,
load_unoptimizable,
application_not_compatible
};
// Workaround for MSVC, C2248
static constexpr u8 register_props_always_ignore = register_props::always_ignore;
static constexpr std::array<u8, 0x10000 / 4> m_register_properties = []
{
constexpr std::array<std::pair<u32, u32>, 4> ignorable_ranges =
{{
// General
{ NV4097_INVALIDATE_VERTEX_FILE, 3 }, // PSLight clears VERTEX_FILE[0-2]
{ NV4097_INVALIDATE_VERTEX_CACHE_FILE, 1 },
{ NV4097_INVALIDATE_L2, 1 },
{ NV4097_INVALIDATE_ZCULL, 1 }
}};
std::array<u8, 0x10000 / 4> register_properties{};
for (const auto &method : ignorable_ranges)
{
for (u32 i = 0; i < method.second; ++i)
{
register_properties[method.first + i] |= register_props_always_ignore;
}
}
return register_properties;
}();
u32 deferred_primitive = 0;
u32 draw_count = 0;
bool in_begin_end = false;
bool enabled = false;
u32 num_collapsed = 0;
optimization_hint fifo_hint = unknown;
void reset(bool _enabled);
public:
flattening_helper() = default;
~flattening_helper() = default;
u32 get_primitive() const { return deferred_primitive; }
bool is_enabled() const { return enabled; }
void force_disable();
void evaluate_performance(u32 total_draw_count);
inline flatten_op test(register_pair& command);
};
class FIFO_control
{
private:
mutable rsx::thread* m_thread;
RsxDmaControl* m_ctrl = nullptr;
const rsx::rsx_iomap_table* m_iotable;
u32 m_internal_get = 0;
u32 m_memwatch_addr = 0;
u32 m_memwatch_cmp = 0;
u32 m_command_reg = 0;
u32 m_command_inc = 0;
u32 m_remaining_commands = 0;
u32 m_args_ptr = 0;
u32 m_cmd = ~0u;
u32 m_cache_addr = 0;
u32 m_cache_size = 0;
alignas(64) std::byte m_cache[8][128];
public:
FIFO_control(rsx::thread* pctrl);
~FIFO_control() = default;
u32 translate_address(u32 addr) const;
std::pair<bool, u32> fetch_u32(u32 addr);
void invalidate_cache() { m_cache_size = 0; }
u32 get_pos() const { return m_internal_get; }
u32 last_cmd() const { return m_cmd; }
void sync_get() const;
std::span<const u32> get_current_arg_ptr() const;
u32 get_remaining_args_count() const { return m_remaining_commands; }
void restore_state(u32 cmd, u32 count);
void inc_get(bool wait);
void set_get(u32 get, u32 spin_cmd = 0);
void abort();
template <bool = true>
u32 read_put() const;
void read(register_pair& data);
inline bool read_unsafe(register_pair& data);
bool skip_methods(u32 count);
};
}
}
| 3,890 | C++ | .h | 144 | 22.9375 | 86 | 0.658497 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 5,986 | rsx_methods.h | RPCS3_rpcs3/rpcs3/Emu/RSX/rsx_methods.h |
#pragma once
#include <array>
#include <numeric>
#include "rsx_decode.h"
#include "RSXTexture.h"
#include "rsx_vertex_data.h"
#include "Common/simple_array.hpp"
#include "Emu/Cell/timers.hpp"
#include "Program/program_util.h"
#include "NV47/FW/draw_call.hpp"
namespace rsx
{
using rsx_method_t = void(*)(struct context*, u32 reg, u32 arg);
//TODO
union alignas(4) method_registers_t
{
u8 _u8[0x10000];
u32 _u32[0x10000 >> 2];
/*
struct alignas(4)
{
u8 pad[NV4097_SET_TEXTURE_OFFSET - 4];
struct alignas(4) texture_t
{
u32 offset;
union format_t
{
u32 _u32;
struct
{
u32: 1;
u32 location : 1;
u32 cubemap : 1;
u32 border_type : 1;
u32 dimension : 4;
u32 format : 8;
u32 mipmap : 16;
};
} format;
union address_t
{
u32 _u32;
struct
{
u32 wrap_s : 4;
u32 aniso_bias : 4;
u32 wrap_t : 4;
u32 unsigned_remap : 4;
u32 wrap_r : 4;
u32 gamma : 4;
u32 signed_remap : 4;
u32 zfunc : 4;
};
} address;
u32 control0;
u32 control1;
u32 filter;
u32 image_rect;
u32 border_color;
} textures[limits::textures_count];
};
*/
u32& operator[](int index)
{
return _u32[index >> 2];
}
};
struct rsx_state
{
public:
std::array<u32, 0x10000 / 4> registers{};
u32 latch{};
template<u32 opcode>
using decoded_type = typename registers_decoder<opcode>::decoded_type;
template<u32 opcode>
decoded_type<opcode> decode() const
{
u32 register_value = registers[opcode];
return decoded_type<opcode>(register_value);
}
template<u32 opcode>
decoded_type<opcode> decode(u32 register_value) const
{
return decoded_type<opcode>(register_value);
}
rsx_state& operator=(const rsx_state& in)
{
registers = in.registers;
transform_program = in.transform_program;
transform_constants = in.transform_constants;
register_vertex_info = in.register_vertex_info;
return *this;
}
rsx_state& operator=(rsx_state&& in) noexcept
{
registers = std::move(in.registers);
transform_program = std::move(in.transform_program);
transform_constants = std::move(in.transform_constants);
register_vertex_info = std::move(in.register_vertex_info);
return *this;
}
std::array<fragment_texture, 16> fragment_textures;
std::array<vertex_texture, 4> vertex_textures;
std::array<u32, max_vertex_program_instructions * 4> transform_program{};
std::array<u32[4], 512> transform_constants{};
draw_clause current_draw_clause{};
/**
* RSX can source vertex attributes from 2 places:
* 1. Immediate values passed by NV4097_SET_VERTEX_DATA*_M + ARRAY_ID write.
* For a given ARRAY_ID the last command of this type defines the actual type of the immediate value.
* If only a single value is passed this way for an ARRAY_ID, all vertices in the draw call
* share it.
* Immediate mode rendering uses this method as well to upload vertex data.
*
* 2. Vertex array values passed by offset/stride/size/format description.
* A given ARRAY_ID can have both an immediate value and a vertex array enabled at the same time
* (See After Burner Climax intro cutscene). In such a case the vertex array has precedence over the
* immediate value. As soon as the vertex array is disabled (size set to 0) the immediate value
* must be used if the vertex attrib mask requests it.
*
* Note that the behavior when both the vertex array and the immediate value system are disabled but
* the vertex attrib mask requests inputs is unknown.
*/
std::array<register_vertex_data_info, 16> register_vertex_info{};
std::array<data_array_format_info, 16> vertex_arrays_info;
private:
template<typename T, usz... N, typename Args>
static std::array<T, sizeof...(N)> fill_array(Args&& arg, std::index_sequence<N...>)
{
return{ T(N, std::forward<Args>(arg))... };
}
public:
rsx_state()
: fragment_textures(fill_array<fragment_texture>(registers, std::make_index_sequence<16>()))
, vertex_textures(fill_array<vertex_texture>(registers, std::make_index_sequence<4>()))
, vertex_arrays_info(fill_array<data_array_format_info>(registers, std::make_index_sequence<16>()))
{
}
rsx_state(const rsx_state& other)
: rsx_state()
{
this->operator=(other);
}
rsx_state(rsx_state&& other) noexcept
: rsx_state()
{
this->operator=(std::move(other));
}
~rsx_state() = default;
void decode(u32 reg, u32 value);
bool test(u32 reg, u32 value) const;
void reset();
void init();
u16 viewport_width() const
{
return decode<NV4097_SET_VIEWPORT_HORIZONTAL>().width();
}
u16 viewport_origin_x() const
{
return decode<NV4097_SET_VIEWPORT_HORIZONTAL>().origin_x();
}
u16 viewport_height() const
{
return decode<NV4097_SET_VIEWPORT_VERTICAL>().height();
}
u16 viewport_origin_y() const
{
return decode<NV4097_SET_VIEWPORT_VERTICAL>().origin_y();
}
u16 scissor_origin_x() const
{
return decode<NV4097_SET_SCISSOR_HORIZONTAL>().origin_x();
}
u16 scissor_width() const
{
return decode<NV4097_SET_SCISSOR_HORIZONTAL>().width();
}
u16 scissor_origin_y() const
{
return decode<NV4097_SET_SCISSOR_VERTICAL>().origin_y();
}
u16 scissor_height() const
{
return decode<NV4097_SET_SCISSOR_VERTICAL>().height();
}
window_origin shader_window_origin() const
{
return decode<NV4097_SET_SHADER_WINDOW>().window_shader_origin();
}
window_pixel_center shader_window_pixel() const
{
return decode<NV4097_SET_SHADER_WINDOW>().window_shader_pixel_center();
}
u16 shader_window_height() const
{
return decode<NV4097_SET_SHADER_WINDOW>().window_shader_height();
}
u16 window_offset_x() const
{
return decode<NV4097_SET_WINDOW_OFFSET>().window_offset_x();
}
u16 window_offset_y() const
{
return decode<NV4097_SET_WINDOW_OFFSET>().window_offset_y();
}
u32 window_clip_type() const
{
return registers[NV4097_SET_WINDOW_CLIP_TYPE];
}
u32 window_clip_horizontal() const
{
return registers[NV4097_SET_WINDOW_CLIP_HORIZONTAL];
}
u32 window_clip_vertical() const
{
return registers[NV4097_SET_WINDOW_CLIP_VERTICAL];
}
bool depth_test_enabled() const
{
return decode<NV4097_SET_DEPTH_TEST_ENABLE>().depth_test_enabled();
}
bool depth_write_enabled() const
{
return decode<NV4097_SET_DEPTH_MASK>().depth_write_enabled();
}
bool alpha_test_enabled() const
{
switch (surface_color())
{
case rsx::surface_color_format::x32:
case rsx::surface_color_format::w32z32y32x32:
return false;
default:
return decode<NV4097_SET_ALPHA_TEST_ENABLE>().alpha_test_enabled();
}
}
bool stencil_test_enabled() const
{
return decode<NV4097_SET_STENCIL_TEST_ENABLE>().stencil_test_enabled();
}
u8 index_array_location() const
{
return decode<NV4097_SET_INDEX_ARRAY_DMA>().index_dma();
}
rsx::index_array_type index_type() const
{
return decode<NV4097_SET_INDEX_ARRAY_DMA>().type();
}
u32 restart_index() const
{
return decode<NV4097_SET_RESTART_INDEX>().restart_index();
}
bool restart_index_enabled_raw() const
{
return decode<NV4097_SET_RESTART_INDEX_ENABLE>().restart_index_enabled();
}
bool restart_index_enabled() const
{
if (!restart_index_enabled_raw())
{
return false;
}
if (index_type() == rsx::index_array_type::u16 &&
restart_index() > 0xffff)
{
return false;
}
return true;
}
u32 z_clear_value(bool is_depth_stencil) const
{
return decode<NV4097_SET_ZSTENCIL_CLEAR_VALUE>().clear_z(is_depth_stencil);
}
u8 stencil_clear_value() const
{
return decode<NV4097_SET_ZSTENCIL_CLEAR_VALUE>().clear_stencil();
}
f32 fog_params_0() const
{
return decode<NV4097_SET_FOG_PARAMS>().fog_param_0();
}
f32 fog_params_1() const
{
return decode<NV4097_SET_FOG_PARAMS + 1>().fog_param_1();
}
bool color_mask_b(int index) const
{
if (index == 0)
{
return decode<NV4097_SET_COLOR_MASK>().color_b();
}
else
{
return decode<NV4097_SET_COLOR_MASK_MRT>().color_b(index);
}
}
bool color_mask_g(int index) const
{
if (index == 0)
{
return decode<NV4097_SET_COLOR_MASK>().color_g();
}
else
{
return decode<NV4097_SET_COLOR_MASK_MRT>().color_g(index);
}
}
bool color_mask_r(int index) const
{
if (index == 0)
{
return decode<NV4097_SET_COLOR_MASK>().color_r();
}
else
{
return decode<NV4097_SET_COLOR_MASK_MRT>().color_r(index);
}
}
bool color_mask_a(int index) const
{
if (index == 0)
{
return decode<NV4097_SET_COLOR_MASK>().color_a();
}
else
{
return decode<NV4097_SET_COLOR_MASK_MRT>().color_a(index);
}
}
bool color_write_enabled(int index) const
{
if (index == 0)
{
return decode<NV4097_SET_COLOR_MASK>().color_write_enabled();
}
else
{
return decode<NV4097_SET_COLOR_MASK_MRT>().color_write_enabled(index);
}
}
u8 clear_color_b() const
{
return decode<NV4097_SET_COLOR_CLEAR_VALUE>().blue();
}
u8 clear_color_r() const
{
return decode<NV4097_SET_COLOR_CLEAR_VALUE>().red();
}
u8 clear_color_g() const
{
return decode<NV4097_SET_COLOR_CLEAR_VALUE>().green();
}
u8 clear_color_a() const
{
return decode<NV4097_SET_COLOR_CLEAR_VALUE>().alpha();
}
bool depth_bounds_test_enabled() const
{
return decode<NV4097_SET_DEPTH_BOUNDS_TEST_ENABLE>().depth_bound_enabled();
}
f32 depth_bounds_min() const
{
return decode<NV4097_SET_DEPTH_BOUNDS_MIN>().depth_bound_min();
}
f32 depth_bounds_max() const
{
return decode<NV4097_SET_DEPTH_BOUNDS_MAX>().depth_bound_max();
}
f32 clip_min() const
{
return decode<NV4097_SET_CLIP_MIN>().clip_min();
}
f32 clip_max() const
{
return decode<NV4097_SET_CLIP_MAX>().clip_max();
}
bool logic_op_enabled() const
{
return decode<NV4097_SET_LOGIC_OP_ENABLE>().logic_op_enabled();
}
u8 stencil_mask() const
{
return decode<NV4097_SET_STENCIL_MASK>().stencil_mask();
}
u8 back_stencil_mask() const
{
return decode<NV4097_SET_BACK_STENCIL_MASK>().back_stencil_mask();
}
bool dither_enabled() const
{
return decode<NV4097_SET_DITHER_ENABLE>().dither_enabled();
}
bool blend_enabled() const
{
return decode<NV4097_SET_BLEND_ENABLE>().blend_enabled();
}
bool blend_enabled_surface_1() const
{
return decode<NV4097_SET_BLEND_ENABLE_MRT>().blend_surface_b();
}
bool blend_enabled_surface_2() const
{
return decode<NV4097_SET_BLEND_ENABLE_MRT>().blend_surface_c();
}
bool blend_enabled_surface_3() const
{
return decode<NV4097_SET_BLEND_ENABLE_MRT>().blend_surface_d();
}
bool line_smooth_enabled() const
{
return decode<NV4097_SET_LINE_SMOOTH_ENABLE>().line_smooth_enabled();
}
bool poly_offset_point_enabled() const
{
return decode<NV4097_SET_POLY_OFFSET_POINT_ENABLE>().poly_offset_point_enabled();
}
bool poly_offset_line_enabled() const
{
return decode<NV4097_SET_POLY_OFFSET_LINE_ENABLE>().poly_offset_line_enabled();
}
bool poly_offset_fill_enabled() const
{
return decode<NV4097_SET_POLY_OFFSET_FILL_ENABLE>().poly_offset_fill_enabled();
}
f32 poly_offset_scale() const
{
return decode<NV4097_SET_POLYGON_OFFSET_SCALE_FACTOR>().polygon_offset_scale_factor();
}
f32 poly_offset_bias() const
{
return decode<NV4097_SET_POLYGON_OFFSET_BIAS>().polygon_offset_scale_bias();
}
bool cull_face_enabled() const
{
return decode<NV4097_SET_CULL_FACE_ENABLE>().cull_face_enabled();
}
bool poly_smooth_enabled() const
{
return decode<NV4097_SET_POLY_SMOOTH_ENABLE>().poly_smooth_enabled();
}
bool two_sided_stencil_test_enabled() const
{
return decode<NV4097_SET_TWO_SIDED_STENCIL_TEST_ENABLE>().two_sided_stencil_test_enabled();
}
comparison_function depth_func() const
{
return decode<NV4097_SET_DEPTH_FUNC>().depth_func();
}
comparison_function stencil_func() const
{
return decode<NV4097_SET_STENCIL_FUNC>().stencil_func();
}
comparison_function back_stencil_func() const
{
return decode<NV4097_SET_BACK_STENCIL_FUNC>().back_stencil_func();
}
u8 stencil_func_ref() const
{
return decode<NV4097_SET_STENCIL_FUNC_REF>().stencil_func_ref();
}
u8 back_stencil_func_ref() const
{
return decode<NV4097_SET_BACK_STENCIL_FUNC_REF>().back_stencil_func_ref();
}
u8 stencil_func_mask() const
{
return decode<NV4097_SET_STENCIL_FUNC_MASK>().stencil_func_mask();
}
u8 back_stencil_func_mask() const
{
return decode<NV4097_SET_BACK_STENCIL_FUNC_MASK>().back_stencil_func_mask();
}
stencil_op stencil_op_fail() const
{
return decode<NV4097_SET_STENCIL_OP_FAIL>().fail();
}
stencil_op stencil_op_zfail() const
{
return decode<NV4097_SET_STENCIL_OP_ZFAIL>().zfail();
}
stencil_op stencil_op_zpass() const
{
return decode<NV4097_SET_STENCIL_OP_ZPASS>().zpass();
}
stencil_op back_stencil_op_fail() const
{
return decode<NV4097_SET_BACK_STENCIL_OP_FAIL>().back_fail();
}
rsx::stencil_op back_stencil_op_zfail() const
{
return decode<NV4097_SET_BACK_STENCIL_OP_ZFAIL>().back_zfail();
}
rsx::stencil_op back_stencil_op_zpass() const
{
return decode<NV4097_SET_BACK_STENCIL_OP_ZPASS>().back_zpass();
}
u8 blend_color_8b_r() const
{
return decode<NV4097_SET_BLEND_COLOR>().red8();
}
u8 blend_color_8b_g() const
{
return decode<NV4097_SET_BLEND_COLOR>().green8();
}
u8 blend_color_8b_b() const
{
return decode<NV4097_SET_BLEND_COLOR>().blue8();
}
u8 blend_color_8b_a() const
{
return decode<NV4097_SET_BLEND_COLOR>().alpha8();
}
u16 blend_color_16b_r() const
{
return decode<NV4097_SET_BLEND_COLOR>().red16();
}
u16 blend_color_16b_g() const
{
return decode<NV4097_SET_BLEND_COLOR>().green16();
}
u16 blend_color_16b_b() const
{
return decode<NV4097_SET_BLEND_COLOR2>().blue();
}
u16 blend_color_16b_a() const
{
return decode<NV4097_SET_BLEND_COLOR2>().alpha();
}
blend_equation blend_equation_rgb() const
{
return decode<NV4097_SET_BLEND_EQUATION>().blend_rgb();
}
blend_equation blend_equation_a() const
{
return decode<NV4097_SET_BLEND_EQUATION>().blend_a();
}
blend_factor blend_func_sfactor_rgb() const
{
return decode<NV4097_SET_BLEND_FUNC_SFACTOR>().src_blend_rgb();
}
blend_factor blend_func_sfactor_a() const
{
return decode<NV4097_SET_BLEND_FUNC_SFACTOR>().src_blend_a();
}
blend_factor blend_func_dfactor_rgb() const
{
return decode<NV4097_SET_BLEND_FUNC_DFACTOR>().dst_blend_rgb();
}
blend_factor blend_func_dfactor_a() const
{
return decode<NV4097_SET_BLEND_FUNC_DFACTOR>().dst_blend_a();
}
logic_op logic_operation() const
{
return decode<NV4097_SET_LOGIC_OP>().logic_operation();
}
user_clip_plane_op clip_plane_0_enabled() const
{
return decode<NV4097_SET_USER_CLIP_PLANE_CONTROL>().clip_plane0();
}
user_clip_plane_op clip_plane_1_enabled() const
{
return decode<NV4097_SET_USER_CLIP_PLANE_CONTROL>().clip_plane1();
}
user_clip_plane_op clip_plane_2_enabled() const
{
return decode<NV4097_SET_USER_CLIP_PLANE_CONTROL>().clip_plane2();
}
user_clip_plane_op clip_plane_3_enabled() const
{
return decode<NV4097_SET_USER_CLIP_PLANE_CONTROL>().clip_plane3();
}
user_clip_plane_op clip_plane_4_enabled() const
{
return decode<NV4097_SET_USER_CLIP_PLANE_CONTROL>().clip_plane4();
}
user_clip_plane_op clip_plane_5_enabled() const
{
return decode<NV4097_SET_USER_CLIP_PLANE_CONTROL>().clip_plane5();
}
front_face front_face_mode() const
{
return decode<NV4097_SET_FRONT_FACE>().front_face_mode();
}
cull_face cull_face_mode() const
{
return decode<NV4097_SET_CULL_FACE>().cull_face_mode();
}
f32 line_width() const
{
return decode<NV4097_SET_LINE_WIDTH>().line_width();
}
f32 point_size() const
{
return decode<NV4097_SET_POINT_SIZE>().point_size();
}
bool point_sprite_enabled() const
{
return decode<NV4097_SET_POINT_SPRITE_CONTROL>().enabled();
}
f32 alpha_ref() const
{
switch (surface_color())
{
case rsx::surface_color_format::x32:
case rsx::surface_color_format::w32z32y32x32:
return decode<NV4097_SET_ALPHA_REF>().alpha_ref32();
case rsx::surface_color_format::w16z16y16x16:
return decode<NV4097_SET_ALPHA_REF>().alpha_ref16();
default:
return decode<NV4097_SET_ALPHA_REF>().alpha_ref8();
}
}
surface_target surface_color_target() const
{
return decode<NV4097_SET_SURFACE_COLOR_TARGET>().target();
}
u16 surface_clip_origin_x() const
{
return decode<NV4097_SET_SURFACE_CLIP_HORIZONTAL>().origin_x();
}
u16 surface_clip_width() const
{
return decode<NV4097_SET_SURFACE_CLIP_HORIZONTAL>().width();
}
u16 surface_clip_origin_y() const
{
return decode<NV4097_SET_SURFACE_CLIP_VERTICAL>().origin_y();
}
u16 surface_clip_height() const
{
return decode<NV4097_SET_SURFACE_CLIP_VERTICAL>().height();
}
u32 surface_offset(u32 index) const
{
switch (index)
{
case 0: return decode<NV4097_SET_SURFACE_COLOR_AOFFSET>().surface_a_offset();
case 1: return decode<NV4097_SET_SURFACE_COLOR_BOFFSET>().surface_b_offset();
case 2: return decode<NV4097_SET_SURFACE_COLOR_COFFSET>().surface_c_offset();
default: return decode<NV4097_SET_SURFACE_COLOR_DOFFSET>().surface_d_offset();
}
}
u32 surface_pitch(u32 index) const
{
switch (index)
{
case 0: return decode<NV4097_SET_SURFACE_PITCH_A>().surface_a_pitch();
case 1: return decode<NV4097_SET_SURFACE_PITCH_B>().surface_b_pitch();
case 2: return decode<NV4097_SET_SURFACE_PITCH_C>().surface_c_pitch();
default: return decode<NV4097_SET_SURFACE_PITCH_D>().surface_d_pitch();
}
}
u32 surface_dma(u32 index) const
{
switch (index)
{
case 0: return decode<NV4097_SET_CONTEXT_DMA_COLOR_A>().dma_surface_a();
case 1: return decode<NV4097_SET_CONTEXT_DMA_COLOR_B>().dma_surface_b();
case 2: return decode<NV4097_SET_CONTEXT_DMA_COLOR_C>().dma_surface_c();
default: return decode<NV4097_SET_CONTEXT_DMA_COLOR_D>().dma_surface_d();
}
}
u32 surface_z_offset() const
{
return decode<NV4097_SET_SURFACE_ZETA_OFFSET>().surface_z_offset();
}
u32 surface_z_pitch() const
{
return decode<NV4097_SET_SURFACE_PITCH_Z>().surface_z_pitch();
}
u32 surface_z_dma() const
{
return decode<NV4097_SET_CONTEXT_DMA_ZETA>().dma_surface_z();
}
f32 viewport_scale_x() const
{
return decode<NV4097_SET_VIEWPORT_SCALE>().viewport_scale_x();
}
f32 viewport_scale_y() const
{
return decode<NV4097_SET_VIEWPORT_SCALE + 1>().viewport_scale_y();
}
f32 viewport_scale_z() const
{
return decode<NV4097_SET_VIEWPORT_SCALE + 2>().viewport_scale_z();
}
f32 viewport_scale_w() const
{
return decode<NV4097_SET_VIEWPORT_SCALE + 3>().viewport_scale_w();
}
f32 viewport_offset_x() const
{
return decode<NV4097_SET_VIEWPORT_OFFSET>().viewport_offset_x();
}
f32 viewport_offset_y() const
{
return decode<NV4097_SET_VIEWPORT_OFFSET + 1>().viewport_offset_y();
}
f32 viewport_offset_z() const
{
return decode<NV4097_SET_VIEWPORT_OFFSET + 2>().viewport_offset_z();
}
f32 viewport_offset_w() const
{
return decode<NV4097_SET_VIEWPORT_OFFSET + 3>().viewport_offset_w();
}
bool two_side_light_en() const
{
return decode<NV4097_SET_TWO_SIDE_LIGHT_EN>().two_sided_lighting_enabled();
}
fog_mode fog_equation() const
{
return decode<NV4097_SET_FOG_MODE>().fog_equation();
}
comparison_function alpha_func() const
{
return decode<NV4097_SET_ALPHA_FUNC>().alpha_func();
}
u16 vertex_attrib_input_mask() const
{
return decode<NV4097_SET_VERTEX_ATTRIB_INPUT_MASK>().mask();
}
u16 frequency_divider_operation_mask() const
{
return decode<NV4097_SET_FREQUENCY_DIVIDER_OPERATION>().frequency_divider_operation_mask();
}
u32 vertex_attrib_output_mask() const
{
return decode<NV4097_SET_VERTEX_ATTRIB_OUTPUT_MASK>().output_mask();
}
u32 shader_control() const
{
return decode<NV4097_SET_SHADER_CONTROL>().shader_ctrl();
}
surface_color_format surface_color() const
{
return decode<NV4097_SET_SURFACE_FORMAT>().color_fmt();
}
surface_depth_format2 surface_depth_fmt() const
{
const auto base_fmt = *decode<NV4097_SET_SURFACE_FORMAT>().depth_fmt();
if (!depth_buffer_float_enabled()) [[likely]]
{
return static_cast<surface_depth_format2>(base_fmt);
}
return base_fmt == surface_depth_format::z16 ?
surface_depth_format2::z16_float :
surface_depth_format2::z24s8_float;
}
surface_raster_type surface_type() const
{
return decode<NV4097_SET_SURFACE_FORMAT>().type();
}
surface_antialiasing surface_antialias() const
{
return decode<NV4097_SET_SURFACE_FORMAT>().antialias();
}
u8 surface_log2_height() const
{
return decode<NV4097_SET_SURFACE_FORMAT>().log2height();
}
u8 surface_log2_width() const
{
return decode<NV4097_SET_SURFACE_FORMAT>().log2width();
}
u32 vertex_data_base_offset() const
{
return decode<NV4097_SET_VERTEX_DATA_BASE_OFFSET>().vertex_data_base_offset();
}
u32 index_array_address() const
{
return decode<NV4097_SET_INDEX_ARRAY_ADDRESS>().index_array_offset();
}
u32 vertex_data_base_index() const
{
return decode<NV4097_SET_VERTEX_DATA_BASE_INDEX>().vertex_data_base_index();
}
std::pair<u32, u32> shader_program_address() const
{
const u32 shader_address = decode<NV4097_SET_SHADER_PROGRAM>().shader_program_address();
return { shader_address & ~3, (shader_address & 3) - 1 };
}
u32 transform_program_start() const
{
return decode<NV4097_SET_TRANSFORM_PROGRAM_START>().transform_program_start();
}
primitive_type primitive_mode() const
{
return decode<NV4097_SET_BEGIN_END>().primitive();
}
u32 semaphore_context_dma_406e() const
{
return decode<NV406E_SET_CONTEXT_DMA_SEMAPHORE>().context_dma();
}
u32 semaphore_offset_406e() const
{
return decode<NV406E_SEMAPHORE_OFFSET>().semaphore_offset();
}
u32 semaphore_context_dma_4097() const
{
return decode<NV4097_SET_CONTEXT_DMA_SEMAPHORE>().context_dma();
}
u32 semaphore_offset_4097() const
{
return decode<NV4097_SET_SEMAPHORE_OFFSET>().semaphore_offset();
}
blit_engine::context_dma context_dma_report() const
{
return decode<NV4097_SET_CONTEXT_DMA_REPORT>().context_dma_report();
}
u32 context_dma_notify() const
{
return decode<NV4097_SET_CONTEXT_DMA_NOTIFIES>().context_dma_notify();
}
blit_engine::transfer_operation blit_engine_operation() const
{
return decode<NV3089_SET_OPERATION>().transfer_op();
}
/// TODO: find the purpose of these vs the in/out equivalents
u16 blit_engine_clip_x() const
{
return decode<NV3089_CLIP_POINT>().clip_x();
}
u16 blit_engine_clip_y() const
{
return decode<NV3089_CLIP_POINT>().clip_y();
}
u16 blit_engine_clip_width() const
{
return decode<NV3089_CLIP_SIZE>().clip_width();
}
u16 blit_engine_clip_height() const
{
return decode<NV3089_CLIP_SIZE>().clip_height();
}
u16 blit_engine_output_x() const
{
return decode<NV3089_IMAGE_OUT_POINT>().x();
}
u16 blit_engine_output_y() const
{
return decode<NV3089_IMAGE_OUT_POINT>().y();
}
u16 blit_engine_output_width() const
{
return decode<NV3089_IMAGE_OUT_SIZE>().width();
}
u16 blit_engine_output_height() const
{
return decode<NV3089_IMAGE_OUT_SIZE>().height();
}
// there is no x/y ?
u16 blit_engine_input_width() const
{
return decode<NV3089_IMAGE_IN_SIZE>().width();
}
u16 blit_engine_input_height() const
{
return decode<NV3089_IMAGE_IN_SIZE>().height();
}
u16 blit_engine_input_pitch() const
{
return decode<NV3089_IMAGE_IN_FORMAT>().format();
}
blit_engine::transfer_origin blit_engine_input_origin() const
{
return decode<NV3089_IMAGE_IN_FORMAT>().transfer_origin();
}
blit_engine::transfer_interpolator blit_engine_input_inter() const
{
return decode<NV3089_IMAGE_IN_FORMAT>().transfer_interpolator();
}
expected<blit_engine::transfer_source_format> blit_engine_src_color_format() const
{
return decode<NV3089_SET_COLOR_FORMAT>().transfer_source_fmt();
}
// ???
f32 blit_engine_in_x() const
{
return decode<NV3089_IMAGE_IN>().x();
}
// ???
f32 blit_engine_in_y() const
{
return decode<NV3089_IMAGE_IN>().y();
}
u32 blit_engine_input_offset() const
{
return decode<NV3089_IMAGE_IN_OFFSET>().input_offset();
}
u32 blit_engine_input_location() const
{
return decode<NV3089_SET_CONTEXT_DMA_IMAGE>().context_dma();
}
blit_engine::context_surface blit_engine_context_surface() const
{
return decode<NV3089_SET_CONTEXT_SURFACE>().ctx_surface();
}
u32 blit_engine_output_location_nv3062() const
{
return decode<NV3062_SET_CONTEXT_DMA_IMAGE_DESTIN>().output_dma();
}
u32 blit_engine_output_offset_nv3062() const
{
return decode<NV3062_SET_OFFSET_DESTIN>().output_offset();
}
expected<blit_engine::transfer_destination_format> blit_engine_nv3062_color_format() const
{
return decode<NV3062_SET_COLOR_FORMAT>().transfer_dest_fmt();
}
u16 blit_engine_output_alignment_nv3062() const
{
return decode<NV3062_SET_PITCH>().alignment();
}
u16 blit_engine_output_pitch_nv3062() const
{
return decode<NV3062_SET_PITCH>().pitch();
}
u32 blit_engine_nv309E_location() const
{
return decode<NV309E_SET_CONTEXT_DMA_IMAGE>().context_dma();
}
u32 blit_engine_nv309E_offset() const
{
return decode<NV309E_SET_OFFSET>().offset();
}
expected<blit_engine::transfer_destination_format> blit_engine_output_format_nv309E() const
{
return decode<NV309E_SET_FORMAT>().format();
}
f32 blit_engine_ds_dx() const
{
return decode<NV3089_DS_DX>().ds_dx();
}
f32 blit_engine_dt_dy() const
{
return decode<NV3089_DT_DY>().dt_dy();
}
u8 nv309e_sw_width_log2() const
{
return decode<NV309E_SET_FORMAT>().sw_width_log2();
}
u8 nv309e_sw_height_log2() const
{
return decode<NV309E_SET_FORMAT>().sw_height_log2();
}
u32 nv0039_input_pitch() const
{
return decode<NV0039_PITCH_IN>().input_pitch();
}
u32 nv0039_output_pitch() const
{
return decode<NV0039_PITCH_OUT>().output_pitch();
}
u32 nv0039_line_length() const
{
return decode<NV0039_LINE_LENGTH_IN>().input_line_length();
}
u32 nv0039_line_count() const
{
return decode<NV0039_LINE_COUNT>().line_count();
}
u8 nv0039_output_format() const
{
return decode<NV0039_FORMAT>().output_format();
}
u8 nv0039_input_format() const
{
return decode<NV0039_FORMAT>().input_format();
}
u32 nv0039_output_offset() const
{
return decode<NV0039_OFFSET_OUT>().output_offset();
}
u32 nv0039_output_location() const
{
return decode<NV0039_SET_CONTEXT_DMA_BUFFER_OUT>().output_dma();
}
u32 nv0039_input_offset() const
{
return decode<NV0039_OFFSET_IN>().input_offset();
}
u32 nv0039_input_location() const
{
return decode<NV0039_SET_CONTEXT_DMA_BUFFER_IN>().input_dma();
}
u16 nv308a_x() const
{
return decode<NV308A_POINT>().x();
}
u16 nv308a_y() const
{
return decode<NV308A_POINT>().y();
}
u16 nv308a_size_in_x() const
{
return u16(registers[NV308A_SIZE_IN] & 0xFFFF);
}
u16 nv308a_size_out_x() const
{
return u16(registers[NV308A_SIZE_OUT] & 0xFFFF);
}
u32 transform_program_load() const
{
return registers[NV4097_SET_TRANSFORM_PROGRAM_LOAD];
}
void transform_program_load_set(u32 value)
{
registers[NV4097_SET_TRANSFORM_PROGRAM_LOAD] = value;
}
u32 transform_constant_load() const
{
return registers[NV4097_SET_TRANSFORM_CONSTANT_LOAD];
}
u32 transform_branch_bits() const
{
return registers[NV4097_SET_TRANSFORM_BRANCH_BITS];
}
u16 msaa_sample_mask() const
{
return decode<NV4097_SET_ANTI_ALIASING_CONTROL>().msaa_sample_mask();
}
bool msaa_enabled() const
{
return decode<NV4097_SET_ANTI_ALIASING_CONTROL>().msaa_enabled();
}
bool msaa_alpha_to_coverage_enabled() const
{
return decode<NV4097_SET_ANTI_ALIASING_CONTROL>().msaa_alpha_to_coverage();
}
bool msaa_alpha_to_one_enabled() const
{
return decode<NV4097_SET_ANTI_ALIASING_CONTROL>().msaa_alpha_to_one();
}
bool depth_clamp_enabled() const
{
return decode<NV4097_SET_ZMIN_MAX_CONTROL>().depth_clamp_enabled();
}
bool depth_clip_enabled() const
{
return decode<NV4097_SET_ZMIN_MAX_CONTROL>().depth_clip_enabled();
}
bool depth_clip_ignore_w() const
{
return decode<NV4097_SET_ZMIN_MAX_CONTROL>().depth_clip_ignore_w();
}
bool framebuffer_srgb_enabled() const
{
return decode<NV4097_SET_SHADER_PACKER>().srgb_output_enabled();
}
bool depth_buffer_float_enabled() const
{
return decode<NV4097_SET_CONTROL0>().depth_float();
}
u16 texcoord_control_mask() const
{
// Only 10 texture coords exist [0-9]
u16 control_mask = 0;
for (u8 index = 0; index < 10; ++index)
{
control_mask |= ((registers[NV4097_SET_TEX_COORD_CONTROL + index] & 1) << index);
}
return control_mask;
}
u16 point_sprite_control_mask() const
{
return decode<NV4097_SET_POINT_SPRITE_CONTROL>().texcoord_mask();
}
const void* polygon_stipple_pattern() const
{
return registers.data() + NV4097_SET_POLYGON_STIPPLE_PATTERN;
}
bool polygon_stipple_enabled() const
{
return decode<NV4097_SET_POLYGON_STIPPLE>().enabled();
}
};
extern rsx_state method_registers;
extern std::array<rsx_method_t, 0x10000 / 4> methods;
extern std::array<u32, 0x10000 / 4> state_signals;
}
| 29,798 | C++ | .h | 1,082 | 24.034196 | 108 | 0.694085 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 5,987 | GSFrameBase.h | RPCS3_rpcs3/rpcs3/Emu/RSX/GSFrameBase.h |
#pragma once
#include "util/types.hpp"
#include "util/atomic.hpp"
#include <vector>
#include "display.h"
class GSFrameBase
{
public:
GSFrameBase() = default;
GSFrameBase(const GSFrameBase&) = delete;
virtual ~GSFrameBase() = default;
virtual void close() = 0;
virtual bool shown() = 0;
virtual void hide() = 0;
virtual void show() = 0;
virtual void toggle_fullscreen() = 0;
virtual void delete_context(draw_context_t ctx) = 0;
virtual draw_context_t make_context() = 0;
virtual void set_current(draw_context_t ctx) = 0;
virtual void flip(draw_context_t ctx, bool skip_frame = false) = 0;
virtual int client_width() = 0;
virtual int client_height() = 0;
virtual f64 client_display_rate() = 0;
virtual bool has_alpha() = 0;
virtual display_handle_t handle() const = 0;
virtual bool can_consume_frame() const = 0;
virtual void present_frame(std::vector<u8>& data, u32 pitch, u32 width, u32 height, bool is_bgra) const = 0;
virtual void take_screenshot(const std::vector<u8> sshot_data, u32 sshot_width, u32 sshot_height, bool is_bgra) = 0;
};
| 1,067 | C++ | .h | 29 | 34.862069 | 117 | 0.728419 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 5,988 | rsx_utils.h | RPCS3_rpcs3/rpcs3/Emu/RSX/rsx_utils.h |
#pragma once
#include "../system_config.h"
#include "Utilities/address_range.h"
#include "Utilities/geometry.h"
#include "gcm_enums.h"
#include <memory>
#include <bitset>
#include <chrono>
extern "C"
{
#include <libavutil/pixfmt.h>
}
#define RSX_SURFACE_DIMENSION_IGNORED 1
namespace rsx
{
// Import address_range utilities
using utils::address_range;
using utils::address_range_vector;
using utils::page_for;
using utils::page_start;
using utils::page_end;
using utils::next_page;
using flags64_t = u64;
using flags32_t = u32;
using flags16_t = u16;
using flags8_t = u8;
extern atomic_t<u64> g_rsx_shared_tag;
enum class problem_severity : u8
{
low,
moderate,
severe,
fatal
};
//Base for resources with reference counting
class ref_counted
{
protected:
atomic_t<s32> ref_count{ 0 }; // References held
atomic_t<u8> idle_time{ 0 }; // Number of times the resource has been tagged idle
public:
void add_ref()
{
++ref_count;
idle_time = 0;
}
void release()
{
--ref_count;
}
bool has_refs() const
{
return (ref_count > 0);
}
// Returns number of times the resource has been checked without being used in-between checks
u8 unused_check_count()
{
if (ref_count)
{
return 0;
}
return idle_time++;
}
};
namespace limits
{
enum
{
fragment_textures_count = 16,
vertex_textures_count = 4,
vertex_count = 16,
fragment_count = 32,
tiles_count = 15,
zculls_count = 8,
color_buffers_count = 4
};
}
namespace constants
{
constexpr std::array<const char*, 16> fragment_texture_names =
{
"tex0", "tex1", "tex2", "tex3", "tex4", "tex5", "tex6", "tex7",
"tex8", "tex9", "tex10", "tex11", "tex12", "tex13", "tex14", "tex15",
};
constexpr std::array<const char*, 4> vertex_texture_names =
{
"vtex0", "vtex1", "vtex2", "vtex3",
};
// Local RSX memory base (known as constant)
constexpr u32 local_mem_base = 0xC0000000;
}
/**
* Holds information about a framebuffer
*/
struct gcm_framebuffer_info
{
u32 address = 0;
u32 pitch = 0;
rsx::surface_color_format color_format;
rsx::surface_depth_format2 depth_format;
u16 width = 0;
u16 height = 0;
u8 bpp = 0;
u8 samples = 0;
address_range range{};
gcm_framebuffer_info() = default;
ENABLE_BITWISE_SERIALIZATION;
void calculate_memory_range(u32 aa_factor_u, u32 aa_factor_v)
{
// Account for the last line of the block not reaching the end
const u32 block_size = pitch * (height - 1) * aa_factor_v;
const u32 line_size = width * aa_factor_u * bpp;
range = address_range::start_length(address, block_size + line_size);
}
address_range get_memory_range(const u32* aa_factors)
{
calculate_memory_range(aa_factors[0], aa_factors[1]);
return range;
}
address_range get_memory_range() const
{
ensure(range.start == address);
return range;
}
};
struct avconf
{
stereo_render_mode_options stereo_mode = stereo_render_mode_options::disabled; // Stereo 3D display mode
u8 format = 0; // XRGB
u8 aspect = 0; // AUTO
u8 resolution_id = 2; // 720p
u32 scanline_pitch = 0; // PACKED
atomic_t<f32> gamma = 1.f; // NO GAMMA CORRECTION
u32 resolution_x = 1280; // X RES
u32 resolution_y = 720; // Y RES
atomic_t<u32> state = 0; // 1 after cellVideoOutConfigure was called
u8 scan_mode = 1; // CELL_VIDEO_OUT_SCAN_MODE_PROGRESSIVE
ENABLE_BITWISE_SERIALIZATION;
SAVESTATE_INIT_POS(12);
avconf() noexcept;
~avconf() = default;
avconf(utils::serial& ar);
void save(utils::serial& ar);
u32 get_compatible_gcm_format() const;
u8 get_bpp() const;
double get_aspect_ratio() const;
areau aspect_convert_region(const size2u& image_dimensions, const size2u& output_dimensions) const;
size2u aspect_convert_dimensions(const size2u& image_dimensions) const;
};
struct blit_src_info
{
blit_engine::transfer_source_format format;
blit_engine::transfer_origin origin;
u16 offset_x;
u16 offset_y;
u16 width;
u16 height;
u32 pitch;
u8 bpp;
u32 dma;
u32 rsx_address;
u8 *pixels;
};
struct blit_dst_info
{
blit_engine::transfer_destination_format format;
u16 offset_x;
u16 offset_y;
u16 width;
u16 height;
u16 clip_x;
u16 clip_y;
u16 clip_width;
u16 clip_height;
f32 scale_x;
f32 scale_y;
u32 pitch;
u8 bpp;
u32 dma;
u32 rsx_address;
u8 *pixels;
bool swizzled;
};
template <typename T>
void pad_texture(void* input_pixels, void* output_pixels, u16 input_width, u16 input_height, u16 output_width, u16 /*output_height*/)
{
T *src = static_cast<T*>(input_pixels);
T *dst = static_cast<T*>(output_pixels);
for (u16 h = 0; h < input_height; ++h)
{
const u32 padded_pos = h * output_width;
const u32 pos = h * input_width;
for (u16 w = 0; w < input_width; ++w)
{
dst[padded_pos + w] = src[pos + w];
}
}
}
static constexpr u32 floor_log2(u32 value)
{
return value <= 1 ? 0 : std::countl_zero(value) ^ 31;
}
static constexpr u32 ceil_log2(u32 value)
{
return floor_log2(value) + u32{!!(value & (value - 1))};
}
static constexpr u32 next_pow2(u32 x)
{
if (x <= 2) return x;
return static_cast<u32>((1ULL << 32) >> std::countl_zero(x - 1));
}
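// Illustrative sanity checks for the three helpers above; these static_asserts are added for
// exposition only (values follow directly from the definitions) and are not part of the upstream header.
static_assert(floor_log2(16) == 4 && floor_log2(17) == 4, "floor_log2 rounds down");
static_assert(ceil_log2(16) == 4 && ceil_log2(17) == 5, "ceil_log2 rounds up");
static_assert(next_pow2(2) == 2 && next_pow2(17) == 32, "next_pow2 returns the next power of two");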
static inline bool fcmp(float a, float b, float epsilon = 0.000001f)
{
return fabsf(a - b) < epsilon;
}
// Returns an ever-increasing tag value
static inline u64 get_shared_tag()
{
return g_rsx_shared_tag++;
}
static inline u32 get_location(u32 addr)
{
// We don't really care about the actual memory map, it shouldn't be possible to use the mmio bar region anyway
constexpr address_range local_mem_range = address_range::start_length(rsx::constants::local_mem_base, 0x1000'0000);
return local_mem_range.overlaps(addr) ?
CELL_GCM_LOCATION_LOCAL :
CELL_GCM_LOCATION_MAIN;
}
// General purpose alignment without power-of-2 constraint
template <typename T, typename U>
static inline T align2(T value, U alignment)
{
return ((value + alignment - 1) / alignment) * alignment;
}
// General purpose downward alignment without power-of-2 constraint
template <typename T, typename U>
static inline T align_down2(T value, U alignment)
{
return (value / alignment) * alignment;
}
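// Worked examples (illustrative only): non power-of-2 alignments are accepted,
// e.g. align2(13, 8) == 16, align2(10, 6) == 12, align_down2(13, 8) == 8, align_down2(10, 6) == 6.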
// Copy memory in inverse direction from source
// Used to negatively scale the x axis while transferring image data
template <typename Ts = u8, typename Td = Ts>
static void memcpy_r(void* dst, void* src, usz size)
{
for (u32 i = 0; i < size; i++)
{
*(static_cast<Td*>(dst) + i) = *(static_cast<Ts*>(src) - i);
}
}
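// Note (illustrative): unlike a forward memcpy, the source pointer walks backwards (src[0], src[-1], ...),
// which mirrors a row horizontally; presumably the caller points src at the last element of the range.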
// Returns interleaved bits of X|Y|Z used as Z-order curve indices
static inline u32 calculate_z_index(u32 x, u32 y, u32 z, u32 log2_width, u32 log2_height, u32 log2_depth)
{
AUDIT(x < (1u << log2_width) && y < (1u << log2_height) && z < (1u << log2_depth));
// offset = X' | Y' | Z' which are x,y,z bits interleaved
u32 offset = 0;
u32 shift_count = 0;
do
{
if (log2_width)
{
offset |= (x & 0x1) << shift_count++;
x >>= 1;
log2_width--;
}
if (log2_height)
{
offset |= (y & 0x1) << shift_count++;
y >>= 1;
log2_height--;
}
if (log2_depth)
{
offset |= (z & 0x1) << shift_count++;
z >>= 1;
log2_depth--;
}
}
while (x | y | z);
return offset;
}
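// Worked example (illustrative): for a 4x4x1 surface (log2 sizes 2, 2, 0) the indices follow the
// classic Z-order curve, e.g. (0,0) -> 0, (1,0) -> 1, (0,1) -> 2, (1,1) -> 3, (2,2) -> 12.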
/* Note: What the PS3 calls swizzling in this case is actually Z-ordering / Morton ordering of pixels
* - Input can be swizzled or linear, bool flag handles conversion to and from
* - It will handle any width and height that are a power of 2, square or non square
* Restriction: It has mixed results if the height or width is not a power of 2
* Restriction: Only works with 2D surfaces
*/
template <typename T, bool input_is_swizzled>
void convert_linear_swizzle(const void* input_pixels, void* output_pixels, u16 width, u16 height, u32 pitch)
{
u32 log2width = ceil_log2(width);
u32 log2height = ceil_log2(height);
// Max mask possible for square texture
u32 x_mask = 0x55555555;
u32 y_mask = 0xAAAAAAAA;
// We have to limit the masks to the lower of the two dimensions to allow for non-square textures
u32 limit_mask = (log2width < log2height) ? log2width : log2height;
// double the limit mask to account for bits in both x and y
limit_mask = 1 << (limit_mask << 1);
// x_mask: bits above the limit are 1's for x-carry
x_mask = (x_mask | ~(limit_mask - 1));
// y_mask: bits above the limit are 0'd, as we use a different method for y-carry over
y_mask = (y_mask & (limit_mask - 1));
u32 offs_y = 0;
u32 offs_x = 0;
u32 offs_x0 = 0; //total y-carry offset for x
u32 y_incr = limit_mask;
// NOTE: The swizzled area is always a POT region and we must scan all of it to fill in the linear.
// It is assumed that there is no padding on the linear side for simplicity - backend upload/download will crop as needed.
// Remember, in cases of swizzling (and also tiled addressing) it is possible for tiled pixels to fall outside of their linear memory region.
const u32 pitch_in_blocks = pitch / sizeof(T);
u32 row_offset = 0;
if constexpr (!input_is_swizzled)
{
for (int y = 0; y < height; ++y, row_offset += pitch_in_blocks)
{
auto src = static_cast<const T*>(input_pixels) + row_offset;
auto dst = static_cast<T*>(output_pixels) + offs_y;
offs_x = offs_x0;
for (int x = 0; x < width; ++x)
{
dst[offs_x] = src[x];
offs_x = (offs_x - x_mask) & x_mask;
}
offs_y = (offs_y - y_mask) & y_mask;
if (offs_y == 0)
{
offs_x0 += y_incr;
}
}
}
else
{
for (int y = 0; y < height; ++y, row_offset += pitch_in_blocks)
{
auto src = static_cast<const T*>(input_pixels) + offs_y;
auto dst = static_cast<T*>(output_pixels) + row_offset;
offs_x = offs_x0;
for (int x = 0; x < width; ++x)
{
dst[x] = src[offs_x];
offs_x = (offs_x - x_mask) & x_mask;
}
offs_y = (offs_y - y_mask) & y_mask;
if (offs_y == 0)
{
offs_x0 += y_incr;
}
}
}
}
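// Usage sketch (illustrative values only, not from the original header): de-swizzling a 256x256
// RGBA8 texture into a linear buffer with a 1024-byte row pitch would look like
//   convert_linear_swizzle<u32, true>(swizzled_src, linear_dst, 256, 256, 1024);
// while the <T, false> instantiation converts a linear input into swizzled output.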
/**
* Write swizzled data to linear memory with support for 3 dimensions
* Z ordering is done in all 3 planes independently with a unit being a 2x2 block per-plane
* A unit in 3d textures is a group of 2x2x2 texels advancing towards depth in units of 2x2x1 blocks
* i.e. 32 texels per "unit"
*/
template <typename T>
void convert_linear_swizzle_3d(const void* input_pixels, void* output_pixels, u16 width, u16 height, u16 depth)
{
if (depth == 1)
{
convert_linear_swizzle<T, true>(input_pixels, output_pixels, width, height, width * sizeof(T));
return;
}
auto src = static_cast<const T*>(input_pixels);
auto dst = static_cast<T*>(output_pixels);
const u32 log2_w = ceil_log2(width);
const u32 log2_h = ceil_log2(height);
const u32 log2_d = ceil_log2(depth);
for (u32 z = 0; z < depth; ++z)
{
for (u32 y = 0; y < height; ++y)
{
for (u32 x = 0; x < width; ++x)
{
*dst++ = src[calculate_z_index(x, y, z, log2_w, log2_h, log2_d)];
}
}
}
}
void convert_scale_image(u8 *dst, AVPixelFormat dst_format, int dst_width, int dst_height, int dst_pitch,
const u8 *src, AVPixelFormat src_format, int src_width, int src_height, int src_pitch, int src_slice_h, bool bilinear);
void clip_image(u8 *dst, const u8 *src, int clip_x, int clip_y, int clip_w, int clip_h, int bpp, int src_pitch, int dst_pitch);
void clip_image_may_overlap(u8 *dst, const u8 *src, int clip_x, int clip_y, int clip_w, int clip_h, int bpp, int src_pitch, int dst_pitch, u8* buffer);
std::array<float, 4> get_constant_blend_colors();
/**
* Shuffle texel layout from xyzw to wzyx
* TODO: Variable src/dst and optional se conversion
*/
template <typename T>
void shuffle_texel_data_wzyx(void* data, u32 row_pitch_in_bytes, u16 row_length_in_texels, u16 num_rows)
{
char* raw_src = static_cast<char*>(data);
T tmp[4];
for (u16 n = 0; n < num_rows; ++n)
{
T* src = reinterpret_cast<T*>(raw_src);
raw_src += row_pitch_in_bytes;
for (u16 m = 0; m < row_length_in_texels; ++m)
{
tmp[0] = src[3];
tmp[1] = src[2];
tmp[2] = src[1];
tmp[3] = src[0];
src[0] = tmp[0];
src[1] = tmp[1];
src[2] = tmp[2];
src[3] = tmp[3];
src += 4;
}
}
}
/**
* Clips a rect so that it never falls outside the parent region
* attempt_fit: allows resizing of the requested region. If false, failure to fit will result in the child rect being pinned to (0, 0)
*/
template <typename T>
std::tuple<T, T, T, T> clip_region(T parent_width, T parent_height, T clip_x, T clip_y, T clip_width, T clip_height, bool attempt_fit)
{
T x = clip_x;
T y = clip_y;
T width = clip_width;
T height = clip_height;
if ((clip_x + clip_width) > parent_width)
{
if (clip_x >= parent_width)
{
if (clip_width < parent_width)
width = clip_width;
else
width = parent_width;
x = static_cast<T>(0);
}
else
{
if (attempt_fit)
width = parent_width - clip_x;
else
width = std::min(clip_width, parent_width);
}
}
if ((clip_y + clip_height) > parent_height)
{
if (clip_y >= parent_height)
{
if (clip_height < parent_height)
height = clip_height;
else
height = parent_height;
y = static_cast<T>(0);
}
else
{
if (attempt_fit)
height = parent_height - clip_y;
else
height = std::min(clip_height, parent_height);
}
}
return std::make_tuple(x, y, width, height);
}
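// Worked example (illustrative): clip_region<u16>(100, 100, 90, 0, 20, 50, true) yields
// (90, 0, 10, 50), shrinking the overhanging width to fit; with attempt_fit = false the requested
// width is kept (only limited to the parent's full width), yielding (90, 0, 20, 50).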
/**
* Extracts from 'parent' a region that fits in 'child'
*/
static inline std::tuple<position2u, position2u, size2u> intersect_region(
u32 parent_address, u16 parent_w, u16 parent_h,
u32 child_address, u16 child_w, u16 child_h,
u32 pitch)
{
if (child_address < parent_address)
{
const auto offset = parent_address - child_address;
const auto src_x = 0u;
const auto src_y = 0u;
const auto dst_y = (offset / pitch);
const auto dst_x = (offset % pitch);
const auto w = std::min<u32>(parent_w, std::max<u32>(child_w, dst_x) - dst_x); // Clamp negatives to 0!
const auto h = std::min<u32>(parent_h, std::max<u32>(child_h, dst_y) - dst_y);
return std::make_tuple<position2u, position2u, size2u>({ src_x, src_y }, { dst_x, dst_y }, { w, h });
}
else
{
const auto offset = child_address - parent_address;
const auto src_y = (offset / pitch);
const auto src_x = (offset % pitch);
const auto dst_x = 0u;
const auto dst_y = 0u;
const auto w = std::min<u32>(child_w, std::max<u32>(parent_w, src_x) - src_x);
const auto h = std::min<u32>(child_h, std::max<u32>(parent_h, src_y) - src_y);
return std::make_tuple<position2u, position2u, size2u>({ src_x, src_y }, { dst_x, dst_y }, { w, h });
}
}
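// Worked example (illustrative values): with the parent at 0x1000 (64x64), the child at 0x1100
// (32x32) and a pitch of 0x100, the child starts one row below the parent, so the result is
// src = (0, 1), dst = (0, 0), size = (32, 32).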
static inline f32 get_resolution_scale()
{
return g_cfg.video.strict_rendering_mode ? 1.f : (g_cfg.video.resolution_scale_percent / 100.f);
}
static inline int get_resolution_scale_percent()
{
return g_cfg.video.strict_rendering_mode ? 100 : g_cfg.video.resolution_scale_percent;
}
template <bool clamp = false>
static inline const std::pair<u16, u16> apply_resolution_scale(u16 width, u16 height, u16 ref_width = 0, u16 ref_height = 0)
{
ref_width = (ref_width)? ref_width : width;
ref_height = (ref_height)? ref_height : height;
const u16 ref = std::max(ref_width, ref_height);
if (ref > g_cfg.video.min_scalable_dimension)
{
// Upscale both width and height
width = (get_resolution_scale_percent() * width) / 100;
height = (get_resolution_scale_percent() * height) / 100;
if constexpr (clamp)
{
width = std::max<u16>(width, 1);
height = std::max<u16>(height, 1);
}
}
return { width, height };
}
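// Example (depends entirely on user configuration, shown with assumed settings): with a 150%
// resolution scale and min_scalable_dimension below 720, apply_resolution_scale(1280, 720) returns
// { 1920, 1080 }; a target whose larger dimension does not exceed the threshold is returned unchanged.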
template <bool clamp = false>
static inline const std::pair<u16, u16> apply_inverse_resolution_scale(u16 width, u16 height)
{
// Inverse scale
auto width_ = (width * 100) / get_resolution_scale_percent();
auto height_ = (height * 100) / get_resolution_scale_percent();
if constexpr (clamp)
{
width_ = std::max<u16>(width_, 1);
height_ = std::max<u16>(height_, 1);
}
if (std::max(width_, height_) > g_cfg.video.min_scalable_dimension)
{
return { width_, height_ };
}
return { width, height };
}
/**
* Calculates the regions used for memory transfer between rendertargets on succession events
* Returns <src_w, src_h, dst_w, dst_h>
*/
template <typename SurfaceType>
std::tuple<u16, u16, u16, u16> get_transferable_region(const SurfaceType* surface)
{
auto src = static_cast<const SurfaceType*>(surface->old_contents[0].source);
auto area1 = src->get_normalized_memory_area();
auto area2 = surface->get_normalized_memory_area();
auto w = std::min(area1.x2, area2.x2);
auto h = std::min(area1.y2, area2.y2);
const auto src_scale_x = src->get_bpp() * src->samples_x;
const auto src_scale_y = src->samples_y;
const auto dst_scale_x = surface->get_bpp() * surface->samples_x;
const auto dst_scale_y = surface->samples_y;
const u16 src_w = u16(w / src_scale_x);
const u16 src_h = u16(h / src_scale_y);
const u16 dst_w = u16(w / dst_scale_x);
const u16 dst_h = u16(h / dst_scale_y);
return std::make_tuple(src_w, src_h, dst_w, dst_h);
}
template <typename SurfaceType>
inline bool pitch_compatible(const SurfaceType* a, const SurfaceType* b)
{
if (a->get_surface_height() == 1 || b->get_surface_height() == 1)
return true;
return (a->get_rsx_pitch() == b->get_rsx_pitch());
}
template <bool __is_surface = true, typename SurfaceType>
inline bool pitch_compatible(const SurfaceType* surface, u32 pitch_required, u16 height_required)
{
if constexpr (__is_surface)
{
if (height_required == 1 || surface->get_surface_height() == 1)
return true;
}
else
{
if (height_required == 1 || surface->get_height() == 1)
return true;
}
return (surface->get_rsx_pitch() == pitch_required);
}
/**
* Remove restart index and emulate using degenerate triangles
* Can be used as a workaround when restart_index doesn't work well
* dst should be able to hold at least 2xcount entries
*/
template <typename T>
u32 remove_restart_index(T* dst, T* src, int count, T restart_index)
{
// Converts a stream e.g [1, 2, 3, -1, 4, 5, 6] to a stream with degenerate splits
// Output is e.g [1, 2, 3, 3, 3, 4, 4, 5, 6] (5 bogus triangles)
T last_index{}, index;
u32 dst_index = 0;
for (int n = 0; n < count;)
{
index = src[n];
if (index == restart_index)
{
for (; n < count; ++n)
{
if (src[n] != restart_index)
break;
}
if (n == count)
return dst_index;
dst[dst_index++] = last_index; //Duplicate last
if ((dst_index & 1) == 0)
//Duplicate last again to fix face winding
dst[dst_index++] = last_index;
last_index = src[n];
dst[dst_index++] = last_index; //Duplicate next
}
else
{
dst[dst_index++] = index;
last_index = index;
++n;
}
}
return dst_index;
}
// The rsx internally adds the 'data_base_offset' and the 'vert_offset' and masks it
// before actually attempting to translate to the internal address. Seen happening heavily in R&C games
static inline u32 get_vertex_offset_from_base(u32 vert_data_base_offset, u32 vert_base_offset)
{
return (vert_data_base_offset + vert_base_offset) & 0xFFFFFFF;
}
// Similar to vertex_offset_base calculation, the rsx internally adds and masks index
// before using
static inline u32 get_index_from_base(u32 index, u32 index_base)
{
return (index + index_base) & 0x000FFFFF;
}
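// Worked example (illustrative): the 20-bit wrap gives get_index_from_base(0x000FFFFE, 4) == 2,
// and the 28-bit mask above likewise makes vertex offsets wrap instead of stepping past the
// 256 MiB boundary.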
template <uint integer, uint frac, bool sign = true, typename To = f32>
static inline To decode_fxp(u32 bits)
{
static_assert(u64{sign} + integer + frac <= 32, "Invalid decode_fxp range");
// Classic fixed point, see PGRAPH section of nouveau docs for TEX_FILTER (lod_bias) and TEX_CONTROL (min_lod, max_lod)
// Technically min/max lod are fixed 4.8 but a 5.8 decoder should work just as well since sign bit is 0
if constexpr (sign) if (bits & (1 << (integer + frac)))
{
bits = (0 - bits) & (~0u >> (31 - (integer + frac)));
return bits / (-To(1u << frac));
}
return bits / To(1u << frac);
}
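// Examples (values follow from the definition): with the default signed 4.8 layout,
// decode_fxp<4, 8>(0x100) == 1.0f and decode_fxp<4, 8>(0x080) == 0.5f; a raw value with bit 12
// (the sign bit) set decodes as negative.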
static inline f32 decode_fp16(u16 bits)
{
if (bits == 0)
{
return 0.f;
}
// Extract components
unsigned int sign = (bits >> 15) & 1;
unsigned int exp = (bits >> 10) & 0x1f;
unsigned int mantissa = bits & 0x3ff;
float base = (sign != 0) ? -1.f : 1.f;
float scale;
if (exp == 0x1F)
{
// specials (nan, inf)
u32 nan = 0x7F800000 | mantissa;
nan |= (sign << 31);
return std::bit_cast<f32>(nan);
}
else if (exp > 0)
{
// normal number, borrows a '1' from the hidden mantissa bit
base *= std::exp2f(f32(exp) - 15.f);
scale = (float(mantissa) / 1024.f) + 1.f;
}
else
{
// subnormal number, borrows a '0' from the hidden mantissa bit
base *= std::exp2f(1.f - 15.f);
scale = float(mantissa) / 1024.f;
}
return base * scale;
}
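// Examples (values follow from the definition): decode_fp16(0x3C00) == 1.0f and
// decode_fp16(0xC000) == -2.0f; inputs with exponent 0x1F are forwarded as f32 infinity/NaN
// with the sign preserved.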
template<bool _signed = false>
u16 encode_fx12(f32 value)
{
u16 raw = u16(std::abs(value) * 256.);
if constexpr (!_signed)
{
return raw;
}
else
{
if (value >= 0.f) [[likely]]
{
return raw;
}
else
{
return u16(0 - raw) & 0x1fff;
}
}
}
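// Examples (values follow from the definition): encode_fx12(1.0f) == 0x100 and
// encode_fx12<true>(-1.0f) == 0x1f00; the magnitude is scaled by 256 and negative values are
// folded into 13 bits of two's-complement.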
}
| 21,327 | C++ | .h | 700 | 27.114286 | 152 | 0.656067 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

5,989 | rsx_cache.h | RPCS3_rpcs3/rpcs3/Emu/RSX/rsx_cache.h |
#pragma once
#include "Utilities/File.h"
#include "Utilities/lockless.h"
#include "Utilities/Thread.h"
#include "Common/bitfield.hpp"
#include "Common/unordered_map.hpp"
#include "Emu/System.h"
#include "Emu/cache_utils.hpp"
#include "Program/ProgramStateCache.h"
#include "Common/texture_cache_checker.h"
#include "Overlays/Shaders/shader_loading_dialog.h"
#include <chrono>
#include "util/sysinfo.hpp"
#include "util/fnv_hash.hpp"
namespace rsx
{
template <typename pipeline_storage_type, typename backend_storage>
class shaders_cache
{
using unpacked_type = lf_fifo<std::tuple<pipeline_storage_type, RSXVertexProgram, RSXFragmentProgram>, 1000>; // TODO: Determine best size
struct pipeline_data
{
u64 vertex_program_hash;
u64 fragment_program_hash;
u64 pipeline_storage_hash;
u32 vp_ctrl0;
u32 vp_ctrl1;
u32 vp_texture_dimensions;
u32 vp_reserved_0;
u64 vp_instruction_mask[9];
u32 vp_base_address;
u32 vp_entry;
u16 vp_jump_table[32];
u16 vp_multisampled_textures;
u16 vp_reserved_1;
u32 vp_reserved_2;
u32 fp_ctrl;
u32 fp_texture_dimensions;
u32 fp_texcoord_control;
u16 fp_height;
u16 fp_pixel_layout;
u16 fp_lighting_flags;
u16 fp_shadow_textures;
u16 fp_redirected_textures;
u16 fp_multisampled_textures;
u64 fp_reserved_0;
pipeline_storage_type pipeline_properties;
};
std::string version_prefix;
std::string root_path;
std::string pipeline_class_name;
lf_fifo<std::unique_ptr<u8[]>, 100> fragment_program_data;
backend_storage& m_storage;
static std::string get_message(u32 index, u32 processed, u32 entry_count)
{
return fmt::format("%s pipeline object %u of %u", index == 0 ? "Loading" : "Compiling", processed, entry_count);
}
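// Examples (values follow from the format string): get_message(0, 10, 100) returns
// "Loading pipeline object 10 of 100" and get_message(1, 10, 100) returns
// "Compiling pipeline object 10 of 100".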
void load_shaders(uint nb_workers, unpacked_type& unpacked, std::string& directory_path, std::vector<fs::dir_entry>& entries, u32 entry_count,
shader_loading_dialog* dlg)
{
atomic_t<u32> processed(0);
std::function<void(u32)> shader_load_worker = [&](u32 stop_at)
{
u32 pos;
// Processed is incremented before work starts in order to avoid two workers working on the same shader
while (((pos = processed++) < stop_at) && !Emu.IsStopped())
{
fs::dir_entry tmp = entries[pos];
const auto filename = directory_path + "/" + tmp.name;
fs::file f(filename);
if (!f)
{
// Unexpected error, but avoid crash
continue;
}
if (f.size() != sizeof(pipeline_data))
{
rsx_log.error("Removing cached pipeline object %s since it's not binary compatible with the current shader cache", tmp.name.c_str());
fs::remove_file(filename);
continue;
}
pipeline_data pdata{};
f.read(&pdata, f.size());
auto entry = unpack(pdata);
if (std::get<1>(entry).data.empty() || !std::get<2>(entry).ucode_length)
{
continue;
}
m_storage.preload_programs(std::get<1>(entry), std::get<2>(entry));
unpacked[unpacked.push_begin()] = std::move(entry);
}
// Do not account for an extra shader that was never processed
processed--;
};
await_workers(nb_workers, 0, shader_load_worker, processed, entry_count, dlg);
}
template <typename... Args>
void compile_shaders(uint nb_workers, unpacked_type& unpacked, u32 entry_count, shader_loading_dialog* dlg, Args&&... args)
{
atomic_t<u32> processed(0);
std::function<void(u32)> shader_comp_worker = [&](u32 stop_at)
{
u32 pos;
// Processed is incremented before work starts in order to avoid two workers working on the same shader
while (((pos = processed++) < stop_at) && !Emu.IsStopped())
{
auto& entry = unpacked[pos];
m_storage.add_pipeline_entry(std::get<1>(entry), std::get<2>(entry), std::get<0>(entry), std::forward<Args>(args)...);
}
// Do not account for an extra shader that was never processed
processed--;
};
await_workers(nb_workers, 1, shader_comp_worker, processed, entry_count, dlg);
}
void await_workers(uint nb_workers, u8 step, std::function<void(u32)>& worker, atomic_t<u32>& processed, u32 entry_count, shader_loading_dialog* dlg)
{
if (nb_workers == 1)
{
steady_clock::time_point last_update;
// Call the worker function directly, stopping it prematurely to be able to update the screen
u32 stop_at = 0;
do
{
stop_at = std::min(stop_at + 10, entry_count);
worker(stop_at);
// Only update the screen at about 60fps since updating it every time slows down the process
steady_clock::time_point now = steady_clock::now();
if ((std::chrono::duration_cast<std::chrono::milliseconds>(now - last_update) > 16ms) || (stop_at == entry_count))
{
dlg->update_msg(step, get_message(step, stop_at, entry_count));
dlg->set_value(step, stop_at);
last_update = now;
}
} while (stop_at < entry_count && !Emu.IsStopped());
}
else
{
named_thread_group workers("RSX Worker ", nb_workers, [&]()
{
worker(entry_count);
});
u32 current_progress = 0;
u32 last_update_progress = 0;
while ((current_progress < entry_count) && !Emu.IsStopped())
{
thread_ctrl::wait_for(16'000); // Around 60fps should be good enough
if (Emu.IsStopped()) break;
current_progress = std::min(processed.load(), entry_count);
if (last_update_progress != current_progress)
{
last_update_progress = current_progress;
dlg->update_msg(step, get_message(step, current_progress, entry_count));
dlg->set_value(step, current_progress);
}
}
}
if (!Emu.IsStopped())
{
ensure(processed == entry_count);
}
}
public:
shaders_cache(backend_storage& storage, std::string pipeline_class, std::string version_prefix_str = "v1")
: version_prefix(std::move(version_prefix_str))
, pipeline_class_name(std::move(pipeline_class))
, m_storage(storage)
{
if (!g_cfg.video.disable_on_disk_shader_cache)
{
if (std::string cache_path = rpcs3::cache::get_ppu_cache(); !cache_path.empty())
{
root_path = std::move(cache_path) + "shaders_cache/";
}
}
}
template <typename... Args>
void load(shader_loading_dialog* dlg, Args&& ...args)
{
if (root_path.empty())
{
return;
}
std::string directory_path = root_path + "/pipelines/" + pipeline_class_name + "/" + version_prefix;
fs::dir root = fs::dir(directory_path);
if (!root)
{
fs::create_path(directory_path);
fs::create_path(root_path + "/raw");
return;
}
std::vector<fs::dir_entry> entries;
for (auto&& tmp : root)
{
if (tmp.is_directory)
continue;
entries.push_back(tmp);
}
u32 entry_count = ::size32(entries);
if (!entry_count)
return;
root.rewind();
// Progress dialog
std::unique_ptr<shader_loading_dialog> fallback_dlg;
if (!dlg)
{
fallback_dlg = std::make_unique<shader_loading_dialog>();
dlg = fallback_dlg.get();
}
dlg->create("Preloading cached shaders from disk.\nPlease wait...", "Shader Compilation");
dlg->set_limit(0, entry_count);
dlg->set_limit(1, entry_count);
dlg->update_msg(0, get_message(0, 0, entry_count));
dlg->update_msg(1, get_message(1, 0, entry_count));
// Preload everything needed to compile the shaders
unpacked_type unpacked;
uint nb_workers = g_cfg.video.renderer == video_renderer::vulkan ? utils::get_thread_count() : 1;
load_shaders(nb_workers, unpacked, directory_path, entries, entry_count, dlg);
// Account for any invalid entries
entry_count = unpacked.size();
compile_shaders(nb_workers, unpacked, entry_count, dlg, std::forward<Args>(args)...);
dlg->refresh();
dlg->close();
}
void store(const pipeline_storage_type &pipeline, const RSXVertexProgram &vp, const RSXFragmentProgram &fp)
{
if (root_path.empty())
{
return;
}
if (vp.jump_table.size() > 32)
{
rsx_log.error("shaders_cache: vertex program has more than 32 jump addresses. Entry not saved to cache");
return;
}
pipeline_data data = pack(pipeline, vp, fp);
std::string fp_name = root_path + "/raw/" + fmt::format("%llX.fp", data.fragment_program_hash);
std::string vp_name = root_path + "/raw/" + fmt::format("%llX.vp", data.vertex_program_hash);
// Writeback to cache either if file does not exist or it is invalid (unexpected size)
// Note: fs::write_file is not atomic; if the process is terminated midway, an empty file is created
if (fs::stat_t s{}; !fs::get_stat(fp_name, s) || s.size != fp.ucode_length)
{
fs::write_file(fp_name, fs::rewrite, fp.get_data(), fp.ucode_length);
}
if (fs::stat_t s{}; !fs::get_stat(vp_name, s) || s.size != vp.data.size() * sizeof(u32))
{
fs::write_file(vp_name, fs::rewrite, vp.data);
}
u64 state_hash = 0;
state_hash ^= rpcs3::hash_base<u32>(data.vp_ctrl0);
state_hash ^= rpcs3::hash_base<u32>(data.vp_ctrl1);
state_hash ^= rpcs3::hash_base<u32>(data.fp_ctrl);
state_hash ^= rpcs3::hash_base<u32>(data.vp_texture_dimensions);
state_hash ^= rpcs3::hash_base<u32>(data.fp_texture_dimensions);
state_hash ^= rpcs3::hash_base<u32>(data.fp_texcoord_control);
state_hash ^= rpcs3::hash_base<u16>(data.fp_height);
state_hash ^= rpcs3::hash_base<u16>(data.fp_pixel_layout);
state_hash ^= rpcs3::hash_base<u16>(data.fp_lighting_flags);
state_hash ^= rpcs3::hash_base<u16>(data.fp_shadow_textures);
state_hash ^= rpcs3::hash_base<u16>(data.fp_redirected_textures);
state_hash ^= rpcs3::hash_base<u16>(data.vp_multisampled_textures);
state_hash ^= rpcs3::hash_base<u16>(data.fp_multisampled_textures);
const std::string pipeline_file_name = fmt::format("%llX+%llX+%llX+%llX.bin", data.vertex_program_hash, data.fragment_program_hash, data.pipeline_storage_hash, state_hash);
const std::string pipeline_path = root_path + "/pipelines/" + pipeline_class_name + "/" + version_prefix + "/" + pipeline_file_name;
fs::write_file(pipeline_path, fs::rewrite, &data, sizeof(data));
}
RSXVertexProgram load_vp_raw(u64 program_hash) const
{
RSXVertexProgram vp = {};
fs::file f(fmt::format("%s/raw/%llX.vp", root_path, program_hash));
if (f) f.read(vp.data, f.size() / sizeof(u32));
return vp;
}
RSXFragmentProgram load_fp_raw(u64 program_hash)
{
fs::file f(fmt::format("%s/raw/%llX.fp", root_path, program_hash));
RSXFragmentProgram fp = {};
const u32 size = fp.ucode_length = f ? ::size32(f) : 0;
if (!size)
{
return fp;
}
auto buf = std::make_unique<u8[]>(size);
fp.data = buf.get();
f.read(buf.get(), size);
fragment_program_data[fragment_program_data.push_begin()] = std::move(buf);
return fp;
}
std::tuple<pipeline_storage_type, RSXVertexProgram, RSXFragmentProgram> unpack(pipeline_data &data)
{
std::tuple<pipeline_storage_type, RSXVertexProgram, RSXFragmentProgram> result;
auto& [pipeline, vp, fp] = result;
vp = load_vp_raw(data.vertex_program_hash);
fp = load_fp_raw(data.fragment_program_hash);
pipeline = data.pipeline_properties;
vp.ctrl = data.vp_ctrl0;
vp.output_mask = data.vp_ctrl1;
vp.texture_state.texture_dimensions = data.vp_texture_dimensions;
vp.texture_state.multisampled_textures = data.vp_multisampled_textures;
vp.base_address = data.vp_base_address;
vp.entry = data.vp_entry;
pack_bitset<max_vertex_program_instructions>(vp.instruction_mask, data.vp_instruction_mask);
for (u8 index = 0; index < 32; ++index)
{
const auto address = data.vp_jump_table[index];
if (address == u16{umax})
{
// End of list marker
break;
}
vp.jump_table.emplace(address);
}
fp.ctrl = data.fp_ctrl;
fp.texture_state.texture_dimensions = data.fp_texture_dimensions;
fp.texture_state.shadow_textures = data.fp_shadow_textures;
fp.texture_state.redirected_textures = data.fp_redirected_textures;
fp.texture_state.multisampled_textures = data.fp_multisampled_textures;
fp.texcoord_control_mask = data.fp_texcoord_control;
fp.two_sided_lighting = !!(data.fp_lighting_flags & 0x1);
return result;
}
pipeline_data pack(const pipeline_storage_type &pipeline, const RSXVertexProgram &vp, const RSXFragmentProgram &fp)
{
pipeline_data data_block = {};
data_block.pipeline_properties = pipeline;
data_block.vertex_program_hash = m_storage.get_hash(vp);
data_block.fragment_program_hash = m_storage.get_hash(fp);
data_block.pipeline_storage_hash = m_storage.get_hash(pipeline);
data_block.vp_ctrl0 = vp.ctrl;
data_block.vp_ctrl1 = vp.output_mask;
data_block.vp_texture_dimensions = vp.texture_state.texture_dimensions;
data_block.vp_multisampled_textures = vp.texture_state.multisampled_textures;
data_block.vp_base_address = vp.base_address;
data_block.vp_entry = vp.entry;
unpack_bitset<max_vertex_program_instructions>(vp.instruction_mask, data_block.vp_instruction_mask);
u8 index = 0;
while (index < 32)
{
if (!index && !vp.jump_table.empty())
{
for (auto &address : vp.jump_table)
{
data_block.vp_jump_table[index++] = static_cast<u16>(address);
}
}
else
{
// End of list marker
data_block.vp_jump_table[index] = u16{umax};
break;
}
}
data_block.fp_ctrl = fp.ctrl;
data_block.fp_texture_dimensions = fp.texture_state.texture_dimensions;
data_block.fp_texcoord_control = fp.texcoord_control_mask;
data_block.fp_lighting_flags = u16(fp.two_sided_lighting);
data_block.fp_shadow_textures = fp.texture_state.shadow_textures;
data_block.fp_redirected_textures = fp.texture_state.redirected_textures;
data_block.fp_multisampled_textures = fp.texture_state.multisampled_textures;
return data_block;
}
};
namespace vertex_cache
{
// A null vertex cache
template <typename storage_type>
class default_vertex_cache
{
public:
virtual ~default_vertex_cache() = default;
virtual const storage_type* find_vertex_range(u32 /*local_addr*/, u32 /*data_length*/) { return nullptr; }
virtual void store_range(u32 /*local_addr*/, u32 /*data_length*/, u32 /*offset_in_heap*/) {}
virtual void purge() {}
};
struct uploaded_range
{
uptr local_address;
u32 offset_in_heap;
u32 data_length;
};
// A weak vertex cache with no data checks or memory range locks
// Of limited use since contents are only guaranteed to be valid once per frame
// Supports up to 1GiB block lengths if typed and the full 4GiB otherwise.
// Using a 1:1 hash-value with robin-hood is 2x faster than what we had before with std-map-of-arrays.
class weak_vertex_cache : public default_vertex_cache<uploaded_range>
{
using storage_type = uploaded_range;
private:
rsx::unordered_map<uptr, storage_type> vertex_ranges;
FORCE_INLINE u64 hash(u32 local_addr, u32 data_length) const
{
return u64(local_addr) | (u64(data_length) << 32);
}
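// Note (illustrative): the key packs the 32-bit local address into the low half and the length into
// the high half, e.g. hash(0x00C01000, 0x40) == 0x00000040'00C01000, so identical ranges always map
// to the same slot.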
public:
const storage_type* find_vertex_range(u32 local_addr, u32 data_length) override
{
const auto key = hash(local_addr, data_length);
const auto found = vertex_ranges.find(key);
if (found == vertex_ranges.end())
{
return nullptr;
}
return std::addressof(found->second);
}
void store_range(u32 local_addr, u32 data_length, u32 offset_in_heap) override
{
storage_type v = {};
v.data_length = data_length;
v.local_address = local_addr;
v.offset_in_heap = offset_in_heap;
const auto key = hash(local_addr, data_length);
vertex_ranges[key] = v;
}
void purge() override
{
vertex_ranges.clear();
}
};
}
}
| 15,658 | C++ | .h | 419 | 32.968974 | 175 | 0.68159 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

5,990 | RSXThread.h | RPCS3_rpcs3/rpcs3/Emu/RSX/RSXThread.h |
#pragma once
#include <thread>
#include <queue>
#include <deque>
#include <variant>
#include <stack>
#include <unordered_map>
#include "GCM.h"
#include "rsx_cache.h"
#include "RSXFIFO.h"
#include "RSXOffload.h"
#include "RSXZCULL.h"
#include "rsx_utils.h"
#include "Common/bitfield.hpp"
#include "Common/profiling_timer.hpp"
#include "Common/texture_cache_types.h"
#include "Program/RSXVertexProgram.h"
#include "Program/RSXFragmentProgram.h"
#include "Utilities/Thread.h"
#include "Utilities/geometry.h"
#include "Capture/rsx_trace.h"
#include "Capture/rsx_replay.h"
#include "Emu/Cell/lv2/sys_rsx.h"
#include "Emu/IdManager.h"
#include "Core/RSXDisplay.h"
#include "Core/RSXFrameBuffer.h"
#include "Core/RSXContext.h"
#include "Core/RSXIOMap.hpp"
#include "Core/RSXVertexTypes.h"
#include "NV47/FW/GRAPH_backend.h"
extern atomic_t<bool> g_user_asked_for_frame_capture;
extern atomic_t<bool> g_disable_frame_limit;
extern rsx::frame_trace_data frame_debug;
extern rsx::frame_capture_data frame_capture;
namespace rsx
{
class RSXDMAWriter;
struct context;
namespace overlays
{
class display_manager;
}
enum framebuffer_creation_context : u8
{
context_draw = 0,
context_clear_color = 1,
context_clear_depth = 2,
context_clear_all = context_clear_color | context_clear_depth
};
enum pipeline_state : u32
{
fragment_program_ucode_dirty = (1 << 0), // Fragment program ucode changed
vertex_program_ucode_dirty = (1 << 1), // Vertex program ucode changed
fragment_program_state_dirty = (1 << 2), // Fragment program state changed
vertex_program_state_dirty = (1 << 3), // Vertex program state changed
fragment_state_dirty = (1 << 4), // Fragment state changed (alpha test, etc)
vertex_state_dirty = (1 << 5), // Vertex state changed (scale_offset, clip planes, etc)
transform_constants_dirty = (1 << 6), // Transform constants changed
fragment_constants_dirty = (1 << 7), // Fragment constants changed
framebuffer_reads_dirty = (1 << 8), // Framebuffer contents changed
fragment_texture_state_dirty = (1 << 9), // Fragment texture parameters changed
vertex_texture_state_dirty = (1 << 10), // Vertex texture parameters changed
scissor_config_state_dirty = (1 << 11), // Scissor region changed
zclip_config_state_dirty = (1 << 12), // Viewport Z clip changed
scissor_setup_invalid = (1 << 13), // Scissor configuration is broken
scissor_setup_clipped = (1 << 14), // Scissor region is cropped by viewport constraint
polygon_stipple_pattern_dirty = (1 << 15), // Rasterizer stippling pattern changed
line_stipple_pattern_dirty = (1 << 16), // Line stippling pattern changed
push_buffer_arrays_dirty = (1 << 17), // Push buffers have data written to them (immediate mode vertex buffers)
polygon_offset_state_dirty = (1 << 18), // Polygon offset config was changed
depth_bounds_state_dirty = (1 << 19), // Depth bounds configuration changed
pipeline_config_dirty = (1 << 20), // Generic pipeline configuration changes. Shader peek hint.
rtt_config_dirty = (1 << 21), // Render target configuration changed
rtt_config_contested = (1 << 22), // Render target configuration is indeterminate
rtt_config_valid = (1 << 23), // Render target configuration is valid
rtt_cache_state_dirty = (1 << 24), // Texture cache state is indeterminate
fragment_program_dirty = fragment_program_ucode_dirty | fragment_program_state_dirty,
vertex_program_dirty = vertex_program_ucode_dirty | vertex_program_state_dirty,
invalidate_pipeline_bits = fragment_program_dirty | vertex_program_dirty,
invalidate_zclip_bits = vertex_state_dirty | zclip_config_state_dirty,
memory_barrier_bits = framebuffer_reads_dirty,
// Vulkan-specific signals
invalidate_vk_dynamic_state = zclip_config_state_dirty | scissor_config_state_dirty | polygon_offset_state_dirty | depth_bounds_state_dirty,
all_dirty = ~0u
};
enum eng_interrupt_reason : u32
{
backend_interrupt = 0x0001, // Backend-related interrupt
memory_config_interrupt = 0x0002, // Memory configuration changed
display_interrupt = 0x0004, // Display handling
pipe_flush_interrupt = 0x0008, // Flush pipelines
dma_control_interrupt = 0x0010, // DMA interrupt
all_interrupt_bits = memory_config_interrupt | backend_interrupt | display_interrupt | pipe_flush_interrupt
};
enum result_flags: u8
{
result_none = 0,
result_error = 1,
result_zcull_intr = 2
};
u32 get_vertex_type_size_on_host(vertex_base_type type, u32 size);
u32 get_address(u32 offset, u32 location, u32 size_to_check = 0, std::source_location src_loc = std::source_location::current());
struct backend_configuration
{
bool supports_multidraw; // Draw call batching
bool supports_hw_a2c; // Alpha to coverage
bool supports_hw_renormalization; // Should be true on NV hardware which matches PS3 texture renormalization behaviour
bool supports_hw_msaa; // MSAA support
bool supports_hw_a2one; // Alpha to one
bool supports_hw_conditional_render; // Conditional render
bool supports_passthrough_dma; // DMA passthrough
bool supports_asynchronous_compute; // Async compute
bool supports_host_gpu_labels; // Advanced host synchronization
bool supports_normalized_barycentrics; // Basically all GPUs except NVIDIA have properly normalized barycentrics
};
class sampled_image_descriptor_base;
struct desync_fifo_cmd_info
{
u32 cmd;
u64 timestamp;
};
// TODO: This class is a mess, this needs to be broken into smaller chunks, like I did for RSXFIFO and RSXZCULL (kd)
class thread : public cpu_thread, public GCM_context, public GRAPH_backend
{
u64 timestamp_ctrl = 0;
u64 timestamp_subvalue = 0;
u64 m_cycles_counter = 0;
display_flip_info_t m_queued_flip{};
void cpu_task() override;
protected:
std::array<push_buffer_vertex_info, 16> vertex_push_buffers;
s32 m_skip_frame_ctr = 0;
bool skip_current_frame = false;
primitive_class m_current_draw_mode = primitive_class::polygon;
backend_configuration backend_config{};
// FIFO
public:
std::unique_ptr<FIFO::FIFO_control> fifo_ctrl;
atomic_t<bool> rsx_thread_running{ false };
std::vector<std::pair<u32, u32>> dump_callstack_list() const override;
std::string dump_misc() const override;
protected:
FIFO::flattening_helper m_flattener;
u32 fifo_ret_addr = RSX_CALL_STACK_EMPTY;
u32 saved_fifo_ret = RSX_CALL_STACK_EMPTY;
u32 restore_fifo_cmd = 0;
u32 restore_fifo_count = 0;
// Occlusion query
bool zcull_surface_active = false;
std::unique_ptr<reports::ZCULL_control> zcull_ctrl;
// Framebuffer setup
rsx::gcm_framebuffer_info m_surface_info[rsx::limits::color_buffers_count];
rsx::gcm_framebuffer_info m_depth_surface_info;
framebuffer_layout m_framebuffer_layout{};
// Overlays
rsx::overlays::display_manager* m_overlay_manager = nullptr;
atomic_t<u64> m_display_rate_fetch_count = 0;
atomic_t<f64> m_cached_display_rate = 60.;
f64 get_cached_display_refresh_rate();
virtual f64 get_display_refresh_rate() const = 0;
// Invalidated memory range
address_range m_invalidated_memory_range;
// Profiler
rsx::profiling_timer m_profiler;
frame_statistics_t m_frame_stats{};
// Savestates related
u32 m_pause_after_x_flips = 0;
// Context
context* m_ctx = nullptr;
// Host DMA
std::unique_ptr<RSXDMAWriter> m_host_dma_ctrl;
public:
atomic_t<u64> new_get_put = u64{umax};
u32 restore_point = 0;
u32 dbg_step_pc = 0;
u32 last_known_code_start = 0;
atomic_t<u32> external_interrupt_lock{ 0 };
atomic_t<bool> external_interrupt_ack{ false };
atomic_t<u32> is_initialized{0};
rsx::simple_array<u32> element_push_buffer;
bool is_fifo_idle() const;
void flush_fifo();
// Returns [count of found commands, PC of their start]
std::pair<u32, u32> try_get_pc_of_x_cmds_backwards(s32 count, u32 get) const;
void recover_fifo(std::source_location src_loc = std::source_location::current());
static void fifo_wake_delay(u64 div = 1);
u32 get_fifo_cmd() const;
void dump_regs(std::string&, std::any& custom_data) const override;
void cpu_wait(bs_t<cpu_flag> old) override;
static constexpr u32 id_base = 0x5555'5555; // See get_current_cpu_thread()
// Performance approximation counters
struct
{
atomic_t<u64> idle_time{ 0 }; // Time spent idling in microseconds
u64 last_update_timestamp = 0; // Timestamp of last load update
u64 FIFO_idle_timestamp = 0; // Timestamp of when FIFO queue becomes idle
FIFO::state state = FIFO::state::running;
u32 approximate_load = 0;
u32 sampled_frames = 0;
}
performance_counters;
enum class flip_request : u32
{
emu_requested = 1,
native_ui = 2,
any = emu_requested | native_ui
};
atomic_bitmask_t<flip_request> async_flip_requested{};
u8 async_flip_buffer{ 0 };
void capture_frame(const std::string& name);
const backend_configuration& get_backend_config() const { return backend_config; }
public:
std::shared_ptr<named_thread<class ppu_thread>> intr_thread;
// I hate this flag, but until HLE is closer to LLE, it's needed
bool isHLE{ false };
bool serialized = false;
u32 flip_status = CELL_GCM_DISPLAY_FLIP_STATUS_DONE;
int debug_level = CELL_GCM_DEBUG_LEVEL0;
atomic_t<bool> requested_vsync{true};
atomic_t<bool> enable_second_vhandler{false};
bool send_event(u64, u64, u64);
std::array<bool, 16> m_textures_dirty;
std::array<bool, 4> m_vertex_textures_dirty;
rsx::framebuffer_creation_context m_current_framebuffer_context = rsx::framebuffer_creation_context::context_draw;
rsx::atomic_bitmask_t<rsx::eng_interrupt_reason> m_eng_interrupt_mask;
rsx::bitmask_t<rsx::pipeline_state> m_graphics_state;
u64 ROP_sync_timestamp = 0;
program_hash_util::fragment_program_utils::fragment_program_metadata current_fp_metadata = {};
program_hash_util::vertex_program_utils::vertex_program_metadata current_vp_metadata = {};
std::array<u32, 4> get_color_surface_addresses() const;
u32 get_zeta_surface_address() const;
protected:
void get_framebuffer_layout(rsx::framebuffer_creation_context context, framebuffer_layout &layout);
bool get_scissor(areau& region, bool clip_viewport);
/**
* Analyze vertex inputs and group all interleaved blocks
*/
void analyse_inputs_interleaved(vertex_input_layout&);
RSXVertexProgram current_vertex_program = {};
RSXFragmentProgram current_fragment_program = {};
vertex_program_texture_state current_vp_texture_state = {};
fragment_program_texture_state current_fp_texture_state = {};
// Runs shader prefetch and resolves pipeline status flags
void analyse_current_rsx_pipeline();
// Prefetch and analyze the currently active fragment program ucode
void prefetch_fragment_program();
// Prefetch and analyze the currently active vertex program ucode
void prefetch_vertex_program();
void get_current_vertex_program(const std::array<std::unique_ptr<rsx::sampled_image_descriptor_base>, rsx::limits::vertex_textures_count>& sampler_descriptors);
/**
* Gets current fragment program and associated fragment state
*/
void get_current_fragment_program(const std::array<std::unique_ptr<rsx::sampled_image_descriptor_base>, rsx::limits::fragment_textures_count>& sampler_descriptors);
public:
bool invalidate_fragment_program(u32 dst_dma, u32 dst_offset, u32 size);
void on_framebuffer_options_changed(u32 opt);
public:
u64 target_rsx_flip_time = 0;
u64 int_flip_index = 0;
u64 last_guest_flip_timestamp = 0;
u64 last_host_flip_timestamp = 0;
vm::ptr<void(u32)> flip_handler = vm::null;
vm::ptr<void(u32)> user_handler = vm::null;
vm::ptr<void(u32)> vblank_handler = vm::null;
vm::ptr<void(u32)> queue_handler = vm::null;
atomic_t<u64> vblank_count{0};
bool capture_current_frame = false;
u64 vblank_at_flip = umax;
u64 flip_notification_count = 0;
void post_vblank_event(u64 post_event_time);
public:
atomic_t<bool> sync_point_request = false;
atomic_t<bool> pause_on_draw = false;
bool in_begin_end = false;
std::queue<desync_fifo_cmd_info> recovered_fifo_cmds_history;
std::deque<frame_time_t> frame_times;
u32 prevent_preempt_increase_tickets = 0;
u64 preempt_fail_old_preempt_count = 0;
atomic_t<s32> async_tasks_pending{ 0 };
reports::conditional_render_eval cond_render_ctrl;
virtual u64 get_cycles() = 0;
virtual ~thread();
static constexpr auto thread_name = "rsx::thread"sv;
protected:
thread(utils::serial* ar);
thread() : thread(static_cast<utils::serial*>(nullptr)) {}
virtual void on_task();
virtual void on_exit();
/**
* Execute a backend local task queue
*/
virtual void do_local_task(FIFO::state state);
virtual void emit_geometry(u32) {}
void run_FIFO();
public:
thread(const thread&) = delete;
thread& operator=(const thread&) = delete;
void save(utils::serial& ar);
virtual void clear_surface(u32 /*arg*/) {}
virtual void begin();
virtual void end();
virtual void execute_nop_draw();
virtual void on_init_thread() = 0;
virtual void on_frame_end(u32 buffer, bool forced = false);
virtual void flip(const display_flip_info_t& info) = 0;
virtual u64 timestamp();
virtual bool on_access_violation(u32 /*address*/, bool /*is_writing*/) { return false; }
virtual void on_invalidate_memory_range(const address_range & /*range*/, rsx::invalidation_cause) {}
virtual void notify_tile_unbound(u32 /*tile*/) {}
// control
virtual void renderctl(u32 /*request_code*/, void* /*args*/) {}
// zcull
void notify_zcull_info_changed();
void clear_zcull_stats(u32 type);
void check_zcull_status(bool framebuffer_swap);
void get_zcull_stats(u32 type, vm::addr_t sink);
u32 copy_zcull_stats(u32 memory_range_start, u32 memory_range, u32 destination);
void enable_conditional_rendering(vm::addr_t ref);
void disable_conditional_rendering();
virtual void begin_conditional_rendering(const std::vector<reports::occlusion_query_info*>& sources);
virtual void end_conditional_rendering();
// sync
void sync();
flags32_t read_barrier(u32 memory_address, u32 memory_range, bool unconditional);
virtual void sync_hint(FIFO::interrupt_hint hint, reports::sync_hint_payload_t payload);
virtual bool release_GCM_label(u32 /*address*/, u32 /*value*/) { return false; }
std::span<const std::byte> get_raw_index_array(const draw_clause& draw_indexed_clause) const;
std::variant<draw_array_command, draw_indexed_array_command, draw_inlined_array>
get_draw_command(const rsx::rsx_state& state) const;
/**
* Immediate mode rendering requires a temp push buffer to hold attrib values
* Appends a value to the push buffer (currently only supports 32-bit wide types)
*/
void append_to_push_buffer(u32 attribute, u32 size, u32 subreg_index, vertex_base_type type, u32 value);
u32 get_push_buffer_vertex_count() const;
void append_array_element(u32 index);
u32 get_push_buffer_index_count() const;
protected:
/**
* Computes VRAM requirements needed to upload raw vertex streams
* result.first contains persistent memory requirements
* result.second contains volatile memory requirements
*/
std::pair<u32, u32> calculate_memory_requirements(const vertex_input_layout& layout, u32 first_vertex, u32 vertex_count);
/**
* Generates vertex input descriptors as an array of 16x4 s32s
*/
void fill_vertex_layout_state(const vertex_input_layout& layout, u32 first_vertex, u32 vertex_count, s32* buffer, u32 persistent_offset = 0, u32 volatile_offset = 0);
/**
* Uploads vertex data described in the layout descriptor
* Copies from local memory to the write-only output buffers provided in a sequential manner
*/
void write_vertex_data_to_memory(const vertex_input_layout& layout, u32 first_vertex, u32 vertex_count, void *persistent_data, void *volatile_data);
void evaluate_cpu_usage_reduction_limits();
private:
shared_mutex m_mtx_task;
void handle_emu_flip(u32 buffer);
void handle_invalidated_memory_range();
public:
/**
* Fill buffer with 4x4 scale offset matrix.
* The vertex shader's position is to be multiplied by this matrix.
* If flip_y is set, the matrix is modified to use the D3D convention.
*/
void fill_scale_offset_data(void *buffer, bool flip_y) const;
/**
* Fill buffer with user clip information
*/
void fill_user_clip_data(void *buffer) const;
/**
* Fill buffer with vertex program constants.
* The relocation table allows a partial fill with only selected registers.
*/
void fill_vertex_program_constants_data(void* buffer, const std::span<const u16>& reloc_table);
/**
* Fill buffer with fragment rasterization state.
* Fills current fog values, alpha test parameters and texture scaling parameters
*/
void fill_fragment_state_buffer(void* buffer, const RSXFragmentProgram& fragment_program);
/**
* Notify that a section of memory has been mapped
* If there is a notify_memory_unmapped request on this range yet to be handled,
* handles it immediately.
*/
void on_notify_memory_mapped(u32 address_base, u32 size);
/**
* Notify that a section of memory is to be unmapped
* Any data held in the defined range is discarded
* Sets optional unmap event data
*/
void on_notify_pre_memory_unmapped(u32 address_base, u32 size, std::vector<std::pair<u64, u64>>& event_data);
/**
* Notify that a section of memory has been unmapped
* Any data held in the defined range is discarded
*/
void on_notify_post_memory_unmapped(u64 event_data1, u64 event_data2);
/**
* Notify to check internal state during semaphore wait
*/
virtual void on_semaphore_acquire_wait() {}
virtual std::pair<std::string, std::string> get_programs() const { return std::make_pair("", ""); }
virtual bool scaled_image_from_memory(const blit_src_info& /*src_info*/, const blit_dst_info& /*dst_info*/, bool /*interpolate*/) { return false; }
public:
void reset();
void init(u32 ctrlAddress);
// Emu App/Game flip, only immediately flips when called from rsxthread
bool request_emu_flip(u32 buffer);
void pause();
void unpause();
void wait_pause();
// Get RSX approximate load in %
u32 get_load();
// Get stats object
frame_statistics_t& get_stats() { return m_frame_stats; }
// Returns true if the current thread is the active RSX thread
inline bool is_current_thread() const
{
return !!cpu_thread::get_current<rsx::thread>();
}
};
inline thread* get_current_renderer()
{
return g_fxo->try_get<rsx::thread>();
}
}
| 18,811 | C++ | .h | 425 | 41.138824 | 168 | 0.724793 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

5,991 | GCM.h | RPCS3_rpcs3/rpcs3/Emu/RSX/GCM.h |
#pragma once
#include "Emu/Memory/vm_ptr.h"
#include "gcm_enums.h"
#include "util/atomic.hpp"
struct CellGcmControl
{
atomic_be_t<u32> put;
atomic_be_t<u32> get;
atomic_be_t<u32> ref;
};
struct CellGcmConfig
{
be_t<u32> localAddress;
be_t<u32> ioAddress;
be_t<u32> localSize;
be_t<u32> ioSize;
be_t<u32> memoryFrequency;
be_t<u32> coreFrequency;
};
struct CellGcmContextData;
using CellGcmContextCallback = s32 (vm::ptr<CellGcmContextData>, u32);
struct CellGcmContextData
{
vm::bptr<u32> begin;
vm::bptr<u32> end;
vm::bptr<u32> current;
vm::bptr<CellGcmContextCallback> callback;
};
struct gcmInfo
{
u32 config_addr;
u32 context_addr;
u32 control_addr;
u32 command_size = 0x400;
u32 segment_size = 0x100;
};
struct CellGcmSurface
{
u8 type;
u8 antialias;
u8 colorFormat;
u8 colorTarget;
u8 colorLocation[4];
be_t<u32> colorOffset[4];
be_t<u32> colorPitch[4];
u8 depthFormat;
u8 depthLocation;
u8 _padding[2];
be_t<u32> depthOffset;
be_t<u32> depthPitch;
be_t<u16> width;
be_t<u16> height;
be_t<u16> x;
be_t<u16> y;
};
struct alignas(16) CellGcmReportData
{
be_t<u64> timer;
be_t<u32> value;
be_t<u32> padding;
};
struct CellGcmZcullInfo
{
be_t<u32> region;
be_t<u32> size;
be_t<u32> start;
be_t<u32> offset;
be_t<u32> status0;
be_t<u32> status1;
};
struct CellGcmDisplayInfo
{
be_t<u32> offset;
be_t<u32> pitch;
be_t<u32> width;
be_t<u32> height;
};
struct CellGcmTileInfo
{
be_t<u32> tile;
be_t<u32> limit;
be_t<u32> pitch;
be_t<u32> format;
};
struct GcmZcullInfo
{
ENABLE_BITWISE_SERIALIZATION;
u32 offset;
u32 width;
u32 height;
u32 cullStart;
u32 zFormat;
u32 aaFormat;
u32 zcullDir;
u32 zcullFormat;
u32 sFunc;
u32 sRef;
u32 sMask;
bool bound = false;
CellGcmZcullInfo pack() const
{
CellGcmZcullInfo ret
{
.region = (1<<0) | (zFormat<<4) | (aaFormat<<8),
.size = ((width>>6)<<22) | ((height>>6)<<6),
.start = cullStart&(~0xFFF),
.offset = offset,
.status0 = (zcullDir<<1) | (zcullFormat<<2) | ((sFunc&0xF)<<12) | (sRef<<16) | (sMask<<24),
.status1 = (0x2000<<0) | (0x20<<16)
};
return ret;
}
};
struct GcmTileInfo
{
ENABLE_BITWISE_SERIALIZATION;
u32 location;
u32 offset;
u32 size;
u32 pitch;
u32 comp;
u32 base;
u32 bank;
bool bound = false;
CellGcmTileInfo pack() const
{
CellGcmTileInfo ret
{
.tile = (location + 1) | (bank << 4) | ((offset / 0x10000) << 16) | (location << 31),
.limit = ((offset + size - 1) / 0x10000) << 16 | (location << 31),
.pitch = (pitch / 0x100) << 8,
.format = base | ((base + ((size - 1) / 0x10000)) << 13) | (comp << 26) | (1 << 30)
};
return ret;
}
};
struct any32
{
u32 m_data;
template <typename T, typename T2 = std::common_type_t<T>>
any32(const T& value)
: m_data(std::bit_cast<u32, T2>(value))
{
}
template <typename T>
T as() const
{
return std::bit_cast<T>(m_data);
}
};
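// Usage sketch (illustrative): any32 reinterprets 32-bit method arguments freely,
// e.g. any32(1.0f).as<u32>() == 0x3F800000 and any32(u32{0x3F800000}).as<f32>() == 1.0f.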
| 2,881 | C++ | .h | 150 | 17.126667 | 94 | 0.684152 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

5,992 | rsx_decode.h | RPCS3_rpcs3/rpcs3/Emu/RSX/rsx_decode.h |
#pragma once
#include "util/types.hpp"
#include "Utilities/BitField.h"
#include "Utilities/StrFmt.h"
#include <tuple>
#include <algorithm>
#include "gcm_enums.h"
#include "rsx_utils.h"
namespace rsx
{
enum class boolean_to_string_t : u8 {};
constexpr boolean_to_string_t print_boolean(bool b)
{
return boolean_to_string_t{static_cast<u8>(b)};
}
template <u16 Register>
struct registers_decoder
{};
// Use the smallest type by default
template <u32 I, u32 N, typename T = get_uint_t<(N <= 8 ? 1 : (N <= 16 ? 2 : 4))>>
constexpr T bf_decoder(u32 bits)
{
return static_cast<T>(bf_t<u32, I, N>::extract(bits));
}
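// Examples (illustrative): bf_decoder<I, N> extracts bits [I, I+N) and narrows the result to the
// smallest fitting unsigned type, e.g. bf_decoder<0, 16>(0x00500010) == 0x0010 (a u16) and
// bf_decoder<16, 16>(0x00500010) == 0x0050.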
template <>
struct registers_decoder<NV406E_SET_REFERENCE>
{
struct decoded_type
{
const u32 value;
constexpr decoded_type(u32 value) noexcept : value(value) {}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "NV406E Ref: 0x%08x", decoded.value);
}
};
template<>
struct registers_decoder<NV4097_SET_VIEWPORT_HORIZONTAL>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u16 origin_x() const
{
return bf_decoder<0, 16>(value);
}
u16 width() const
{
return bf_decoder<16, 16>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Viewport: x: %u width: %u", decoded.origin_x(), decoded.width());
}
};
template<>
struct registers_decoder<NV4097_SET_VIEWPORT_VERTICAL>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u16 origin_y() const
{
return bf_decoder<0, 16>(value);
}
u16 height() const
{
return bf_decoder<16, 16>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Viewport: y: %u height: %u", decoded.origin_y(), decoded.height());
}
};
template<>
struct registers_decoder<NV4097_SET_SCISSOR_HORIZONTAL>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u16 origin_x() const
{
return bf_decoder<0, 16>(value);
}
u16 width() const
{
return bf_decoder<16, 16>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Scissor: x: %u width: %u", decoded.origin_x(), decoded.width());
}
};
template<>
struct registers_decoder<NV4097_SET_SCISSOR_VERTICAL>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u16 origin_y() const
{
return bf_decoder<0, 16>(value);
}
u16 height() const
{
return bf_decoder<16, 16>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Scissor: y: %u height: %u", decoded.origin_y(), decoded.height());
}
};
template<>
struct registers_decoder<NV4097_SET_SURFACE_CLIP_HORIZONTAL>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u16 origin_x() const
{
return bf_decoder<0, 16>(value);
}
u16 width() const
{
return bf_decoder<16, 16>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Surface: clip x: %u width: %u", decoded.origin_x(), decoded.width());
}
};
template<>
struct registers_decoder<NV4097_SET_SURFACE_CLIP_VERTICAL>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u16 origin_y() const
{
return bf_decoder<0, 16>(value);
}
u16 height() const
{
return bf_decoder<16, 16>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Surface: clip y: %u height: %u", decoded.origin_y(), decoded.height());
}
};
template<>
struct registers_decoder<NV4097_SET_CLEAR_RECT_HORIZONTAL>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u16 origin_x() const
{
return bf_decoder<0, 16>(value);
}
u16 width() const
{
return bf_decoder<16, 16>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Clear: rect x: %u width: %u", decoded.origin_x(), decoded.width());
}
};
template<>
struct registers_decoder<NV4097_SET_CLEAR_RECT_VERTICAL>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u16 origin_y() const
{
return bf_decoder<0, 16>(value);
}
u16 height() const
{
return bf_decoder<16, 16>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Clear: rect y: %u height: %u", decoded.origin_y(), decoded.height());
}
};
template<>
struct registers_decoder<NV3089_CLIP_POINT>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u16 clip_x() const
{
return bf_decoder<0, 16>(value);
}
u16 clip_y() const
{
return bf_decoder<16, 16>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Blit engine: clip x: %u y: %u", decoded.clip_x(), decoded.clip_y());
}
};
template<>
struct registers_decoder<NV3089_CLIP_SIZE>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u16 clip_width() const
{
return bf_decoder<0, 16>(value);
}
u16 clip_height() const
{
return bf_decoder<16, 16>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Blit engine: clip width: %u height: %u", decoded.clip_width(), decoded.clip_height());
}
};
template<>
struct registers_decoder<NV3089_IMAGE_OUT_POINT>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u16 x() const
{
return bf_decoder<0, 16>(value);
}
u16 y() const
{
return bf_decoder<16, 16>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Blit engine: output x: %u y: %u", decoded.x(), decoded.y());
}
};
template<>
struct registers_decoder<NV4097_SET_WINDOW_OFFSET>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u16 window_offset_x() const
{
return bf_decoder<0, 16>(value);
}
u16 window_offset_y() const
{
return bf_decoder<16, 16>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Window: offset x: %u y: %u", decoded.window_offset_x(), decoded.window_offset_y());
}
};
template<>
struct registers_decoder<NV3089_IMAGE_OUT_SIZE>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u16 width() const
{
return bf_decoder<0, 16>(value);
}
u16 height() const
{
return bf_decoder<16, 16>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Blit engine: output width: %u height: %u", decoded.width(), decoded.height());
}
};
template<>
struct registers_decoder<NV3089_IMAGE_IN_SIZE>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u16 width() const
{
return bf_decoder<0, 16>(value);
}
u16 height() const
{
return bf_decoder<16, 16>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Blit engine: input width: %u height: %u", decoded.width(), decoded.height());
}
};
template<>
struct registers_decoder<NV3062_SET_PITCH>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u16 alignment() const
{
return bf_decoder<0, 16>(value);
}
u16 pitch() const
{
return bf_decoder<16, 16>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Blit engine: output alignment: %u pitch: %u", decoded.alignment(), decoded.pitch());
}
};
template<>
struct registers_decoder<NV308A_POINT>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u16 x() const
{
return bf_decoder<0, 16>(value);
}
u16 y() const
{
return bf_decoder<16, 16>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "NV308A: x: %u y: %u", decoded.x(), decoded.y());
}
};
template<>
struct registers_decoder<NV4097_SET_VERTEX_ATTRIB_INPUT_MASK>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u32 mask() const
{
return value;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
out += "Transform program enabled inputs:";
constexpr std::string_view input_names[] =
{
"in_pos", "in_weight", "in_normal",
"in_diff_color", "in_spec_color",
"in_fog",
"in_point_size", "in_7",
"in_tc0", "in_tc1", "in_tc2", "in_tc3",
"in_tc4", "in_tc5", "in_tc6", "in_tc7"
};
for (u32 i = 0; i < 16; i++)
{
if (decoded.mask() & (1 << i))
{
out += ' ';
out += input_names[i];
}
}
}
};
template<>
struct registers_decoder<NV4097_SET_FREQUENCY_DIVIDER_OPERATION>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u32 frequency_divider_operation_mask() const
{
return value;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
out += "Frequency divider:";
const u32 mask = decoded.frequency_divider_operation_mask();
if (!mask)
{
out += " (none)";
return;
}
for (u32 i = 0; i < 16; i++)
{
if (mask & (1 << i))
{
out += ' ';
fmt::append(out, "%u", i);
}
}
}
};
template<>
struct registers_decoder<NV4097_SET_DEPTH_TEST_ENABLE>
{
struct decoded_type
{
private:
u32 enabled;
public:
decoded_type(u32 value) : enabled(value) {}
bool depth_test_enabled() const
{
return bool(enabled);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Depth: test %s", print_boolean(decoded.depth_test_enabled()));
}
};
template<>
struct registers_decoder<NV4097_SET_DEPTH_MASK>
{
struct decoded_type
{
private:
u32 enabled;
public:
decoded_type(u32 value) : enabled(value) {}
bool depth_write_enabled() const
{
return bool(enabled);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Depth: write: %s", print_boolean(decoded.depth_write_enabled()));
}
};
template<>
struct registers_decoder<NV4097_SET_ZMIN_MAX_CONTROL>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
bool depth_clip_enabled() const
{
return bf_decoder<0, 4, bool>(value);
}
bool depth_clamp_enabled() const
{
return bf_decoder<4, 4, bool>(value);
}
bool depth_clip_ignore_w() const
{
return bf_decoder<8, 4, bool>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Depth: clip_enabled: %s, clamp: %s, ignore_w: %s", print_boolean(decoded.depth_clip_enabled()), print_boolean(decoded.depth_clamp_enabled()) , print_boolean(decoded.depth_clip_ignore_w()));
}
};
template<>
struct registers_decoder<NV4097_SET_ALPHA_TEST_ENABLE>
{
struct decoded_type
{
private:
u32 enabled;
public:
decoded_type(u32 value) : enabled(value) {}
bool alpha_test_enabled() const
{
return bool(enabled);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Alpha: test %s", print_boolean(decoded.alpha_test_enabled()));
}
};
template<>
struct registers_decoder<NV4097_SET_STENCIL_TEST_ENABLE>
{
struct decoded_type
{
private:
u32 enabled;
public:
decoded_type(u32 value) : enabled(value) {}
bool stencil_test_enabled() const
{
return bool(enabled);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Stencil: test %s", print_boolean(decoded.stencil_test_enabled()));
}
};
template<>
struct registers_decoder<NV4097_SET_RESTART_INDEX_ENABLE>
{
struct decoded_type
{
private:
u32 enabled;
public:
decoded_type(u32 value) : enabled(value) {}
bool restart_index_enabled() const
{
return bool(enabled);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Restart Index: %s", print_boolean(decoded.restart_index_enabled()));
}
};
template<>
struct registers_decoder<NV4097_SET_DEPTH_BOUNDS_TEST_ENABLE>
{
struct decoded_type
{
private:
u32 enabled;
public:
decoded_type(u32 value) : enabled(value) {}
bool depth_bound_enabled() const
{
return bool(enabled);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Depth: bound test %s", print_boolean(decoded.depth_bound_enabled()));
}
};
template<>
struct registers_decoder<NV4097_SET_LOGIC_OP_ENABLE>
{
struct decoded_type
{
private:
u32 enabled;
public:
decoded_type(u32 value) : enabled(value) {}
bool logic_op_enabled() const
{
return bool(enabled);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Logic: %s", print_boolean(decoded.logic_op_enabled()));
}
};
template<>
struct registers_decoder<NV4097_SET_DITHER_ENABLE>
{
struct decoded_type
{
private:
u32 enabled;
public:
decoded_type(u32 value) : enabled(value) {}
bool dither_enabled() const
{
return bool(enabled);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Dither: %s", print_boolean(decoded.dither_enabled()));
}
};
template<>
struct registers_decoder<NV4097_SET_BLEND_ENABLE>
{
struct decoded_type
{
private:
u32 enabled;
public:
decoded_type(u32 value) : enabled(value) {}
bool blend_enabled() const
{
return bool(enabled);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Blend: %s", print_boolean(decoded.blend_enabled()));
}
};
template<>
struct registers_decoder<NV4097_SET_LINE_SMOOTH_ENABLE>
{
struct decoded_type
{
private:
u32 enabled;
public:
decoded_type(u32 value) : enabled(value) {}
bool line_smooth_enabled() const
{
return bool(enabled);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Line smooth: %s", print_boolean(decoded.line_smooth_enabled()));
}
};
template<>
struct registers_decoder<NV4097_SET_POLY_OFFSET_POINT_ENABLE>
{
struct decoded_type
{
private:
u32 enabled;
public:
decoded_type(u32 value) : enabled(value) {}
bool poly_offset_point_enabled() const
{
return bool(enabled);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Polygon: offset point: %s", print_boolean(decoded.poly_offset_point_enabled()));
}
};
template<>
struct registers_decoder<NV4097_SET_POLY_OFFSET_LINE_ENABLE>
{
struct decoded_type
{
private:
u32 enabled;
public:
decoded_type(u32 value) : enabled(value) {}
bool poly_offset_line_enabled() const
{
return bool(enabled);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Polygon: offset line: %s", print_boolean(decoded.poly_offset_line_enabled()));
}
};
template<>
struct registers_decoder<NV4097_SET_POLY_OFFSET_FILL_ENABLE>
{
struct decoded_type
{
private:
u32 enabled;
public:
decoded_type(u32 value) : enabled(value) {}
bool poly_offset_fill_enabled() const
{
return bool(enabled);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Polygon: offset fill: %s", print_boolean(decoded.poly_offset_fill_enabled()));
}
};
template<>
struct registers_decoder<NV4097_SET_CULL_FACE_ENABLE>
{
struct decoded_type
{
private:
u32 enabled;
public:
decoded_type(u32 value) : enabled(value) {}
bool cull_face_enabled() const
{
return bool(enabled);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Cull face: %s", print_boolean(decoded.cull_face_enabled()));
}
};
template<>
struct registers_decoder<NV4097_SET_POLY_SMOOTH_ENABLE>
{
struct decoded_type
{
private:
u32 enabled;
public:
decoded_type(u32 value) : enabled(value) {}
bool poly_smooth_enabled() const
{
return bool(enabled);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Polygon: smooth: %s", print_boolean(decoded.poly_smooth_enabled()));
}
};
template<>
struct registers_decoder<NV4097_SET_TWO_SIDED_STENCIL_TEST_ENABLE>
{
struct decoded_type
{
private:
u32 enabled;
public:
decoded_type(u32 value) : enabled(value) {}
bool two_sided_stencil_test_enabled() const
{
return bool(enabled);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Stencil: per side: %s", print_boolean(decoded.two_sided_stencil_test_enabled()));
}
};
template<>
struct registers_decoder<NV4097_SET_TWO_SIDE_LIGHT_EN>
{
struct decoded_type
{
private:
u32 enabled;
public:
decoded_type(u32 value) : enabled(value) {}
bool two_sided_lighting_enabled() const
{
return bool(enabled);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Light: per side: %s", print_boolean(decoded.two_sided_lighting_enabled()));
}
};
template<>
struct registers_decoder<NV4097_SET_DEPTH_BOUNDS_MIN>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
f32 depth_bound_min() const
{
return std::bit_cast<f32>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Depth: bound min: %g", decoded.depth_bound_min());
}
};
template<>
struct registers_decoder<NV4097_SET_DEPTH_BOUNDS_MAX>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
f32 depth_bound_max() const
{
return std::bit_cast<f32>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Depth: bound max: %g", decoded.depth_bound_max());
}
};
template<>
struct registers_decoder<NV4097_SET_FOG_PARAMS>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
f32 fog_param_0() const
{
return std::bit_cast<f32>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Fog: param 0: %g", decoded.fog_param_0());
}
};
template<>
struct registers_decoder<NV4097_SET_FOG_PARAMS + 1>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
f32 fog_param_1() const
{
return std::bit_cast<f32>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Fog: param 1: %g", decoded.fog_param_1());
}
};
template<>
struct registers_decoder<NV4097_SET_CLIP_MIN>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
f32 clip_min() const
{
return std::bit_cast<f32>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Depth: clip min: %g", decoded.clip_min());
}
};
template<>
struct registers_decoder<NV4097_SET_CLIP_MAX>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
f32 clip_max() const
{
return std::bit_cast<f32>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Depth: clip max: %g", decoded.clip_max());
}
};
template<>
struct registers_decoder<NV4097_SET_POLYGON_OFFSET_SCALE_FACTOR>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
f32 polygon_offset_scale_factor() const
{
return std::bit_cast<f32>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Polygon: offset scale: %g", decoded.polygon_offset_scale_factor());
}
};
template<>
struct registers_decoder<NV4097_SET_POLYGON_OFFSET_BIAS>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
f32 polygon_offset_scale_bias() const
{
return std::bit_cast<f32>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Polygon: offset bias: %g", decoded.polygon_offset_scale_bias());
}
};
template<>
struct registers_decoder<NV4097_SET_VIEWPORT_SCALE>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
f32 viewport_scale_x() const
{
return std::bit_cast<f32>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Viewport: scale x: %g", decoded.viewport_scale_x());
}
};
template<>
struct registers_decoder<NV4097_SET_VIEWPORT_SCALE + 1>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
f32 viewport_scale_y() const
{
return std::bit_cast<f32>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Viewport: scale y: %g", decoded.viewport_scale_y());
}
};
template<>
struct registers_decoder<NV4097_SET_VIEWPORT_SCALE + 2>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
f32 viewport_scale_z() const
{
return std::bit_cast<f32>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Viewport: scale z: %g", decoded.viewport_scale_z());
}
};
template<>
struct registers_decoder<NV4097_SET_VIEWPORT_SCALE + 3>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
f32 viewport_scale_w() const
{
return std::bit_cast<f32>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Viewport: scale w: %g", decoded.viewport_scale_w());
}
};
template<>
struct registers_decoder<NV4097_SET_VIEWPORT_OFFSET>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
f32 viewport_offset_x() const
{
return std::bit_cast<f32>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Viewport: offset x: %g", decoded.viewport_offset_x());
}
};
template<>
struct registers_decoder<NV4097_SET_VIEWPORT_OFFSET + 1>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
f32 viewport_offset_y() const
{
return std::bit_cast<f32>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Viewport: offset y: %g", decoded.viewport_offset_y());
}
};
template<>
struct registers_decoder<NV4097_SET_VIEWPORT_OFFSET + 2>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
f32 viewport_offset_z() const
{
return std::bit_cast<f32>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Viewport: offset z: %g", decoded.viewport_offset_z());
}
};
template<>
struct registers_decoder<NV4097_SET_VIEWPORT_OFFSET + 3>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
f32 viewport_offset_w() const
{
return std::bit_cast<f32>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Viewport: offset w: %g", decoded.viewport_offset_w());
}
};
template<>
struct registers_decoder<NV4097_SET_RESTART_INDEX>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u32 restart_index() const
{
return value;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Restart index: %u", decoded.restart_index());
}
};
template<>
struct registers_decoder<NV4097_SET_SURFACE_COLOR_AOFFSET>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u32 surface_a_offset() const
{
return value;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Surface: A offset 0x%x", decoded.surface_a_offset());
}
};
template<>
struct registers_decoder<NV4097_SET_SURFACE_COLOR_BOFFSET>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u32 surface_b_offset() const
{
return value;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Surface: B offset 0x%x", decoded.surface_b_offset());
}
};
template<>
struct registers_decoder<NV4097_SET_SURFACE_COLOR_COFFSET>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u32 surface_c_offset() const
{
return value;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Surface: C offset 0x%x", decoded.surface_c_offset());
}
};
template<>
struct registers_decoder<NV4097_SET_SURFACE_COLOR_DOFFSET>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u32 surface_d_offset() const
{
return value;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Surface: D offset 0x%x", decoded.surface_d_offset());
}
};
template<>
struct registers_decoder<NV4097_SET_SURFACE_PITCH_A>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u32 surface_a_pitch() const
{
return value;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Surface: A pitch: %u", decoded.surface_a_pitch());
}
};
template<>
struct registers_decoder<NV4097_SET_SURFACE_PITCH_B>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u32 surface_b_pitch() const
{
return value;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Surface: B pitch: %u", decoded.surface_b_pitch());
}
};
template<>
struct registers_decoder<NV4097_SET_SURFACE_PITCH_C>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u32 surface_c_pitch() const
{
return value;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Surface: C pitch: %u", decoded.surface_c_pitch());
}
};
template<>
struct registers_decoder<NV4097_SET_SURFACE_PITCH_D>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u32 surface_d_pitch() const
{
return value;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Surface: D pitch: %u", decoded.surface_d_pitch());
}
};
template<>
struct registers_decoder<NV4097_SET_SURFACE_ZETA_OFFSET>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u32 surface_z_offset() const
{
return value;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Surface: Z offset: 0x%x", decoded.surface_z_offset());
}
};
template<>
struct registers_decoder<NV4097_SET_SURFACE_PITCH_Z>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u32 surface_z_pitch() const
{
return value;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Surface: Z pitch: %u", decoded.surface_z_pitch());
}
};
template<>
struct registers_decoder<NV4097_SET_VERTEX_ATTRIB_OUTPUT_MASK>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u32 output_mask() const
{
return value;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
static constexpr std::string_view output_names[] =
{
"diffuse_color",
"specular_color",
"back_diffuse_color",
"back_specular_color",
"fog",
"point_size",
"clip_distance[0]",
"clip_distance[1]",
"clip_distance[2]",
"clip_distance[3]",
"clip_distance[4]",
"clip_distance[5]",
"tc8",
"tc9",
"tc0",
"tc1",
"tc2",
"tc3",
"tc4",
"tc5",
"tc6",
"tc7"
};
out += "Transform program outputs:";
const u32 mask = decoded.output_mask();
for (u32 i = 0; i < 22; i++)
{
if (mask & (1 << i))
{
out += ' ';
out += output_names[i];
}
}
}
};
template<>
struct registers_decoder<NV4097_SET_SHADER_CONTROL>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u32 shader_ctrl() const
{
return value;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Shader control: raw_value: 0x%x reg_count: %u%s%s",
decoded.shader_ctrl(), ((decoded.shader_ctrl() >> 24) & 0xFF), ((decoded.shader_ctrl() & CELL_GCM_SHADER_CONTROL_DEPTH_EXPORT) ? " depth_replace" : ""),
((decoded.shader_ctrl() & CELL_GCM_SHADER_CONTROL_32_BITS_EXPORTS) ? " 32b_exports" : ""));
}
};
template<>
struct registers_decoder<NV4097_SET_SHADER_PACKER>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
bool srgb_output_enabled() const
{
return !!value;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Shader packer control: srgb_enabled: %s", print_boolean(decoded.srgb_output_enabled()));
}
};
template<>
struct registers_decoder<NV4097_SET_VERTEX_DATA_BASE_OFFSET>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u32 vertex_data_base_offset() const
{
return value;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Vertex: base offset 0x%x", decoded.vertex_data_base_offset());
}
};
template<>
struct registers_decoder<NV4097_SET_INDEX_ARRAY_ADDRESS>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u32 index_array_offset() const
{
return bf_decoder<0, 29>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Index: array offset 0x%x", decoded.index_array_offset());
}
};
template<>
struct registers_decoder<NV4097_SET_VERTEX_DATA_BASE_INDEX>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u32 vertex_data_base_index() const
{
return value;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Vertex: base index: %u", decoded.vertex_data_base_index());
}
};
template<>
struct registers_decoder<NV4097_SET_SHADER_PROGRAM>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u32 shader_program_address() const
{
return bf_decoder<0, 31>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
const u32 address = decoded.shader_program_address();
fmt::append(out, "Shader: %s, offset: 0x%x", CellGcmLocation{(address & 3) - 1}, address & ~3);
}
};
template<>
struct registers_decoder<NV4097_SET_TRANSFORM_PROGRAM_START>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u32 transform_program_start() const
{
return value;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Transform program: start: %u", decoded.transform_program_start());
}
};
template<>
struct registers_decoder<NV406E_SET_CONTEXT_DMA_SEMAPHORE>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
CellGcmLocation context_dma() const
{
return CellGcmLocation{value};
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "NV406E semaphore: context: %s", decoded.context_dma());
}
};
template<>
struct registers_decoder<NV406E_SEMAPHORE_OFFSET>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u32 semaphore_offset() const
{
return value;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "NV406E semaphore: offset: 0x%x", decoded.semaphore_offset());
}
};
template <>
struct registers_decoder<NV406E_SEMAPHORE_RELEASE>
{
struct decoded_type
{
const u32 value;
constexpr decoded_type(u32 value) noexcept : value(value) {}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "NV406E semaphore: release: 0x%x", decoded.value);
}
};
template <>
struct registers_decoder<NV406E_SEMAPHORE_ACQUIRE>
{
struct decoded_type
{
const u32 value;
constexpr decoded_type(u32 value) noexcept : value(value) {}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "NV406E semaphore: acquire: 0x%x", decoded.value);
}
};
template<>
struct registers_decoder<NV4097_SET_CONTEXT_DMA_SEMAPHORE>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
CellGcmLocation context_dma() const
{
return CellGcmLocation{value};
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "NV4097 semaphore: context: %s", decoded.context_dma());
}
};
template<>
struct registers_decoder<NV4097_SET_SEMAPHORE_OFFSET>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u32 semaphore_offset() const
{
return value;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "NV4097 semaphore: offset: 0x%x", decoded.semaphore_offset());
}
};
template<>
struct registers_decoder<NV3089_IMAGE_IN_OFFSET>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u32 input_offset() const
{
return value;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "NV3089: input offset: 0x%x", decoded.input_offset());
}
};
template<>
struct registers_decoder<NV3062_SET_OFFSET_DESTIN>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u32 output_offset() const
{
return value;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "NV3062: output offset: 0x%x", decoded.output_offset());
}
};
template<>
struct registers_decoder<NV309E_SET_OFFSET>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u32 offset() const
{
return value;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "NV309E: offset: 0x%x", decoded.offset());
}
};
template<>
struct registers_decoder<NV3089_DS_DX>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
// Convert signed fixed point 32-bit format
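// The register holds the ratio in what appears to be an 11.20 signed fixed-point layout;
// decode_fxp<11, 20> recovers it and its reciprocal is returned (0 is passed through and reported in image_in).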
f32 ds_dx() const
{
const u32 val = value;
if (val == 0)
{
// Will get reported in image_in
return 0;
}
return 1.f / rsx::decode_fxp<11, 20>(val);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "NV3089: DS DX: %g", decoded.ds_dx());
}
};
template<>
struct registers_decoder<NV3089_DT_DY>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
// Convert signed fixed point 32-bit format
f32 dt_dy() const
{
const u32 val = value;
if (val == 0)
{
// Will get reported in image_in
return 0.f;
}
return 1.f / rsx::decode_fxp<11, 20>(val);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "NV3089: DT DY: %g", decoded.dt_dy());
}
};
template<>
struct registers_decoder<NV0039_PITCH_IN>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u32 input_pitch() const
{
return value;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "NV0039: input pitch: %u", decoded.input_pitch());
}
};
template<>
struct registers_decoder<NV0039_PITCH_OUT>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u32 output_pitch() const
{
return value;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "NV0039: output pitch: %u", decoded.output_pitch());
}
};
template<>
struct registers_decoder<NV0039_LINE_LENGTH_IN>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u32 input_line_length() const
{
return value;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "NV0039: line length input: %u", decoded.input_line_length());
}
};
template<>
struct registers_decoder<NV0039_LINE_COUNT>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u32 line_count() const
{
return value;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "NV0039: line count: %u", decoded.line_count());
}
};
template<>
struct registers_decoder<NV0039_OFFSET_OUT>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u32 output_offset() const
{
return value;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "NV0039: output offset: 0x%x", decoded.output_offset());
}
};
template<>
struct registers_decoder<NV0039_OFFSET_IN>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u32 input_offset() const
{
return value;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "NV0039: input offset: 00x%x", decoded.input_offset());
}
};
template<>
struct registers_decoder<NV4097_SET_DEPTH_FUNC>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
auto depth_func() const
{
return to_comparison_function(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Depth: compare_function: %s", decoded.depth_func());
}
};
template<>
struct registers_decoder<NV4097_SET_STENCIL_FUNC>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
auto stencil_func() const
{
return to_comparison_function(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Stencil: (front) compare_function: %s", decoded.stencil_func());
}
};
template<>
struct registers_decoder<NV4097_SET_BACK_STENCIL_FUNC>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
auto back_stencil_func() const
{
return to_comparison_function(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Stencil: back compare_function: %s", decoded.back_stencil_func());
}
};
template<>
struct registers_decoder<NV4097_SET_ALPHA_FUNC>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
auto alpha_func() const
{
return to_comparison_function(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Alpha: compare_function: %s", decoded.alpha_func());
}
};
template<>
struct registers_decoder<NV4097_SET_STENCIL_OP_FAIL>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
auto fail() const
{
return to_stencil_op(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Stencil: (front) fail op: %s", decoded.fail());
}
};
template<>
struct registers_decoder<NV4097_SET_STENCIL_OP_ZFAIL>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
auto zfail() const
{
return to_stencil_op(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Stencil: (front) zfail op: %s", decoded.zfail());
}
};
template<>
struct registers_decoder<NV4097_SET_STENCIL_OP_ZPASS>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
auto zpass() const
{
return to_stencil_op(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Stencil: (front) zpass op: %s", decoded.zpass());
}
};
template<>
struct registers_decoder<NV4097_SET_BACK_STENCIL_OP_FAIL>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
auto back_fail() const
{
return to_stencil_op(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Stencil: (back) fail op: %s", decoded.back_fail());
}
};
template<>
struct registers_decoder<NV4097_SET_BACK_STENCIL_OP_ZFAIL>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
auto back_zfail() const
{
return to_stencil_op(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Stencil: (back) zfail op: %s", decoded.back_zfail());
}
};
template<>
struct registers_decoder<NV4097_SET_BACK_STENCIL_OP_ZPASS>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
auto back_zpass() const
{
return to_stencil_op(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Stencil: (back) zpass op: %s", decoded.back_zpass());
}
};
template<>
struct registers_decoder<NV4097_SET_STENCIL_FUNC_REF>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u8 stencil_func_ref() const
{
return bf_decoder<0, 8>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Stencil: (front) func ref: %u", decoded.stencil_func_ref());
}
};
template<>
struct registers_decoder<NV4097_SET_BACK_STENCIL_FUNC_REF>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u8 back_stencil_func_ref() const
{
return bf_decoder<0, 8>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Stencil: (back) func ref: %u", decoded.back_stencil_func_ref());
}
};
template<>
struct registers_decoder<NV4097_SET_STENCIL_FUNC_MASK>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u8 stencil_func_mask() const
{
return bf_decoder<0, 8>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Stencil: (front) func mask: %u", decoded.stencil_func_mask());
}
};
template<>
struct registers_decoder<NV4097_SET_BACK_STENCIL_FUNC_MASK>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u8 back_stencil_func_mask() const
{
return bf_decoder<0, 8>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Stencil: (back) func mask: %u", decoded.back_stencil_func_mask());
}
};
template<>
struct registers_decoder<NV4097_SET_ALPHA_REF>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
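// The reference value can be read as an 8-bit unorm, a half float, or a full 32-bit float;
// which interpretation applies presumably depends on surface/alpha-test state not encoded in this register.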
f32 alpha_ref8() const
{
return bf_decoder<0, 8>(value) / 255.f;
}
f32 alpha_ref16() const
{
return rsx::decode_fp16(bf_decoder<0, 16>(value));
}
f32 alpha_ref32() const
{
return std::bit_cast<f32>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Alpha: ref unorm8: %g, f16: %g", decoded.alpha_ref8(), decoded.alpha_ref16());
}
};
template<>
struct registers_decoder<NV4097_SET_COLOR_CLEAR_VALUE>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u8 blue() const { return bf_decoder<0, 8>(value); }
u8 green() const { return bf_decoder<8, 8>(value); }
u8 red() const { return bf_decoder<16, 8>(value); }
u8 alpha() const { return bf_decoder<24, 8>(value); }
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Clear: R = %u G = %u B = %u A = %u", decoded.red(), decoded.green(), decoded.blue(), decoded.alpha());
}
};
template<>
struct registers_decoder<NV4097_SET_STENCIL_MASK>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u8 stencil_mask() const
{
return bf_decoder<0, 8>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Stencil: (front) mask: %u", decoded.stencil_mask());
}
};
template<>
struct registers_decoder<NV4097_SET_BACK_STENCIL_MASK>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u8 back_stencil_mask() const
{
return bf_decoder<0, 8>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Stencil: (back) mask: %u", decoded.back_stencil_mask());
}
};
template<>
struct registers_decoder<NV4097_SET_LOGIC_OP>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
auto logic_operation() const
{
return to_logic_op(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Logic: op: %s", decoded.logic_operation());
}
};
template<>
struct registers_decoder<NV4097_SET_FRONT_FACE>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
auto front_face_mode() const
{
return to_front_face(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Front Face: %s", decoded.front_face_mode());
}
};
template<>
struct registers_decoder<NV4097_SET_CULL_FACE>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
cull_face cull_face_mode() const
{
return static_cast<cull_face>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Cull Face: %s", decoded.cull_face_mode());
}
};
template<>
struct registers_decoder<NV4097_SET_SURFACE_COLOR_TARGET>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
auto target() const
{
return to_surface_target(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Surface: Color target(s): %s", decoded.target());
}
};
template<>
struct registers_decoder<NV4097_SET_FOG_MODE>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
auto fog_equation() const
{
return to_fog_mode(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Fog: %s", decoded.fog_equation());
}
};
template<>
struct registers_decoder<NV4097_SET_BEGIN_END>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
auto primitive() const
{
return to_primitive_type(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Primitive: %s", decoded.primitive());
}
};
template<>
struct registers_decoder<NV3089_SET_OPERATION>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
auto transfer_op() const
{
return blit_engine::to_transfer_operation(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "NV3089: op: %s", decoded.transfer_op());
}
};
template<>
struct registers_decoder<NV3089_SET_COLOR_FORMAT>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
auto transfer_source_fmt() const
{
return blit_engine::to_transfer_source_format(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "NV3089: source fmt: %s", decoded.transfer_source_fmt());
}
};
template<>
struct registers_decoder<NV3089_SET_CONTEXT_SURFACE>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
auto ctx_surface() const
{
return blit_engine::to_context_surface(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "NV3089: context surface: %s", decoded.ctx_surface());
}
};
template<>
struct registers_decoder<NV3062_SET_COLOR_FORMAT>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
auto transfer_dest_fmt() const
{
return blit_engine::to_transfer_destination_format(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "NV3062: output fmt: %s", decoded.transfer_dest_fmt());
}
};
template<>
struct registers_decoder<NV4097_SET_BLEND_EQUATION>
{
struct decoded_type
{
private:
u32 value;
u16 blend_rgb_raw() const
{
return bf_decoder<0, 16>(value);
}
u16 blend_a_raw() const
{
return bf_decoder<16, 16>(value);
}
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
auto blend_rgb() const
{
return to_blend_equation(blend_rgb_raw());
}
auto blend_a() const
{
return to_blend_equation(blend_a_raw());
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Blend: equation rgb: %s a: %s", decoded.blend_rgb(), decoded.blend_a());
}
};
template<>
struct registers_decoder<NV4097_SET_BLEND_FUNC_SFACTOR>
{
struct decoded_type
{
private:
u32 value;
u16 src_blend_rgb_raw() const
{
return bf_decoder<0, 16>(value);
}
u16 src_blend_a_raw() const
{
return bf_decoder<16, 16>(value);
}
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
auto src_blend_rgb() const
{
return to_blend_factor(src_blend_rgb_raw());
}
auto src_blend_a() const
{
return to_blend_factor(src_blend_a_raw());
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Blend: sfactor rgb: %s a: %s", decoded.src_blend_rgb(), decoded.src_blend_a());
}
};
template<>
struct registers_decoder<NV4097_SET_BLEND_FUNC_DFACTOR>
{
struct decoded_type
{
private:
u32 value;
u16 dst_blend_rgb_raw() const
{
return bf_decoder<0, 16>(value);
}
u16 dst_blend_a_raw() const
{
return bf_decoder<16, 16>(value);
}
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
auto dst_blend_rgb() const
{
return to_blend_factor(dst_blend_rgb_raw());
}
auto dst_blend_a() const
{
return to_blend_factor(dst_blend_a_raw());
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Blend: dfactor rgb: %s a: %s", decoded.dst_blend_rgb(), decoded.dst_blend_a());
}
};
template<>
struct registers_decoder<NV4097_SET_COLOR_MASK>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
bool color_b() const
{
return bf_decoder<0, 8, bool>(value);
}
bool color_g() const
{
return bf_decoder<8, 8, bool>(value);
}
bool color_r() const
{
return bf_decoder<16, 8, bool>(value);
}
bool color_a() const
{
return bf_decoder<24, 8, bool>(value);
}
bool color_write_enabled() const
{
return value != 0;
}
u32 is_invalid() const
{
return (value & 0xfefefefe) ? value : 0;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
if (u32 invalid_value = decoded.is_invalid())
{
fmt::append(out, "Surface: color mask: invalid = 0x%08x", invalid_value);
return;
}
fmt::append(out, "Surface: color mask A = %s R = %s G = %s B = %s"
, print_boolean(decoded.color_a()), print_boolean(decoded.color_r()), print_boolean(decoded.color_g()), print_boolean(decoded.color_b()));
}
};
template<>
struct registers_decoder<NV4097_SET_COLOR_MASK_MRT>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
bool color_b(int index) const
{
return bf_decoder<3, 1, bool>(value >> (index * 4));
}
bool color_g(int index) const
{
return bf_decoder<2, 1, bool>(value >> (index * 4));
}
bool color_r(int index) const
{
return bf_decoder<1, 1, bool>(value >> (index * 4));
}
bool color_a(int index) const
{
return bf_decoder<0, 1, bool>(value >> (index * 4));
}
bool color_write_enabled(int index) const
{
return ((value >> (index * 4)) & 0xF) != 0;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
out += "Color Mask MRT:\n";
for (int index = 1; index < 4; ++index)
{
fmt::append(out, "Surface[%d]: A:%d R:%d G:%d B:%d\n",
index,
decoded.color_a(index),
decoded.color_r(index),
decoded.color_g(index),
decoded.color_b(index));
}
}
};
template<>
struct registers_decoder<NV4097_SET_SHADER_WINDOW>
{
struct decoded_type
{
private:
u32 value;
u8 window_shader_origin_raw() const { return bf_decoder<12, 4>(value); }
u8 window_shader_pixel_center_raw() const { return bf_decoder<16, 4>(value); }
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
auto window_shader_origin() const
{
return to_window_origin(window_shader_origin_raw());
}
auto window_shader_pixel_center() const
{
return to_window_pixel_center(window_shader_pixel_center_raw());
}
u16 window_shader_height() const
{
return bf_decoder<0, 12>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Viewport: height: %u origin: %s pixel center: %s", decoded.window_shader_height()
, decoded.window_shader_origin(), decoded.window_shader_pixel_center());
}
};
template<>
struct registers_decoder<NV4097_SET_BLEND_ENABLE_MRT>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
bool blend_surface_b() const
{
return bf_decoder<1, 1, bool>(value);
}
bool blend_surface_c() const
{
return bf_decoder<2, 1, bool>(value);
}
bool blend_surface_d() const
{
return bf_decoder<3, 1, bool>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Blend: mrt1 = %s, mrt2 = %s, mrt3 = %s", print_boolean(decoded.blend_surface_b()), print_boolean(decoded.blend_surface_c()), print_boolean(decoded.blend_surface_d()));
}
};
template<>
struct registers_decoder<NV4097_SET_USER_CLIP_PLANE_CONTROL>
{
struct decoded_type
{
private:
u32 value;
u8 clip_plane0_raw() const { return bf_decoder<0, 4>(value); }
u8 clip_plane1_raw() const { return bf_decoder<4, 4>(value); }
u8 clip_plane2_raw() const { return bf_decoder<8, 4>(value); }
u8 clip_plane3_raw() const { return bf_decoder<12, 4>(value); }
u8 clip_plane4_raw() const { return bf_decoder<16, 4>(value); }
u8 clip_plane5_raw() const { return bf_decoder<20, 4>(value); }
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
auto clip_plane0() const
{
return to_user_clip_plane_op(clip_plane0_raw());
}
auto clip_plane1() const
{
return to_user_clip_plane_op(clip_plane1_raw());
}
auto clip_plane2() const
{
return to_user_clip_plane_op(clip_plane2_raw());
}
auto clip_plane3() const
{
return to_user_clip_plane_op(clip_plane3_raw());
}
auto clip_plane4() const
{
return to_user_clip_plane_op(clip_plane4_raw());
}
auto clip_plane5() const
{
return to_user_clip_plane_op(clip_plane5_raw());
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "User clip: UC0: %s UC1: %s UC2: %s UC3: %s UC4: %s"
, decoded.clip_plane0()
, decoded.clip_plane1()
, decoded.clip_plane2()
, decoded.clip_plane3()
, decoded.clip_plane4()
, decoded.clip_plane5());
}
};
template<>
struct registers_decoder<NV4097_SET_LINE_WIDTH>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
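// Line width is encoded as unsigned fixed point with 3 fractional bits (eighths of a pixel).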
f32 line_width() const
{
return (value >> 3) + (value & 7) / 8.f;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Line width: %g", decoded.line_width());
}
};
template<>
struct registers_decoder<NV4097_SET_POINT_SIZE>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
f32 point_size() const
{
return std::bit_cast<f32>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Point size: %g", decoded.point_size());
}
};
template<>
struct registers_decoder<NV4097_SET_POINT_SPRITE_CONTROL>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
bool enabled() const
{
return bf_decoder<0, 1, bool>(value);
}
u16 texcoord_mask() const
{
return bf_decoder<8, 10>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Point sprite: enabled = %s, override mask = 0x%x", print_boolean(decoded.enabled()), decoded.texcoord_mask());
}
};
template<>
struct registers_decoder<NV4097_SET_SURFACE_FORMAT>
{
struct decoded_type
{
private:
u32 value;
u8 color_fmt_raw() const { return bf_decoder<0, 5>(value); }
u8 depth_fmt_raw() const { return bf_decoder<5, 3>(value); }
u8 type_raw() const { return bf_decoder<8, 4>(value); }
u8 antialias_raw() const { return bf_decoder<12, 4>(value); }
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
auto color_fmt() const
{
return to_surface_color_format(color_fmt_raw());
}
auto depth_fmt() const
{
return to_surface_depth_format(depth_fmt_raw());
}
auto type() const
{
return to_surface_raster_type(type_raw());
}
auto antialias() const
{
return to_surface_antialiasing(antialias_raw());
}
u8 log2width() const
{
return bf_decoder<16, 8>(value);
}
u8 log2height() const
{
return bf_decoder<24, 8>(value);
}
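// Color formats ordered before w16z16y16x16 in surface_color_format are treated as integer (non-float) formats here.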
bool is_integer_color_format() const
{
return color_fmt() < surface_color_format::w16z16y16x16;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Surface: Color format: %s DepthStencil format: %s Anti aliasing: %s w: %u h: %u", decoded.color_fmt()
, decoded.depth_fmt(), decoded.antialias(), decoded.log2width(), decoded.log2height());
}
};
template<>
struct registers_decoder<NV4097_SET_ZSTENCIL_CLEAR_VALUE>
{
struct decoded_type
{
private:
u32 value;
u32 clear_z16() const { return bf_decoder<0, 16, u32>(value); }
u32 clear_z24() const { return bf_decoder<8, 24>(value); }
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
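// For a 16-bit depth surface the clear value occupies the low 16 bits; for depth-stencil (Z24S8)
// the stencil byte sits in bits 0-7 and the 24-bit depth value in bits 8-31.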
u8 clear_stencil() const
{
return bf_decoder<0, 8>(value);
}
u32 clear_z(bool is_depth_stencil) const
{
if (is_depth_stencil)
return clear_z24();
return clear_z16();
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Clear: Z24 = %u, z16 = %u, Stencil = %u", decoded.clear_z(true), decoded.clear_z(false), decoded.clear_stencil());
}
};
template<>
struct registers_decoder<NV4097_SET_INDEX_ARRAY_DMA>
{
struct decoded_type
{
private:
u32 value;
u8 type_raw() const { return bf_decoder<4, 8>(value); }
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
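// Low 4 bits select the DMA location of the index buffer; the following bits carry the index format (see index_array_type).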
CellGcmLocation index_dma() const
{
return CellGcmLocation{bf_decoder<0, 4>(value)};
}
index_array_type type() const
{
// Must be a valid value
return static_cast<index_array_type>(type_raw());
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Index: type: %s dma: %s", decoded.type(), decoded.index_dma());
}
};
template<>
struct registers_decoder<NV4097_SET_CONTEXT_DMA_COLOR_A>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
CellGcmLocation dma_surface_a() const
{
return CellGcmLocation{value};
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Surface: A DMA: %s", decoded.dma_surface_a());
}
};
template<>
struct registers_decoder<NV4097_SET_CONTEXT_DMA_COLOR_B>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
CellGcmLocation dma_surface_b() const
{
return CellGcmLocation{value};
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Surface: B DMA: %s", decoded.dma_surface_b());
}
};
template<>
struct registers_decoder<NV4097_SET_CONTEXT_DMA_COLOR_C>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
CellGcmLocation dma_surface_c() const
{
return CellGcmLocation{value};
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Surface: C DMA: %s", decoded.dma_surface_c());
}
};
template<>
struct registers_decoder<NV4097_SET_CONTEXT_DMA_COLOR_D>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
CellGcmLocation dma_surface_d() const
{
return CellGcmLocation{value};
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Surface: D DMA: %s", decoded.dma_surface_d());
}
};
template<>
struct registers_decoder<NV4097_SET_CONTEXT_DMA_ZETA>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
CellGcmLocation dma_surface_z() const
{
return CellGcmLocation{value};
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Surface: Z DMA: %s", decoded.dma_surface_z());
}
};
template<>
struct registers_decoder<NV3089_SET_CONTEXT_DMA_IMAGE>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
CellGcmLocation context_dma() const
{
return CellGcmLocation{value};
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "NV3089: input DMA: %s", decoded.context_dma());
}
};
template<>
struct registers_decoder<NV3062_SET_CONTEXT_DMA_IMAGE_DESTIN>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
CellGcmLocation output_dma() const
{
return CellGcmLocation{value};
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "NV3062: output DMA: %s", decoded.output_dma());
}
};
template<>
struct registers_decoder<NV309E_SET_CONTEXT_DMA_IMAGE>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
CellGcmLocation context_dma() const
{
return CellGcmLocation{value};
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "NV309E: output DMA: %s", decoded.context_dma());
}
};
template<>
struct registers_decoder<NV0039_SET_CONTEXT_DMA_BUFFER_OUT>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
CellGcmLocation output_dma() const
{
return CellGcmLocation{value};
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "NV0039: output DMA: %s", decoded.output_dma());
}
};
template<>
struct registers_decoder<NV0039_SET_CONTEXT_DMA_BUFFER_IN>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
CellGcmLocation input_dma() const
{
return CellGcmLocation{value};
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "NV0039: input DMA: %s", decoded.input_dma());
}
};
template<>
struct registers_decoder<NV4097_SET_CONTEXT_DMA_REPORT>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
auto context_dma_report() const
{
return blit_engine::to_context_dma(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "REPORT: context DMA: %s", decoded.context_dma_report());
}
};
template<>
struct registers_decoder<NV4097_SET_CONTEXT_DMA_NOTIFIES>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
CellGcmLocation context_dma_notify() const
{
return CellGcmLocation{value};
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "NOTIFY: context DMA: %s, index: %u", decoded.context_dma_notify(), (decoded.context_dma_notify() & 7) ^ 7);
}
};
template<>
struct registers_decoder<NV3089_IMAGE_IN_FORMAT>
{
struct decoded_type
{
private:
u32 value;
u8 transfer_origin_raw() const { return bf_decoder<16, 8>(value); }
u8 transfer_interpolator_raw() const { return bf_decoder<24, 8>(value); }
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u16 format() const
{
return bf_decoder<0, 16>(value);
}
auto transfer_origin() const
{
return blit_engine::to_transfer_origin(transfer_origin_raw());
}
auto transfer_interpolator() const
{
return blit_engine::to_transfer_interpolator(transfer_interpolator_raw());
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "NV3089: input fmt: %u origin: %s interp: %s", decoded.format()
, decoded.transfer_origin(), decoded.transfer_interpolator());
}
};
template<>
struct registers_decoder<NV309E_SET_FORMAT>
{
struct decoded_type
{
private:
u32 value;
u32 transfer_destination_fmt() const { return bf_decoder<0, 16, u32>(value); }
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
auto format() const
{
return blit_engine::to_transfer_destination_format(transfer_destination_fmt());
}
u8 sw_height_log2() const
{
return bf_decoder<16, 8>(value);
}
u8 sw_width_log2() const
{
return bf_decoder<24, 8>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "NV309E: output fmt: %s log2-width: %u log2-height: %u", decoded.format(),
decoded.sw_width_log2(), decoded.sw_height_log2());
}
};
template<>
struct registers_decoder<NV0039_FORMAT>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u8 input_format() const
{
return bf_decoder<0, 8>(value);
}
u8 output_format() const
{
return bf_decoder<8, 8>(value);
}
};
static auto decode(u32 value) {
return std::make_tuple(static_cast<u8>(value & 0xff), static_cast<u8>(value >> 8));
}
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "NV0039: input format = %u, output format = %u", decoded.input_format(), decoded.output_format());
}
};
template<>
struct registers_decoder<NV4097_SET_BLEND_COLOR2>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u16 blue() const
{
return bf_decoder<0, 16>(value);
}
u16 alpha() const
{
return bf_decoder<16, 16>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Blend color: 16b BA = %u, %u", decoded.blue(), decoded.alpha());
}
};
template<>
struct registers_decoder<NV4097_SET_BLEND_COLOR>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u16 red16() const
{
return bf_decoder<0, 16>(value);
}
u16 green16() const
{
return bf_decoder<16, 16>(value);
}
u8 blue8() const
{
return bf_decoder<0, 8>(value);
}
u8 green8() const
{
return bf_decoder<8, 8>(value);
}
u8 red8() const
{
return bf_decoder<16, 8>(value);
}
u8 alpha8() const
{
return bf_decoder<24, 8>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Blend color: 8b BGRA = %u, %u, %u, %u 16b RG = %u , %u"
, decoded.blue8(), decoded.green8(), decoded.red8(), decoded.alpha8(), decoded.red16(), decoded.green16());
}
};
template<>
struct registers_decoder<NV3089_IMAGE_IN>
{
struct decoded_type
{
private:
u32 value;
u32 x_raw() const
{
return bf_decoder<0, 16, u32>(value);
}
u32 y_raw() const
{
return bf_decoder<16, 16, u32>(value);
}
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
// x and y given as 16 bit fixed point
f32 x() const
{
return x_raw() / 16.f;
}
f32 y() const
{
return y_raw() / 16.f;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "NV3089: in x = %u y = %u", decoded.x(), decoded.y());
}
};
template<>
struct registers_decoder<NV4097_NO_OPERATION>
{
struct decoded_type
{
decoded_type(u32) {}
};
static void dump(std::string& out, u32)
{
out += "(nop)";
}
};
template<>
struct registers_decoder<NV4097_INVALIDATE_VERTEX_CACHE_FILE>
{
struct decoded_type
{
decoded_type(u32) {}
};
static void dump(std::string& out, u32)
{
out += "(invalidate vertex cache file)";
}
};
template<>
struct registers_decoder<NV4097_INVALIDATE_VERTEX_FILE>
{
struct decoded_type
{
decoded_type(u32) {}
};
static void dump(std::string& out, u32)
{
out += "(invalidate vertex file)";
}
};
template<>
struct registers_decoder<NV4097_SET_ANTI_ALIASING_CONTROL>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
bool msaa_enabled() const
{
return bf_decoder<0, 1, bool>(value);
}
bool msaa_alpha_to_coverage() const
{
return bf_decoder<4, 1, bool>(value);
}
bool msaa_alpha_to_one() const
{
return bf_decoder<8, 1, bool>(value);
}
u16 msaa_sample_mask() const
{
return bf_decoder<16, 16>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Anti_aliasing: %s alpha_to_coverage: %s alpha_to_one: %s sample_mask: %u", print_boolean(decoded.msaa_enabled()), print_boolean(decoded.msaa_alpha_to_coverage()), print_boolean(decoded.msaa_alpha_to_one()), decoded.msaa_sample_mask());
}
};
template<>
struct registers_decoder<NV4097_SET_SHADE_MODE>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
auto shading() const
{
return to_shading_mode(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Shading mode: %s", decoded.shading());
}
};
template<>
struct registers_decoder<NV4097_SET_FRONT_POLYGON_MODE>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
auto front_polygon_mode() const
{
return to_polygon_mode(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Front polygon mode: %s", decoded.front_polygon_mode());
}
};
template<>
struct registers_decoder<NV4097_SET_BACK_POLYGON_MODE>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
auto back_polygon_mode() const
{
return to_polygon_mode(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "back polygon mode: %s", decoded.back_polygon_mode());
}
};
template<>
struct registers_decoder<NV4097_SET_TRANSFORM_CONSTANT_LOAD>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u32 transform_constant_load() const
{
return value;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Transform constant load: %u", decoded.transform_constant_load());
}
};
template<>
struct registers_decoder<NV4097_SET_POLYGON_STIPPLE>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
bool enabled() const
{
return value > 0;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "polygon_stipple: %s", print_boolean(decoded.enabled()));
}
};
template <>
struct registers_decoder<NV4097_SET_ZCULL_EN>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
bool enabled() const
{
return value > 0;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "ZCULL: %s", print_boolean(decoded.enabled()));
}
};
template <>
struct registers_decoder<NV4097_SET_ZCULL_STATS_ENABLE>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
bool enabled() const
{
return value > 0;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "ZCULL: stats %s", print_boolean(decoded.enabled()));
}
};
template <>
struct registers_decoder<NV4097_SET_ZPASS_PIXEL_COUNT_ENABLE>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
bool enabled() const
{
return value > 0;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "ZCULL: pixel count %s", print_boolean(decoded.enabled()));
}
};
#define EXPAND_RANGE_1(index, MACRO) \
MACRO(index)
#define EXPAND_RANGE_2(index, MACRO) \
EXPAND_RANGE_1((index), MACRO) \
EXPAND_RANGE_1((index) + 1, MACRO)
#define EXPAND_RANGE_4(index, MACRO) \
EXPAND_RANGE_2((index), MACRO) \
EXPAND_RANGE_2((index) + 2, MACRO)
#define EXPAND_RANGE_8(index, MACRO) \
EXPAND_RANGE_4((index), MACRO) \
EXPAND_RANGE_4((index) + 4, MACRO)
#define EXPAND_RANGE_16(index, MACRO) \
EXPAND_RANGE_8((index), MACRO) \
EXPAND_RANGE_8((index) + 8, MACRO)
#define EXPAND_RANGE_32(index, MACRO) \
EXPAND_RANGE_16((index), MACRO) \
EXPAND_RANGE_16((index) + 16, MACRO)
#define EXPAND_RANGE_64(index, MACRO) \
EXPAND_RANGE_32((index), MACRO) \
EXPAND_RANGE_32((index) + 32, MACRO)
#define EXPAND_RANGE_128(index, MACRO) \
EXPAND_RANGE_64((index), MACRO) \
EXPAND_RANGE_64((index) + 64, MACRO)
#define EXPAND_RANGE_256(index, MACRO) \
EXPAND_RANGE_128((index), MACRO) \
EXPAND_RANGE_128((index) + 128, MACRO)
#define EXPAND_RANGE_512(index, MACRO) \
EXPAND_RANGE_256((index), MACRO) \
EXPAND_RANGE_256((index) + 256, MACRO)
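// Illustrative sketch, not part of the original header: each EXPAND_RANGE_N
// level simply pastes MACRO(index) ... MACRO(index + N - 1). The throwaway
// counting macro below (hypothetical name) checks this at compile time.
#define RSX_DECODE_EXAMPLE_COUNT(index) + 1
static_assert(0 EXPAND_RANGE_4(0, RSX_DECODE_EXAMPLE_COUNT) == 4,
"EXPAND_RANGE_4 invokes its macro argument once per index in [0, 4)");
#undef RSX_DECODE_EXAMPLE_COUNT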
template<u32 index>
struct transform_constant_helper
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
f32 constant_value() const
{
return std::bit_cast<f32>(value);
}
};
static constexpr u32 reg = index / 4;
static constexpr u8 subreg = index % 4;
static void dump(std::string& out, const decoded_type& decoded)
{
auto get_subreg_name = [](u8 subreg) -> std::string_view
{
return subreg == 0 ? "x"sv :
subreg == 1 ? "y"sv :
subreg == 2 ? "z"sv :
"w"sv;
};
fmt::append(out, "TransformConstant[%u].%s: %g (0x%08x)", reg, get_subreg_name(subreg), decoded.constant_value(), std::bit_cast<u32>(decoded.constant_value()));
}
};
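// Worked example, not part of the original header: each 32-bit word written in
// the NV4097_SET_TRANSFORM_CONSTANT range is one component of a vec4 constant,
// so word index 6 lands in constant register 1, component z (6 / 4 = 1, 6 % 4 = 2).
static_assert(transform_constant_helper<6>::reg == 1 && transform_constant_helper<6>::subreg == 2,
"word 6 of the transform constant block addresses TransformConstant[1].z");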
#define TRANSFORM_CONSTANT(index) template<> struct registers_decoder<NV4097_SET_TRANSFORM_CONSTANT + index> : public transform_constant_helper<index> {};
#define DECLARE_TRANSFORM_CONSTANT(index) NV4097_SET_TRANSFORM_CONSTANT + index,
EXPAND_RANGE_32(0, TRANSFORM_CONSTANT)
template<u32 index>
struct transform_program_helper
{
struct decoded_type
{
const u32 value;
constexpr decoded_type(u32 value) noexcept : value(value) {}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Transform Program (%u): 0x%08x", index, decoded.value);
}
};
template<>
struct registers_decoder<NV4097_SET_TRANSFORM_PROGRAM_LOAD>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u32 transform_program_load() const
{
return value;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Transform Program Load: %u", decoded.transform_program_load());
}
};
template <>
struct registers_decoder<NV4097_DRAW_ARRAYS>
{
struct decoded_type
{
private:
u32 value;
u16 count_raw() const
{
return bf_decoder<24, 8>(value);
}
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u32 start() const
{
return bf_decoder<0, 24>(value);
}
u16 count() const
{
return count_raw() + 1;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Draw vertexes range [%u, %u]", decoded.start(), decoded.start() + decoded.count());
}
};
template <>
struct registers_decoder<NV4097_DRAW_INDEX_ARRAY>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u32 start() const
{
return bf_decoder<0, 24>(value);
}
u16 count() const
{
return static_cast<u16>(bf_decoder<24, 8>(value) + 1);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Draw vertexes range {IdxArray[%u], IdxArray[%u]}", decoded.start(), decoded.start() + decoded.count());
}
};
template <>
struct registers_decoder<NV4097_SET_CONTROL0>
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
bool depth_float() const
{
return bf_decoder<12, 1>(value) != 0;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Depth float: %s", print_boolean(decoded.depth_float()));
}
};
#define TRANSFORM_PROGRAM(index) template<> struct registers_decoder<NV4097_SET_TRANSFORM_PROGRAM + index> : public transform_program_helper<index> {};
#define DECLARE_TRANSFORM_PROGRAM(index) NV4097_SET_TRANSFORM_PROGRAM + index,
EXPAND_RANGE_32(0, TRANSFORM_PROGRAM)
template<u32 index>
struct vertex_array_helper
{
struct decoded_type
{
private:
u32 value;
u8 type_raw() const
{
return bf_decoder<0, 3>(value);
}
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u16 frequency() const
{
return bf_decoder<16, 16>(value);
}
u8 stride() const
{
return bf_decoder<8, 8>(value);
}
u8 size() const
{
return bf_decoder<4, 4>(value);
}
rsx::vertex_base_type type() const
{
return rsx::to_vertex_base_type(type_raw());
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
auto print_vertex_attribute_format = [](rsx::vertex_base_type type) -> std::string_view
{
switch (type)
{
case rsx::vertex_base_type::s1: return "Signed short normalized";
case rsx::vertex_base_type::f: return "Float";
case rsx::vertex_base_type::sf: return "Half float";
case rsx::vertex_base_type::ub: return "Unsigned byte normalized";
case rsx::vertex_base_type::s32k: return "Signed short unnormalized";
case rsx::vertex_base_type::cmp: return "CMP";
case rsx::vertex_base_type::ub256: return "Unsigned byte unnormalized";
}
fmt::throw_exception("Unexpected enum found");
};
fmt::append(out, "Vertex Data Array %u%s: Type: %s, size: %u, stride: %u, frequency: %u", index, decoded.size() ? "" : " (disabled)", print_vertex_attribute_format(decoded.type()), decoded.size(), decoded.stride(), decoded.frequency());
}
};
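// Worked example, not part of the original header: a format word of 0x00011042
// splits into a raw type field of 2 (CELL_GCM_VERTEX_F, i.e. float), size = 4
// components, stride = 16 bytes and frequency = 1, i.e. a tightly packed
// float4 attribute advanced once per vertex.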
#define VERTEX_DATA_ARRAY_FORMAT(index) template<> struct registers_decoder<NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + index> : public vertex_array_helper<index> {};
#define DECLARE_VERTEX_DATA_ARRAY_FORMAT(index) NV4097_SET_VERTEX_DATA_ARRAY_FORMAT + index,
EXPAND_RANGE_16(0, VERTEX_DATA_ARRAY_FORMAT)
template<u32 index>
struct vertex_array_offset_helper
{
struct decoded_type
{
private:
u32 value;
public:
constexpr decoded_type(u32 value) noexcept : value(value) {}
u32 offset() const
{
return value;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Vertex Array %u: Offset: 0x%x", index, decoded.offset());
}
};
#define VERTEX_DATA_ARRAY_OFFSET(index) template<> struct registers_decoder<NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + index> : public vertex_array_offset_helper<index> {};
#define DECLARE_VERTEX_DATA_ARRAY_OFFSET(index) NV4097_SET_VERTEX_DATA_ARRAY_OFFSET + index,
EXPAND_RANGE_16(0, VERTEX_DATA_ARRAY_OFFSET)
template<typename type, int count>
struct register_vertex_printer;
template<int count>
struct register_vertex_printer<f32, count>
{
static std::string type()
{
return fmt::format("float%u", count);
}
static std::string value(u32 v)
{
return fmt::format("%g", std::bit_cast<f32>(v));
}
};
template<int count>
struct register_vertex_printer<u16, count>
{
static std::string type()
{
return fmt::format("short%u", count);
}
static std::string value(u32 v)
{
return fmt::format("%u %u", (v & 0xffff), (v >> 16));
}
};
template<>
struct register_vertex_printer<u8, 4>
{
static std::string_view type()
{
return "uchar4";
}
static std::string value(u32 v)
{
return fmt::format("%u %u %u %u", (v & 0xff), ((v >> 8) & 0xff), ((v >> 16) & 0xff), ((v >> 24) & 0xff));
}
};
template<u32 index, typename type, int count>
struct register_vertex_helper
{
struct decoded_type
{
const u32 value;
constexpr decoded_type(u32 value) noexcept : value(value) {}
};
static constexpr usz increment_per_array_index = (count * sizeof(type)) / sizeof(u32);
static constexpr usz attribute_index = index / increment_per_array_index;
static constexpr usz vertex_subreg = index % increment_per_array_index;
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "register vertex: %u as %u: %s", attribute_index, register_vertex_printer<type, count>::type(), register_vertex_printer<type, count>::value(decoded.value));
}
};
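// Worked example, not part of the original header: for a float4 immediate
// attribute each slot spans four 32-bit words, so word index 5 of the
// NV4097_SET_VERTEX_DATA4F_M block belongs to attribute 1, second component.
static_assert(register_vertex_helper<5, f32, 4>::attribute_index == 1 &&
register_vertex_helper<5, f32, 4>::vertex_subreg == 1,
"word 5 of the DATA4F block maps to attribute 1, second component");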
#define VERTEX_DATA4UB(index) \
template<> struct registers_decoder<NV4097_SET_VERTEX_DATA4UB_M + index> : public register_vertex_helper<index, u8, 4> {};
#define VERTEX_DATA1F(index) \
template<> struct registers_decoder<NV4097_SET_VERTEX_DATA1F_M + index> : public register_vertex_helper<index, f32, 1> {};
#define VERTEX_DATA2F(index) \
template<> struct registers_decoder<NV4097_SET_VERTEX_DATA2F_M + index> : public register_vertex_helper<index, f32, 2> {};
#define VERTEX_DATA3F(index) \
template<> struct registers_decoder<NV4097_SET_VERTEX_DATA3F_M + index> : public register_vertex_helper<index, f32, 3> {};
#define VERTEX_DATA4F(index) \
template<> struct registers_decoder<NV4097_SET_VERTEX_DATA4F_M + index> : public register_vertex_helper<index, f32, 4> {};
#define VERTEX_DATA2S(index) \
template<> struct registers_decoder<NV4097_SET_VERTEX_DATA2S_M + index> : public register_vertex_helper<index, u16, 2> {};
#define VERTEX_DATA4S(index) \
template<> struct registers_decoder<NV4097_SET_VERTEX_DATA4S_M + index> : public register_vertex_helper<index, u16, 4> {};
#define DECLARE_VERTEX_DATA4UB(index) \
NV4097_SET_VERTEX_DATA4UB_M + index,
#define DECLARE_VERTEX_DATA1F(index) \
NV4097_SET_VERTEX_DATA1F_M + index,
#define DECLARE_VERTEX_DATA2F(index) \
NV4097_SET_VERTEX_DATA2F_M + index,
#define DECLARE_VERTEX_DATA3F(index) \
NV4097_SET_VERTEX_DATA3F_M + index,
#define DECLARE_VERTEX_DATA4F(index) \
NV4097_SET_VERTEX_DATA4F_M + index,
#define DECLARE_VERTEX_DATA2S(index) \
NV4097_SET_VERTEX_DATA2S_M + index,
#define DECLARE_VERTEX_DATA4S(index) \
NV4097_SET_VERTEX_DATA4S_M + index,
EXPAND_RANGE_16(0, VERTEX_DATA4UB)
EXPAND_RANGE_16(0, VERTEX_DATA1F)
EXPAND_RANGE_16(0, VERTEX_DATA2F)
EXPAND_RANGE_16(0, VERTEX_DATA3F)
EXPAND_RANGE_16(0, VERTEX_DATA4F)
EXPAND_RANGE_16(0, VERTEX_DATA2S)
EXPAND_RANGE_16(0, VERTEX_DATA4S)
template <u32 index>
struct texture_offset_helper
{
struct decoded_type
{
const u32 value;
constexpr decoded_type(u32 value) noexcept : value(value) {}
u32 offset() const
{
return value;
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Texture %u: Offset: 0x%x", index, decoded.offset());
}
};
template <u32 index>
struct texture_format_helper
{
struct decoded_type
{
const u32 value;
constexpr decoded_type(u32 value) noexcept : value(value) {}
CellGcmLocation location() const
{
return CellGcmLocation{(value & 3) - 1};
}
bool cubemap() const
{
return bf_decoder<2, 1, bool>(value);
}
u8 border_type() const
{
return bf_decoder<3, 1>(value);
}
texture_dimension dimension() const
{
// Hack: avoid debugger crash on not-written value (needs checking on realhw)
// This is not the function RSX uses so it's safe
return rsx::to_texture_dimension(std::clamp<u8>(bf_decoder<4, 4>(value), 1, 3));
}
CellGcmTexture format() const
{
return CellGcmTexture{bf_decoder<8, 8>(value)};
}
u16 mipmap() const
{
return bf_decoder<16, 16>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Texture %u: %s, Cubemap: %s, %s, %s, Mipmap: %u", index,
decoded.location(), decoded.cubemap(), decoded.dimension(), decoded.format(), decoded.mipmap());
}
};
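// Worked example, not part of the original header: a format word of 0x00018521
// decodes above as location() == CELL_GCM_LOCATION_LOCAL (low two bits are 1,
// minus one), cubemap() == false, border_type() == 0, a raw dimension field of
// 2 (a plain 2D texture) and format() == 0x85 (CELL_GCM_TEXTURE_A8R8G8B8 with
// the linear and unnormalized flag bits clear), with mipmap() == 1 taken from
// the upper 16 bits.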
template <u32 index>
struct texture_image_rect_helper
{
struct decoded_type
{
const u32 value;
constexpr decoded_type(u32 value) noexcept : value(value) {}
u16 height() const
{
return bf_decoder<0, 16>(value);
}
u16 width() const
{
return bf_decoder<16, 16>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Texture %u: W: %u, H: %u", index, decoded.width(), decoded.height());
}
};
template <u32 index>
struct texture_control0_helper
{
struct decoded_type
{
const u32 value;
constexpr decoded_type(u32 value) noexcept : value(value) {}
bool enabled() const
{
return bf_decoder<31, 1, bool>(value);
}
f32 min_lod() const
{
return rsx::decode_fxp<4, 8, false>(bf_decoder<19, 12>(value));
}
f32 max_lod() const
{
return rsx::decode_fxp<4, 8, false>(bf_decoder<7, 12>(value));
}
texture_max_anisotropy max_aniso() const
{
return rsx::to_texture_max_anisotropy(bf_decoder<4, 3>(value));
}
bool alpha_kill_enabled() const
{
return bf_decoder<2, 1, bool>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Texture %u: %s, Min/Max LOD: %g/%g, Max Aniso: %s, AKill: %s", index, print_boolean(decoded.enabled())
, decoded.min_lod(), decoded.max_lod(), decoded.max_aniso(), print_boolean(decoded.alpha_kill_enabled()));
}
};
template <u32 index>
struct texture_control3_helper
{
struct decoded_type
{
const u32 value;
constexpr decoded_type(u32 value) noexcept : value(value) {}
u16 depth() const
{
return bf_decoder<20, 12>(value);
}
u32 pitch() const
{
return bf_decoder<0, 16>(value);
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "Texture %u: Pitch: %u, Depth: %u", index, decoded.pitch(), decoded.depth());
}
};
#define TEXTURE_OFFSET(index) \
template<> struct registers_decoder<NV4097_SET_TEXTURE_OFFSET + ((index) * 8)> : public texture_offset_helper<index> {};
#define TEXTURE_FORMAT(index) \
template<> struct registers_decoder<NV4097_SET_TEXTURE_FORMAT + ((index) * 8)> : public texture_format_helper<index> {};
#define TEXTURE_IMAGE_RECT(index) \
template<> struct registers_decoder<NV4097_SET_TEXTURE_IMAGE_RECT + ((index) * 8)> : public texture_image_rect_helper<index> {};
#define TEXTURE_CONTROL0(index) \
template<> struct registers_decoder<NV4097_SET_TEXTURE_CONTROL0 + ((index) * 8)> : public texture_control0_helper<index> {};
#define TEXTURE_CONTROL3(index) \
template<> struct registers_decoder<NV4097_SET_TEXTURE_CONTROL3 + index> : public texture_control3_helper<index> {};
#define DECLARE_TEXTURE_OFFSET(index) \
NV4097_SET_TEXTURE_OFFSET + ((index) * 8),
#define DECLARE_TEXTURE_FORMAT(index) \
NV4097_SET_TEXTURE_FORMAT + ((index) * 8),
#define DECLARE_TEXTURE_IMAGE_RECT(index) \
NV4097_SET_TEXTURE_IMAGE_RECT + ((index) * 8),
#define DECLARE_TEXTURE_CONTROL0(index) \
NV4097_SET_TEXTURE_CONTROL0 + ((index) * 8),
#define DECLARE_TEXTURE_CONTROL3(index) \
NV4097_SET_TEXTURE_CONTROL3 + index,
EXPAND_RANGE_16(0, TEXTURE_OFFSET)
EXPAND_RANGE_16(0, TEXTURE_FORMAT)
EXPAND_RANGE_16(0, TEXTURE_IMAGE_RECT)
EXPAND_RANGE_16(0, TEXTURE_CONTROL0)
EXPAND_RANGE_16(0, TEXTURE_CONTROL3)
template <u32 index>
struct vertex_texture_control0_helper
{
struct decoded_type
{
const u32 value;
constexpr decoded_type(u32 value) noexcept : value(value) {}
bool enabled() const
{
return bf_decoder<31, 1, bool>(value);
}
f32 min_lod() const
{
return rsx::decode_fxp<4, 8, false>(bf_decoder<19, 12>(value));
}
f32 max_lod() const
{
return rsx::decode_fxp<4, 8, false>(bf_decoder<7, 12>(value));
}
};
static void dump(std::string& out, const decoded_type& decoded)
{
fmt::append(out, "VTexture %u: %s, Min/Max LOD: %g/%g", index, print_boolean(decoded.enabled())
, decoded.min_lod(), decoded.max_lod());
}
};
#define VERTEX_TEXTURE_CONTROL0(index) \
template<> struct registers_decoder<NV4097_SET_VERTEX_TEXTURE_CONTROL0 + ((index) * 8)> : public vertex_texture_control0_helper<index> {};
#define DECLARE_VERTEX_TEXTURE_CONTROL0(index) \
NV4097_SET_VERTEX_TEXTURE_CONTROL0 + ((index) * 8),
EXPAND_RANGE_4(0, VERTEX_TEXTURE_CONTROL0)
} // end namespace rsx
| 93,153 | C++ | .h | 3,986 | 20.82564 | 255 | 0.703047 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,993 | GSRender.h | RPCS3_rpcs3/rpcs3/Emu/RSX/GSRender.h |
#pragma once
#include "GSFrameBase.h"
#include "Emu/RSX/RSXThread.h"
enum wm_event
{
none, // nothing
toggle_fullscreen, // user is requesting a fullscreen switch
geometry_change_notice, // about to start resizing and/or moving the window
geometry_change_in_progress, // window being resized and/or moved
window_resized, // window was resized
window_minimized, // window was minimized
window_restored, // window was restored from a minimized state
window_moved, // window moved without resize
window_visibility_changed
};
class GSRender : public rsx::thread
{
protected:
GSFrameBase* m_frame;
draw_context_t m_context = nullptr;
public:
~GSRender() override;
GSRender(utils::serial* ar) noexcept;
void on_init_thread() override;
void on_exit() override;
void flip(const rsx::display_flip_info_t& info) override;
f64 get_display_refresh_rate() const override;
GSFrameBase* get_frame() const { return m_frame; }
};
| 1,028 | C++ | .h | 29 | 33.551724 | 81 | 0.702321 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,994 | color_utils.h | RPCS3_rpcs3/rpcs3/Emu/RSX/color_utils.h |
#pragma once
#include <util/types.hpp>
#include <Utilities/geometry.h>
#include "gcm_enums.h"
namespace rsx
{
struct texture_channel_remap_t
{
u32 encoded = 0xDEAD;
std::array<u8, 4> control_map;
std::array<u8, 4> channel_map;
template <typename T>
std::array<T, 4> remap(const std::array<T, 4>& components, T select_zero, T select_one) const
{
ensure(encoded != 0xDEAD, "Channel remap was not initialized");
std::array<T, 4> remapped{};
for (u8 channel = 0; channel < 4; ++channel)
{
switch (control_map[channel])
{
default:
[[fallthrough]];
case CELL_GCM_TEXTURE_REMAP_REMAP:
remapped[channel] = components[channel_map[channel]];
break;
case CELL_GCM_TEXTURE_REMAP_ZERO:
remapped[channel] = select_zero;
break;
case CELL_GCM_TEXTURE_REMAP_ONE:
remapped[channel] = select_one;
break;
}
}
return remapped;
}
template <typename T>
requires std::is_integral_v<T> || std::is_floating_point_v<T>
std::array<T, 4> remap(const std::array<T, 4>& components) const
{
return remap(components, static_cast<T>(0), static_cast<T>(1));
}
template <typename T>
color4_base<T> remap(const color4_base<T>& components)
{
const std::array<T, 4> values = { components.a, components.r, components.g, components.b };
const auto shuffled = remap(values, T{ 0 }, T{ 1 });
return color4_base<T>(shuffled[1], shuffled[2], shuffled[3], shuffled[0]);
}
template <typename T>
requires std::is_integral_v<T> || std::is_enum_v<T>
texture_channel_remap_t with_encoding(T encoding) const
{
texture_channel_remap_t result = *this;
result.encoded = encoding;
return result;
}
};
static const texture_channel_remap_t default_remap_vector =
{
.encoded = RSX_TEXTURE_REMAP_IDENTITY,
.control_map = { CELL_GCM_TEXTURE_REMAP_REMAP, CELL_GCM_TEXTURE_REMAP_REMAP, CELL_GCM_TEXTURE_REMAP_REMAP, CELL_GCM_TEXTURE_REMAP_REMAP },
.channel_map = { CELL_GCM_TEXTURE_REMAP_FROM_A, CELL_GCM_TEXTURE_REMAP_FROM_R, CELL_GCM_TEXTURE_REMAP_FROM_G, CELL_GCM_TEXTURE_REMAP_FROM_B }
};
static inline texture_channel_remap_t decode_remap_encoding(u32 remap_ctl)
{
// Remapping tables; format is A-R-G-B
// Remap input table. Contains channel index to read color from
texture_channel_remap_t result =
{
.encoded = remap_ctl
};
result.channel_map =
{
static_cast<u8>(remap_ctl & 0x3),
static_cast<u8>((remap_ctl >> 2) & 0x3),
static_cast<u8>((remap_ctl >> 4) & 0x3),
static_cast<u8>((remap_ctl >> 6) & 0x3),
};
// Remap control table. Controls whether the remap value is used, or force either 0 or 1
result.control_map =
{
static_cast<u8>((remap_ctl >> 8) & 0x3),
static_cast<u8>((remap_ctl >> 10) & 0x3),
static_cast<u8>((remap_ctl >> 12) & 0x3),
static_cast<u8>((remap_ctl >> 14) & 0x3),
};
return result;
}
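// Illustrative sketch, not part of the original header: decoding the identity
// encoding 0xAAE4 (RSX_TEXTURE_REMAP_IDENTITY) reproduces default_remap_vector:
// the low byte 0xE4 packs the source channels {0, 1, 2, 3} (A, R, G, B pass
// through) and the high byte 0xAA packs CELL_GCM_TEXTURE_REMAP_REMAP into all
// four control slots. The helper name below is hypothetical.
static inline bool is_identity_remap_example(u32 remap_ctl)
{
const auto decoded = decode_remap_encoding(remap_ctl);
for (u8 channel = 0; channel < 4; ++channel)
{
if (decoded.channel_map[channel] != channel)
return false;
if (decoded.control_map[channel] != CELL_GCM_TEXTURE_REMAP_REMAP)
return false;
}
return true; // holds for remap_ctl == RSX_TEXTURE_REMAP_IDENTITY (0xAAE4)
}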
// Convert color write mask for G8B8 to R8G8
static inline u32 get_g8b8_r8g8_clearmask(u32 mask)
{
u32 result = 0;
if (mask & RSX_GCM_CLEAR_GREEN_BIT) result |= RSX_GCM_CLEAR_GREEN_BIT;
if (mask & RSX_GCM_CLEAR_BLUE_BIT) result |= RSX_GCM_CLEAR_RED_BIT;
return result;
}
static inline void get_g8b8_r8g8_colormask(bool& red, bool&/*green*/, bool& blue, bool& alpha)
{
red = blue;
blue = false;
alpha = false;
}
static inline void get_g8b8_clear_color(u8& red, u8& /*green*/, u8& blue, u8& /*alpha*/)
{
red = blue;
}
static inline u32 get_abgr8_clearmask(u32 mask)
{
u32 result = 0;
if (mask & RSX_GCM_CLEAR_RED_BIT) result |= RSX_GCM_CLEAR_BLUE_BIT;
if (mask & RSX_GCM_CLEAR_GREEN_BIT) result |= RSX_GCM_CLEAR_GREEN_BIT;
if (mask & RSX_GCM_CLEAR_BLUE_BIT) result |= RSX_GCM_CLEAR_RED_BIT;
if (mask & RSX_GCM_CLEAR_ALPHA_BIT) result |= RSX_GCM_CLEAR_ALPHA_BIT;
return result;
}
static inline void get_abgr8_colormask(bool& red, bool& /*green*/, bool& blue, bool& /*alpha*/)
{
std::swap(red, blue);
}
static inline void get_abgr8_clear_color(u8& red, u8& /*green*/, u8& blue, u8& /*alpha*/)
{
std::swap(red, blue);
}
template <typename T, typename U>
requires std::is_integral_v<T>&& std::is_integral_v<U>
u8 renormalize_color8(T input, U base)
{
// Base will be some POT-1 value
const int value = static_cast<u8>(input & base);
return static_cast<u8>((value * 255) / base);
}
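// Worked example, not part of the original header: renormalize_color8 expands a
// narrow channel to the full 8-bit range with exact endpoints. For a 5-bit
// field (base 0x1f): 0x00 -> 0, 0x10 -> (16 * 255) / 31 = 131, 0x1f -> 255.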
static inline void get_rgb565_clear_color(u8& red, u8& green, u8& blue, u8& /*alpha*/)
{
// RSX clear color is just a memcpy, so the input arrives as ARGB8 and only the B and G bytes carry the 16-bit value
const u16 raw_value = static_cast<u16>(green) << 8 | blue;
blue = renormalize_color8(raw_value, 0x1f);
green = renormalize_color8(raw_value >> 5, 0x3f);
red = renormalize_color8(raw_value >> 11, 0x1f);
}
static inline void get_a1rgb555_clear_color(u8& red, u8& green, u8& blue, u8& alpha, u8 alpha_override)
{
// RSX clear color is just a memcpy, so the input arrives as ARGB8 and only the B and G bytes carry the 16-bit value
const u16 raw_value = static_cast<u16>(green) << 8 | blue;
blue = renormalize_color8(raw_value, 0x1f);
green = renormalize_color8(raw_value >> 5, 0x1f);
red = renormalize_color8(raw_value >> 10, 0x1f);
// Alpha can technically be encoded into the clear but the format normally just injects constants.
// Will require hardware tests when possible to determine which approach makes more sense.
// alpha = static_cast<u8>((raw_value & (1 << 15)) ? 255 : 0);
alpha = alpha_override;
}
static inline u32 get_b8_clearmask(u32 mask)
{
u32 result = 0;
if (mask & RSX_GCM_CLEAR_BLUE_BIT) result |= RSX_GCM_CLEAR_RED_BIT;
return result;
}
static inline void get_b8_colormask(bool& red, bool& green, bool& blue, bool& alpha)
{
red = blue;
green = false;
blue = false;
alpha = false;
}
static inline void get_b8_clear_color(u8& red, u8& /*green*/, u8& blue, u8& /*alpha*/)
{
std::swap(red, blue);
}
static inline color4f decode_border_color(u32 colorref)
{
color4f result;
result.b = (colorref & 0xFF) / 255.f;
result.g = ((colorref >> 8) & 0xFF) / 255.f;
result.r = ((colorref >> 16) & 0xFF) / 255.f;
result.a = ((colorref >> 24) & 0xFF) / 255.f;
return result;
}
static inline u32 encode_color_to_storage_key(color4f color)
{
const u32 r = static_cast<u8>(color.r * 255);
const u32 g = static_cast<u8>(color.g * 255);
const u32 b = static_cast<u8>(color.b * 255);
const u32 a = static_cast<u8>(color.a * 255);
return (a << 24) | (b << 16) | (g << 8) | r;
}
static inline const std::array<bool, 4> get_write_output_mask(rsx::surface_color_format format)
{
constexpr std::array<bool, 4> rgba = { true, true, true, true };
constexpr std::array<bool, 4> rgb = { true, true, true, false };
constexpr std::array<bool, 4> rg = { true, true, false, false };
constexpr std::array<bool, 4> r = { true, false, false, false };
switch (format)
{
case rsx::surface_color_format::a8r8g8b8:
case rsx::surface_color_format::a8b8g8r8:
case rsx::surface_color_format::w16z16y16x16:
case rsx::surface_color_format::w32z32y32x32:
return rgba;
case rsx::surface_color_format::x1r5g5b5_z1r5g5b5:
case rsx::surface_color_format::x1r5g5b5_o1r5g5b5:
case rsx::surface_color_format::r5g6b5:
case rsx::surface_color_format::x8r8g8b8_z8r8g8b8:
case rsx::surface_color_format::x8r8g8b8_o8r8g8b8:
case rsx::surface_color_format::x8b8g8r8_z8b8g8r8:
case rsx::surface_color_format::x8b8g8r8_o8b8g8r8:
return rgb;
case rsx::surface_color_format::g8b8:
return rg;
case rsx::surface_color_format::b8:
case rsx::surface_color_format::x32:
return r;
default:
fmt::throw_exception("Unknown surface format 0x%x", static_cast<int>(format));
}
}
}
| 7,935 | C++ | .h | 216 | 32.476852 | 144 | 0.66449 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,995 | display.h | RPCS3_rpcs3/rpcs3/Emu/RSX/display.h |
#pragma once
#ifdef _WIN32
#include <windows.h>
#elif defined(__APPLE__)
// nothing
#elif defined(HAVE_X11)
// Cannot include Xlib.h before Qt
// and we don't need all of Xlib anyway
using Display = struct _XDisplay;
using Window = unsigned long;
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
#include <wayland-client.h>
#endif
#ifdef _WIN32
using display_handle_t = HWND;
#elif defined(__APPLE__)
using display_handle_t = void*; // NSView
#else
#include <variant>
using display_handle_t = std::variant<
#if defined(HAVE_X11) && defined(VK_USE_PLATFORM_WAYLAND_KHR)
std::pair<Display*, Window>, std::pair<wl_display*, wl_surface*>
#elif defined(HAVE_X11)
std::pair<Display*, Window>
#elif defined(VK_USE_PLATFORM_WAYLAND_KHR)
std::pair<wl_display*, wl_surface*>
#endif
>;
#endif
using draw_context_t = void*;
| 813 | C++ | .h | 31 | 25 | 65 | 0.748072 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,996 | RSXTexture.h | RPCS3_rpcs3/rpcs3/Emu/RSX/RSXTexture.h |
#pragma once
#include "gcm_enums.h"
#include "color_utils.h"
namespace rsx
{
class fragment_texture
{
protected:
const u8 m_index;
std::array<u32, 0x10000 / 4>& registers;
public:
fragment_texture(u8 idx, std::array<u32, 0x10000 / 4>& r)
: m_index(idx)
, registers(r)
{
}
fragment_texture() = delete;
// Offset
u32 offset() const;
// Format
u8 location() const;
bool cubemap() const;
u8 border_type() const;
rsx::texture_dimension dimension() const;
// A 2D texture can be either a plain 2D texture or a cubemap depending on the cubemap bit.
// Since cubemaps are a distinct texture dimension in every graphics API, this function reports
// cubemap as a separate dimension.
rsx::texture_dimension_extended get_extended_texture_dimension() const;
u8 format() const;
bool is_compressed_format() const;
u16 mipmap() const;
// mipmap() returns the raw register value, which can be higher than the actual number of mipmap levels.
// This function clamps the result to the mipmap count allowed by the texture size.
u16 get_exact_mipmap_count() const;
// Address
rsx::texture_wrap_mode wrap_s() const;
rsx::texture_wrap_mode wrap_t() const;
rsx::texture_wrap_mode wrap_r() const;
rsx::comparison_function zfunc() const;
u8 unsigned_remap() const;
u8 gamma() const;
u8 aniso_bias() const;
u8 signed_remap() const;
// Control0
bool enabled() const;
f32 min_lod() const;
f32 max_lod() const;
rsx::texture_max_anisotropy max_aniso() const;
bool alpha_kill_enabled() const;
// Control1
u32 remap() const;
rsx::texture_channel_remap_t decoded_remap() const;
// Filter
f32 bias() const;
rsx::texture_minify_filter min_filter() const;
rsx::texture_magnify_filter mag_filter() const;
u8 convolution_filter() const;
u8 argb_signed() const;
bool a_signed() const;
bool r_signed() const;
bool g_signed() const;
bool b_signed() const;
// Image Rect
u16 width() const;
u16 height() const;
// Border Color
u32 border_color() const;
color4f remapped_border_color() const;
u16 depth() const;
u32 pitch() const;
};
class vertex_texture
{
protected:
const u8 m_index;
std::array<u32, 0x10000 / 4>& registers;
public:
vertex_texture(u8 idx, std::array<u32, 0x10000 / 4> &r)
: m_index(idx)
, registers(r)
{
}
vertex_texture() = delete;
// Offset
u32 offset() const;
// Format
u8 location() const;
bool cubemap() const;
u8 border_type() const;
rsx::texture_dimension dimension() const;
u8 format() const;
u16 mipmap() const;
// Address
rsx::texture_wrap_mode wrap_s() const;
rsx::texture_wrap_mode wrap_t() const;
rsx::texture_wrap_mode wrap_r() const;
rsx::texture_channel_remap_t decoded_remap() const;
u32 remap() const;
// Control0
bool enabled() const;
f32 min_lod() const;
f32 max_lod() const;
// Filter
f32 bias() const;
rsx::texture_minify_filter min_filter() const;
rsx::texture_magnify_filter mag_filter() const;
// Image Rect
u16 width() const;
u16 height() const;
// Border Color
u32 border_color() const;
color4f remapped_border_color() const;
u16 depth() const;
u32 pitch() const;
rsx::texture_dimension_extended get_extended_texture_dimension() const;
u16 get_exact_mipmap_count() const;
};
}
| 3,289 | C++ | .h | 118 | 24.79661 | 101 | 0.704517 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,997 | RSXDisAsm.h | RPCS3_rpcs3/rpcs3/Emu/RSX/RSXDisAsm.h |
#pragma once
#include "Emu/Cell/PPCDisAsm.h"
class RSXDisAsm final : public CPUDisAsm
{
public:
RSXDisAsm(cpu_disasm_mode mode, const u8* offset, u32 start_pc, const cpu_thread* cpu) : CPUDisAsm(mode, offset, start_pc, cpu)
{
}
private:
void Write(std::string_view str, s32 count, bool is_non_inc = false, u32 id = 0);
public:
u32 disasm(u32 pc) override;
std::pair<const void*, usz> get_memory_span() const override;
std::unique_ptr<CPUDisAsm> copy_type_erased() const override;
};
| 494 | C++ | .h | 15 | 31.2 | 128 | 0.743158 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
5,998 | gcm_enums.h | RPCS3_rpcs3/rpcs3/Emu/RSX/gcm_enums.h |
#pragma once
#include "util/types.hpp"
#include "Common/expected.hpp"
#include "Utilities/StrFmt.h"
namespace gcm
{
enum
{
CELL_GCM_TYPE_B = 1,
CELL_GCM_TYPE_C = 2,
CELL_GCM_TYPE_RSX = 3,
CELL_GCM_MRT_MAXCOUNT = 4,
CELL_GCM_DISPLAY_MAXID = 8,
CELL_GCM_CONDITIONAL = 2,
};
enum
{
CELL_GCM_DISPLAY_HSYNC = 1,
CELL_GCM_DISPLAY_VSYNC = 2,
CELL_GCM_DISPLAY_HSYNC_WITH_NOISE = 3,
};
enum
{
CELL_GCM_DEBUG_LEVEL0 = 1,
CELL_GCM_DEBUG_LEVEL1 = 2,
CELL_GCM_DEBUG_LEVEL2 = 3,
};
enum
{
CELL_GCM_DISPLAY_FREQUENCY_59_94HZ = 1,
CELL_GCM_DISPLAY_FREQUENCY_SCANOUT = 2,
CELL_GCM_DISPLAY_FREQUENCY_DISABLE = 3,
};
enum
{
CELL_GCM_DISPLAY_FLIP_STATUS_DONE = 0,
CELL_GCM_DISPLAY_FLIP_STATUS_WAITING = 1,
};
enum
{
CELL_GCM_ZCULL_ALIGN_OFFSET = 4096,
CELL_GCM_ZCULL_ALIGN_WIDTH = 64,
CELL_GCM_ZCULL_ALIGN_HEIGHT = 64,
CELL_GCM_ZCULL_ALIGN_CULLSTART = 4096,
CELL_GCM_ZCULL_COMPRESSION_TAG_BASE_MAX = 0x7FF,
CELL_GCM_ZCULL_RAM_SIZE_MAX = 0x00300000,
CELL_GCM_TILE_ALIGN_OFFSET = 0x00010000,
CELL_GCM_TILE_ALIGN_SIZE = 0x00010000,
CELL_GCM_TILE_LOCAL_ALIGN_HEIGHT = 32,
CELL_GCM_TILE_MAIN_ALIGN_HEIGHT = 64,
CELL_GCM_TILE_ALIGN_BUFFER_START_BOUNDARY = 8,
CELL_GCM_FRAGMENT_UCODE_LOCAL_ALIGN_OFFSET = 64,
CELL_GCM_FRAGMENT_UCODE_MAIN_ALIGN_OFFSET = 128,
CELL_GCM_SURFACE_LINEAR_ALIGN_OFFSET = 64,
CELL_GCM_SURFACE_SWIZZLE_ALIGN_OFFSET = 128,
CELL_GCM_TEXTURE_SWIZZLE_ALIGN_OFFSET = 128,
CELL_GCM_TEXTURE_CUBEMAP_ALIGN_OFFSET = 128,
CELL_GCM_TEXTURE_SWIZZLED_CUBEMAP_FACE_ALIGN_OFFSET = 128,
CELL_GCM_VERTEX_TEXTURE_CACHE_LINE_SIZE = 32,
CELL_GCM_L2_TEXTURE_CACHE_LOCAL_LINE_SIZE = 64,
CELL_GCM_L2_TEXTURE_CACHE_MAIN_LINE_SIZE = 128,
CELL_GCM_IDX_FRAGMENT_UCODE_INSTRUCTION_PREFETCH_COUNT = 16,
CELL_GCM_DRAW_INDEX_ARRAY_INDEX_RANGE_MAX = 0x000FFFFF,
CELL_GCM_CURSOR_ALIGN_OFFSET = 2048
};
enum
{
CELL_GCM_FREQUENCY_MODULO = 1,
CELL_GCM_FREQUENCY_DIVIDE = 0,
};
enum CellRescTableElement
{
CELL_RESC_ELEMENT_HALF = 0,
CELL_RESC_ELEMENT_FLOAT = 1,
};
enum CellGcmDefaultFifoMode
{
CELL_GCM_DEFAULT_FIFO_MODE_TRADITIONAL = 0,
CELL_GCM_DEFAULT_FIFO_MODE_OPTIMIZE = 1,
CELL_GCM_DEFAULT_FIFO_MODE_CONDITIONAL = 2,
};
enum CellGcmSystemMode
{
CELL_GCM_SYSTEM_MODE_IOMAP_512MB = 1,
CELL_GCM_SYSTEM_MODE_MASK = 1
};
enum
{
// Index Array Type
CELL_GCM_DRAW_INDEX_ARRAY_TYPE_32 = 0,
CELL_GCM_DRAW_INDEX_ARRAY_TYPE_16 = 1,
};
enum
{
CELL_GCM_PRIMITIVE_POINTS = 1,
CELL_GCM_PRIMITIVE_LINES = 2,
CELL_GCM_PRIMITIVE_LINE_LOOP = 3,
CELL_GCM_PRIMITIVE_LINE_STRIP = 4,
CELL_GCM_PRIMITIVE_TRIANGLES = 5,
CELL_GCM_PRIMITIVE_TRIANGLE_STRIP= 6,
CELL_GCM_PRIMITIVE_TRIANGLE_FAN = 7,
CELL_GCM_PRIMITIVE_QUADS = 8,
CELL_GCM_PRIMITIVE_QUAD_STRIP = 9,
CELL_GCM_PRIMITIVE_POLYGON = 10,
};
// GCM Texture
enum CellGcmTexture : u32
{
// Color Flag
CELL_GCM_TEXTURE_B8 = 0x81,
CELL_GCM_TEXTURE_A1R5G5B5 = 0x82,
CELL_GCM_TEXTURE_A4R4G4B4 = 0x83,
CELL_GCM_TEXTURE_R5G6B5 = 0x84,
CELL_GCM_TEXTURE_A8R8G8B8 = 0x85,
CELL_GCM_TEXTURE_COMPRESSED_DXT1 = 0x86,
CELL_GCM_TEXTURE_COMPRESSED_DXT23 = 0x87,
CELL_GCM_TEXTURE_COMPRESSED_DXT45 = 0x88,
CELL_GCM_TEXTURE_G8B8 = 0x8B,
CELL_GCM_TEXTURE_COMPRESSED_B8R8_G8R8 = 0x8D, // NOTE: 0xAD in firmware
CELL_GCM_TEXTURE_COMPRESSED_R8B8_R8G8 = 0x8E, // NOTE: 0xAE in firmware
CELL_GCM_TEXTURE_R6G5B5 = 0x8F,
CELL_GCM_TEXTURE_DEPTH24_D8 = 0x90,
CELL_GCM_TEXTURE_DEPTH24_D8_FLOAT = 0x91,
CELL_GCM_TEXTURE_DEPTH16 = 0x92,
CELL_GCM_TEXTURE_DEPTH16_FLOAT = 0x93,
CELL_GCM_TEXTURE_X16 = 0x94,
CELL_GCM_TEXTURE_Y16_X16 = 0x95,
CELL_GCM_TEXTURE_R5G5B5A1 = 0x97,
CELL_GCM_TEXTURE_COMPRESSED_HILO8 = 0x98,
CELL_GCM_TEXTURE_COMPRESSED_HILO_S8 = 0x99,
CELL_GCM_TEXTURE_W16_Z16_Y16_X16_FLOAT = 0x9A,
CELL_GCM_TEXTURE_W32_Z32_Y32_X32_FLOAT = 0x9B,
CELL_GCM_TEXTURE_X32_FLOAT = 0x9C,
CELL_GCM_TEXTURE_D1R5G5B5 = 0x9D,
CELL_GCM_TEXTURE_D8R8G8B8 = 0x9E,
CELL_GCM_TEXTURE_Y16_X16_FLOAT = 0x9F,
// Swizzle Flag
CELL_GCM_TEXTURE_SZ = 0x00,
CELL_GCM_TEXTURE_LN = 0x20,
// Normalization Flag
CELL_GCM_TEXTURE_NR = 0x00,
CELL_GCM_TEXTURE_UN = 0x40,
};
// GCM Surface
enum
{
// Surface type
CELL_GCM_SURFACE_PITCH = 1,
CELL_GCM_SURFACE_SWIZZLE = 2,
};
// GCM blend equation
enum
{
CELL_GCM_FUNC_ADD = 0x8006,
CELL_GCM_MIN = 0x8007,
CELL_GCM_MAX = 0x8008,
CELL_GCM_FUNC_SUBTRACT = 0x800A,
CELL_GCM_FUNC_REVERSE_SUBTRACT = 0x800B,
CELL_GCM_FUNC_REVERSE_SUBTRACT_SIGNED = 0x0000F005,
CELL_GCM_FUNC_ADD_SIGNED = 0x0000F006,
CELL_GCM_FUNC_REVERSE_ADD_SIGNED = 0x0000F007,
};
// GCM blend factor
enum
{
CELL_GCM_SRC_COLOR = 0x0300,
CELL_GCM_ONE_MINUS_SRC_COLOR = 0x0301,
CELL_GCM_SRC_ALPHA = 0x0302,
CELL_GCM_ONE_MINUS_SRC_ALPHA = 0x0303,
CELL_GCM_DST_ALPHA = 0x0304,
CELL_GCM_ONE_MINUS_DST_ALPHA = 0x0305,
CELL_GCM_DST_COLOR = 0x0306,
CELL_GCM_ONE_MINUS_DST_COLOR = 0x0307,
CELL_GCM_SRC_ALPHA_SATURATE = 0x0308,
CELL_GCM_CONSTANT_COLOR = 0x8001,
CELL_GCM_ONE_MINUS_CONSTANT_COLOR = 0x8002,
CELL_GCM_CONSTANT_ALPHA = 0x8003,
CELL_GCM_ONE_MINUS_CONSTANT_ALPHA = 0x8004,
};
enum
{
CELL_GCM_TEXTURE_DIMENSION_1 = 1,
CELL_GCM_TEXTURE_DIMENSION_2 = 2,
CELL_GCM_TEXTURE_DIMENSION_3 = 3,
CELL_GCM_TEXTURE_UNSIGNED_REMAP_NORMAL = 0,
CELL_GCM_TEXTURE_UNSIGNED_REMAP_BIASED = 1,
CELL_GCM_TEXTURE_SIGNED_REMAP_NORMAL = 0x0,
CELL_GCM_TEXTURE_SIGNED_REMAP_CLAMPED = 0x3,
CELL_GCM_TEXTURE_REMAP_ORDER_XYXY = 0,
CELL_GCM_TEXTURE_REMAP_ORDER_XXXY = 1,
CELL_GCM_TEXTURE_REMAP_FROM_A = 0,
CELL_GCM_TEXTURE_REMAP_FROM_R = 1,
CELL_GCM_TEXTURE_REMAP_FROM_G = 2,
CELL_GCM_TEXTURE_REMAP_FROM_B = 3,
CELL_GCM_TEXTURE_REMAP_ZERO = 0,
CELL_GCM_TEXTURE_REMAP_ONE = 1,
CELL_GCM_TEXTURE_REMAP_REMAP = 2,
CELL_GCM_TEXTURE_BORDER_TEXTURE = 0,
CELL_GCM_TEXTURE_BORDER_COLOR = 1,
CELL_GCM_TEXTURE_ZFUNC_NEVER = 0,
CELL_GCM_TEXTURE_ZFUNC_LESS = 1,
CELL_GCM_TEXTURE_ZFUNC_EQUAL = 2,
CELL_GCM_TEXTURE_ZFUNC_LEQUAL = 3,
CELL_GCM_TEXTURE_ZFUNC_GREATER = 4,
CELL_GCM_TEXTURE_ZFUNC_NOTEQUAL = 5,
CELL_GCM_TEXTURE_ZFUNC_GEQUAL = 6,
CELL_GCM_TEXTURE_ZFUNC_ALWAYS = 7,
CELL_GCM_TEXTURE_GAMMA_R = 1 << 0,
CELL_GCM_TEXTURE_GAMMA_G = 1 << 1,
CELL_GCM_TEXTURE_GAMMA_B = 1 << 2,
CELL_GCM_TEXTURE_GAMMA_A = 1 << 3,
CELL_GCM_TEXTURE_ANISO_SPREAD_0_50_TEXEL = 0x0,
CELL_GCM_TEXTURE_ANISO_SPREAD_1_00_TEXEL = 0x1,
CELL_GCM_TEXTURE_ANISO_SPREAD_1_125_TEXEL = 0x2,
CELL_GCM_TEXTURE_ANISO_SPREAD_1_25_TEXEL = 0x3,
CELL_GCM_TEXTURE_ANISO_SPREAD_1_375_TEXEL = 0x4,
CELL_GCM_TEXTURE_ANISO_SPREAD_1_50_TEXEL = 0x5,
CELL_GCM_TEXTURE_ANISO_SPREAD_1_75_TEXEL = 0x6,
CELL_GCM_TEXTURE_ANISO_SPREAD_2_00_TEXEL = 0x7,
CELL_GCM_TEXTURE_CYLINDRICAL_WRAP_ENABLE_TEX0_U = 1 << 0,
CELL_GCM_TEXTURE_CYLINDRICAL_WRAP_ENABLE_TEX0_V = 1 << 1,
CELL_GCM_TEXTURE_CYLINDRICAL_WRAP_ENABLE_TEX0_P = 1 << 2,
CELL_GCM_TEXTURE_CYLINDRICAL_WRAP_ENABLE_TEX0_Q = 1 << 3,
CELL_GCM_TEXTURE_CYLINDRICAL_WRAP_ENABLE_TEX1_U = 1 << 4,
CELL_GCM_TEXTURE_CYLINDRICAL_WRAP_ENABLE_TEX1_V = 1 << 5,
CELL_GCM_TEXTURE_CYLINDRICAL_WRAP_ENABLE_TEX1_P = 1 << 6,
CELL_GCM_TEXTURE_CYLINDRICAL_WRAP_ENABLE_TEX1_Q = 1 << 7,
CELL_GCM_TEXTURE_CYLINDRICAL_WRAP_ENABLE_TEX2_U = 1 << 8,
CELL_GCM_TEXTURE_CYLINDRICAL_WRAP_ENABLE_TEX2_V = 1 << 9,
CELL_GCM_TEXTURE_CYLINDRICAL_WRAP_ENABLE_TEX2_P = 1 << 10,
CELL_GCM_TEXTURE_CYLINDRICAL_WRAP_ENABLE_TEX2_Q = 1 << 11,
CELL_GCM_TEXTURE_CYLINDRICAL_WRAP_ENABLE_TEX3_U = 1 << 12,
CELL_GCM_TEXTURE_CYLINDRICAL_WRAP_ENABLE_TEX3_V = 1 << 13,
CELL_GCM_TEXTURE_CYLINDRICAL_WRAP_ENABLE_TEX3_P = 1 << 14,
CELL_GCM_TEXTURE_CYLINDRICAL_WRAP_ENABLE_TEX3_Q = 1 << 15,
CELL_GCM_TEXTURE_CYLINDRICAL_WRAP_ENABLE_TEX4_U = 1 << 16,
CELL_GCM_TEXTURE_CYLINDRICAL_WRAP_ENABLE_TEX4_V = 1 << 17,
CELL_GCM_TEXTURE_CYLINDRICAL_WRAP_ENABLE_TEX4_P = 1 << 18,
CELL_GCM_TEXTURE_CYLINDRICAL_WRAP_ENABLE_TEX4_Q = 1 << 19,
CELL_GCM_TEXTURE_CYLINDRICAL_WRAP_ENABLE_TEX5_U = 1 << 20,
CELL_GCM_TEXTURE_CYLINDRICAL_WRAP_ENABLE_TEX5_V = 1 << 21,
CELL_GCM_TEXTURE_CYLINDRICAL_WRAP_ENABLE_TEX5_P = 1 << 22,
CELL_GCM_TEXTURE_CYLINDRICAL_WRAP_ENABLE_TEX5_Q = 1 << 23,
CELL_GCM_TEXTURE_CYLINDRICAL_WRAP_ENABLE_TEX6_U = 1 << 24,
CELL_GCM_TEXTURE_CYLINDRICAL_WRAP_ENABLE_TEX6_V = 1 << 25,
CELL_GCM_TEXTURE_CYLINDRICAL_WRAP_ENABLE_TEX6_P = 1 << 26,
CELL_GCM_TEXTURE_CYLINDRICAL_WRAP_ENABLE_TEX6_Q = 1 << 27,
CELL_GCM_TEXTURE_CYLINDRICAL_WRAP_ENABLE_TEX7_U = 1 << 28,
CELL_GCM_TEXTURE_CYLINDRICAL_WRAP_ENABLE_TEX7_V = 1 << 29,
CELL_GCM_TEXTURE_CYLINDRICAL_WRAP_ENABLE_TEX7_P = 1 << 30,
CELL_GCM_TEXTURE_CYLINDRICAL_WRAP_ENABLE_TEX7_Q = 1u << 31,
CELL_GCM_COLOR_MASK_B = 1 << 0,
CELL_GCM_COLOR_MASK_G = 1 << 8,
CELL_GCM_COLOR_MASK_R = 1 << 16,
CELL_GCM_COLOR_MASK_A = 1 << 24,
CELL_GCM_COLOR_MASK_MRT1_A = 1 << 4,
CELL_GCM_COLOR_MASK_MRT1_R = 1 << 5,
CELL_GCM_COLOR_MASK_MRT1_G = 1 << 6,
CELL_GCM_COLOR_MASK_MRT1_B = 1 << 7,
CELL_GCM_COLOR_MASK_MRT2_A = 1 << 8,
CELL_GCM_COLOR_MASK_MRT2_R = 1 << 9,
CELL_GCM_COLOR_MASK_MRT2_G = 1 << 10,
CELL_GCM_COLOR_MASK_MRT2_B = 1 << 11,
CELL_GCM_COLOR_MASK_MRT3_A = 1 << 12,
CELL_GCM_COLOR_MASK_MRT3_R = 1 << 13,
CELL_GCM_COLOR_MASK_MRT3_G = 1 << 14,
CELL_GCM_COLOR_MASK_MRT3_B = 1 << 15,
CELL_GCM_NEVER = 0x0200,
CELL_GCM_LESS = 0x0201,
CELL_GCM_EQUAL = 0x0202,
CELL_GCM_LEQUAL = 0x0203,
CELL_GCM_GREATER = 0x0204,
CELL_GCM_NOTEQUAL = 0x0205,
CELL_GCM_GEQUAL = 0x0206,
CELL_GCM_ALWAYS = 0x0207,
CELL_GCM_ZERO = 0,
CELL_GCM_ONE = 1,
CELL_GCM_FRONT = 0x0404,
CELL_GCM_BACK = 0x0405,
CELL_GCM_FRONT_AND_BACK = 0x0408,
CELL_GCM_CW = 0x0900,
CELL_GCM_CCW = 0x0901,
CELL_GCM_INVERT = 0x150A,
CELL_GCM_KEEP = 0x1E00,
CELL_GCM_REPLACE = 0x1E01,
CELL_GCM_INCR = 0x1E02,
CELL_GCM_DECR = 0x1E03,
CELL_GCM_INCR_WRAP = 0x8507,
CELL_GCM_DECR_WRAP = 0x8508,
CELL_GCM_TRANSFER_LOCAL_TO_LOCAL = 0,
CELL_GCM_TRANSFER_MAIN_TO_LOCAL = 1,
CELL_GCM_TRANSFER_LOCAL_TO_MAIN = 2,
CELL_GCM_TRANSFER_MAIN_TO_MAIN = 3,
CELL_GCM_INVALIDATE_TEXTURE = 1,
CELL_GCM_INVALIDATE_VERTEX_TEXTURE = 2,
CELL_GCM_COMPMODE_DISABLED = 0,
CELL_GCM_COMPMODE_C32_2X1 = 7,
CELL_GCM_COMPMODE_C32_2X2 = 8,
CELL_GCM_COMPMODE_Z32_SEPSTENCIL = 9,
CELL_GCM_COMPMODE_Z32_SEPSTENCIL_REG = 10,
CELL_GCM_COMPMODE_Z32_SEPSTENCIL_REGULAR = 10,
CELL_GCM_COMPMODE_Z32_SEPSTENCIL_DIAGONAL = 11,
CELL_GCM_COMPMODE_Z32_SEPSTENCIL_ROTATED = 12,
CELL_GCM_ZCULL_Z16 = 1,
CELL_GCM_ZCULL_Z24S8 = 2,
CELL_GCM_ZCULL_MSB = 0,
CELL_GCM_ZCULL_LONES = 1,
CELL_GCM_ZCULL_LESS = 0,
CELL_GCM_ZCULL_GREATER = 1,
CELL_GCM_SCULL_SFUNC_NEVER = 0,
CELL_GCM_SCULL_SFUNC_LESS = 1,
CELL_GCM_SCULL_SFUNC_EQUAL = 2,
CELL_GCM_SCULL_SFUNC_LEQUAL = 3,
CELL_GCM_SCULL_SFUNC_GREATER = 4,
CELL_GCM_SCULL_SFUNC_NOTEQUAL = 5,
CELL_GCM_SCULL_SFUNC_GEQUAL = 6,
CELL_GCM_SCULL_SFUNC_ALWAYS = 7,
CELL_GCM_ATTRIB_OUTPUT_FRONTDIFFUSE = 0,
CELL_GCM_ATTRIB_OUTPUT_FRONTSPECULAR = 1,
CELL_GCM_ATTRIB_OUTPUT_BACKDIFFUSE = 2,
CELL_GCM_ATTRIB_OUTPUT_BACKSPECULAR = 3,
CELL_GCM_ATTRIB_OUTPUT_FOG = 4,
CELL_GCM_ATTRIB_OUTPUT_POINTSIZE = 5,
CELL_GCM_ATTRIB_OUTPUT_UC0 = 6,
CELL_GCM_ATTRIB_OUTPUT_UC1 = 7,
CELL_GCM_ATTRIB_OUTPUT_UC2 = 8,
CELL_GCM_ATTRIB_OUTPUT_UC3 = 9,
CELL_GCM_ATTRIB_OUTPUT_UC4 = 10,
CELL_GCM_ATTRIB_OUTPUT_UC5 = 11,
CELL_GCM_ATTRIB_OUTPUT_TEX8 = 12,
CELL_GCM_ATTRIB_OUTPUT_TEX9 = 13,
CELL_GCM_ATTRIB_OUTPUT_TEX0 = 14,
CELL_GCM_ATTRIB_OUTPUT_TEX1 = 15,
CELL_GCM_ATTRIB_OUTPUT_TEX2 = 16,
CELL_GCM_ATTRIB_OUTPUT_TEX3 = 17,
CELL_GCM_ATTRIB_OUTPUT_TEX4 = 18,
CELL_GCM_ATTRIB_OUTPUT_TEX5 = 19,
CELL_GCM_ATTRIB_OUTPUT_TEX6 = 20,
CELL_GCM_ATTRIB_OUTPUT_TEX7 = 21,
CELL_GCM_ATTRIB_OUTPUT_MASK_FRONTDIFFUSE = 1 << CELL_GCM_ATTRIB_OUTPUT_FRONTDIFFUSE,
CELL_GCM_ATTRIB_OUTPUT_MASK_FRONTSPECULAR = 1 << CELL_GCM_ATTRIB_OUTPUT_FRONTSPECULAR,
CELL_GCM_ATTRIB_OUTPUT_MASK_BACKDIFFUSE = 1 << CELL_GCM_ATTRIB_OUTPUT_BACKDIFFUSE,
CELL_GCM_ATTRIB_OUTPUT_MASK_BACKSPECULAR = 1 << CELL_GCM_ATTRIB_OUTPUT_BACKSPECULAR,
CELL_GCM_ATTRIB_OUTPUT_MASK_FOG = 1 << CELL_GCM_ATTRIB_OUTPUT_FOG,
CELL_GCM_ATTRIB_OUTPUT_MASK_POINTSIZE = 1 << CELL_GCM_ATTRIB_OUTPUT_POINTSIZE,
CELL_GCM_ATTRIB_OUTPUT_MASK_UC0 = 1 << CELL_GCM_ATTRIB_OUTPUT_UC0,
CELL_GCM_ATTRIB_OUTPUT_MASK_UC1 = 1 << CELL_GCM_ATTRIB_OUTPUT_UC1,
CELL_GCM_ATTRIB_OUTPUT_MASK_UC2 = 1 << CELL_GCM_ATTRIB_OUTPUT_UC2,
CELL_GCM_ATTRIB_OUTPUT_MASK_UC3 = 1 << CELL_GCM_ATTRIB_OUTPUT_UC3,
CELL_GCM_ATTRIB_OUTPUT_MASK_UC4 = 1 << CELL_GCM_ATTRIB_OUTPUT_UC4,
CELL_GCM_ATTRIB_OUTPUT_MASK_UC5 = 1 << CELL_GCM_ATTRIB_OUTPUT_UC5,
CELL_GCM_ATTRIB_OUTPUT_MASK_TEX8 = 1 << CELL_GCM_ATTRIB_OUTPUT_TEX8,
CELL_GCM_ATTRIB_OUTPUT_MASK_TEX9 = 1 << CELL_GCM_ATTRIB_OUTPUT_TEX9,
CELL_GCM_ATTRIB_OUTPUT_MASK_TEX0 = 1 << CELL_GCM_ATTRIB_OUTPUT_TEX0,
CELL_GCM_ATTRIB_OUTPUT_MASK_TEX1 = 1 << CELL_GCM_ATTRIB_OUTPUT_TEX1,
CELL_GCM_ATTRIB_OUTPUT_MASK_TEX2 = 1 << CELL_GCM_ATTRIB_OUTPUT_TEX2,
CELL_GCM_ATTRIB_OUTPUT_MASK_TEX3 = 1 << CELL_GCM_ATTRIB_OUTPUT_TEX3,
CELL_GCM_ATTRIB_OUTPUT_MASK_TEX4 = 1 << CELL_GCM_ATTRIB_OUTPUT_TEX4,
CELL_GCM_ATTRIB_OUTPUT_MASK_TEX5 = 1 << CELL_GCM_ATTRIB_OUTPUT_TEX5,
CELL_GCM_ATTRIB_OUTPUT_MASK_TEX6 = 1 << CELL_GCM_ATTRIB_OUTPUT_TEX6,
CELL_GCM_ATTRIB_OUTPUT_MASK_TEX7 = 1 << CELL_GCM_ATTRIB_OUTPUT_TEX7,
CELL_GCM_TRUE = 1,
CELL_GCM_FALSE = 0,
};
enum
{
RSX_TEXTURE_REMAP_IDENTITY = 0xAAE4,
};
enum
{
CELL_GCM_POINT_SPRITE_RMODE_ZERO = 0,
CELL_GCM_POINT_SPRITE_RMODE_FROM_R = 1,
CELL_GCM_POINT_SPRITE_RMODE_FROM_S = 2,
CELL_GCM_POINT_SPRITE_TEX0 = 1 << 8,
CELL_GCM_POINT_SPRITE_TEX1 = 1 << 9,
CELL_GCM_POINT_SPRITE_TEX2 = 1 << 10,
CELL_GCM_POINT_SPRITE_TEX3 = 1 << 11,
CELL_GCM_POINT_SPRITE_TEX4 = 1 << 12,
CELL_GCM_POINT_SPRITE_TEX5 = 1 << 13,
CELL_GCM_POINT_SPRITE_TEX6 = 1 << 14,
CELL_GCM_POINT_SPRITE_TEX7 = 1 << 15,
CELL_GCM_POINT_SPRITE_TEX8 = 1 << 16,
CELL_GCM_POINT_SPRITE_TEX9 = 1 << 17,
};
enum
{
CELL_GCM_SHADER_CONTROL_DEPTH_EXPORT = 0xe, ///< shader program exports the depth of the shaded fragment
CELL_GCM_SHADER_CONTROL_32_BITS_EXPORTS = 0x40, ///< shader program exports 32 bits registers values (instead of 16 bits ones)
// Other known flags
RSX_SHADER_CONTROL_USED_REGS_MASK = 0xf,
RSX_SHADER_CONTROL_USED_TEMP_REGS_MASK = 0xff << 24,
RSX_SHADER_CONTROL_USES_KIL = 0x80, // program uses KIL op
RSX_SHADER_CONTROL_UNKNOWN0 = 0x400, // seemingly always set
RSX_SHADER_CONTROL_UNKNOWN1 = 0x8000, // seemingly set when srgb packer is used??
// Custom
RSX_SHADER_CONTROL_ATTRIBUTE_INTERPOLATION = 0x10000 // Rasterizing triangles and not lines or points
};
// GCM Reports
enum
{
CELL_GCM_ZPASS_PIXEL_CNT = 1,
CELL_GCM_ZCULL_STATS = 2,
CELL_GCM_ZCULL_STATS1 = 3,
CELL_GCM_ZCULL_STATS2 = 4,
CELL_GCM_ZCULL_STATS3 = 5,
};
// GPU Class Handles
enum CellGcmLocation : u32
{
CELL_GCM_LOCATION_LOCAL = 0,
CELL_GCM_LOCATION_MAIN = 1,
CELL_GCM_CONTEXT_DMA_MEMORY_FRAME_BUFFER = 0xFEED0000, // Local memory
CELL_GCM_CONTEXT_DMA_MEMORY_HOST_BUFFER = 0xFEED0001, // Main memory
CELL_GCM_CONTEXT_DMA_REPORT_LOCATION_LOCAL = 0x66626660,
CELL_GCM_CONTEXT_DMA_REPORT_LOCATION_MAIN = 0xBAD68000,
CELL_GCM_CONTEXT_DMA_NOTIFY_MAIN_0 = 0x6660420F,
CELL_GCM_CONTEXT_DMA_NOTIFY_MAIN_1 = 0x6660420E,
CELL_GCM_CONTEXT_DMA_NOTIFY_MAIN_2 = 0x6660420D,
CELL_GCM_CONTEXT_DMA_NOTIFY_MAIN_3 = 0x6660420C,
CELL_GCM_CONTEXT_DMA_NOTIFY_MAIN_4 = 0x6660420B,
CELL_GCM_CONTEXT_DMA_NOTIFY_MAIN_5 = 0x6660420A,
CELL_GCM_CONTEXT_DMA_NOTIFY_MAIN_6 = 0x66604209,
CELL_GCM_CONTEXT_DMA_NOTIFY_MAIN_7 = 0x66604208,
CELL_GCM_CONTEXT_DMA_TO_MEMORY_GET_NOTIFY0 = 0x66604207,
CELL_GCM_CONTEXT_DMA_TO_MEMORY_GET_NOTIFY1 = 0x66604206,
CELL_GCM_CONTEXT_DMA_TO_MEMORY_GET_NOTIFY2 = 0x66604205,
CELL_GCM_CONTEXT_DMA_TO_MEMORY_GET_NOTIFY3 = 0x66604204,
CELL_GCM_CONTEXT_DMA_TO_MEMORY_GET_NOTIFY4 = 0x66604203,
CELL_GCM_CONTEXT_DMA_TO_MEMORY_GET_NOTIFY5 = 0x66604202,
CELL_GCM_CONTEXT_DMA_TO_MEMORY_GET_NOTIFY6 = 0x66604201,
CELL_GCM_CONTEXT_DMA_TO_MEMORY_GET_NOTIFY7 = 0x66604200,
CELL_GCM_CONTEXT_DMA_SEMAPHORE_RW = 0x66606660,
CELL_GCM_CONTEXT_DMA_SEMAPHORE_R = 0x66616661,
CELL_GCM_CONTEXT_DMA_DEVICE_RW = 0x56616660,
CELL_GCM_CONTEXT_DMA_DEVICE_R = 0x56616661
};
enum CellGcmMethod : u16
{
// NV40_CHANNEL_DMA (NV406E)
NV406E_SET_REFERENCE = 0x00000050 >> 2,
NV406E_SET_CONTEXT_DMA_SEMAPHORE = 0x00000060 >> 2,
NV406E_SEMAPHORE_OFFSET = 0x00000064 >> 2,
NV406E_SEMAPHORE_ACQUIRE = 0x00000068 >> 2,
NV406E_SEMAPHORE_RELEASE = 0x0000006c >> 2,
// NV40_CURIE_PRIMITIVE (NV4097)
NV4097_SET_OBJECT = 0x00000000 >> 2,
NV4097_NO_OPERATION = 0x00000100 >> 2,
NV4097_NOTIFY = 0x00000104 >> 2,
NV4097_WAIT_FOR_IDLE = 0x00000110 >> 2,
NV4097_PM_TRIGGER = 0x00000140 >> 2,
NV4097_SET_CONTEXT_DMA_NOTIFIES = 0x00000180 >> 2,
NV4097_SET_CONTEXT_DMA_A = 0x00000184 >> 2,
NV4097_SET_CONTEXT_DMA_B = 0x00000188 >> 2,
NV4097_SET_CONTEXT_DMA_COLOR_B = 0x0000018c >> 2,
NV4097_SET_CONTEXT_DMA_STATE = 0x00000190 >> 2,
NV4097_SET_CONTEXT_DMA_COLOR_A = 0x00000194 >> 2,
NV4097_SET_CONTEXT_DMA_ZETA = 0x00000198 >> 2,
NV4097_SET_CONTEXT_DMA_VERTEX_A = 0x0000019c >> 2,
NV4097_SET_CONTEXT_DMA_VERTEX_B = 0x000001a0 >> 2,
NV4097_SET_CONTEXT_DMA_SEMAPHORE = 0x000001a4 >> 2,
NV4097_SET_CONTEXT_DMA_REPORT = 0x000001a8 >> 2,
NV4097_SET_CONTEXT_DMA_CLIP_ID = 0x000001ac >> 2,
NV4097_SET_CONTEXT_DMA_CULL_DATA = 0x000001b0 >> 2,
NV4097_SET_CONTEXT_DMA_COLOR_C = 0x000001b4 >> 2,
NV4097_SET_CONTEXT_DMA_COLOR_D = 0x000001b8 >> 2,
NV4097_SET_SURFACE_CLIP_HORIZONTAL = 0x00000200 >> 2,
NV4097_SET_SURFACE_CLIP_VERTICAL = 0x00000204 >> 2,
NV4097_SET_SURFACE_FORMAT = 0x00000208 >> 2,
NV4097_SET_SURFACE_PITCH_A = 0x0000020c >> 2,
NV4097_SET_SURFACE_COLOR_AOFFSET = 0x00000210 >> 2,
NV4097_SET_SURFACE_ZETA_OFFSET = 0x00000214 >> 2,
NV4097_SET_SURFACE_COLOR_BOFFSET = 0x00000218 >> 2,
NV4097_SET_SURFACE_PITCH_B = 0x0000021c >> 2,
NV4097_SET_SURFACE_COLOR_TARGET = 0x00000220 >> 2,
NV4097_SET_SURFACE_PITCH_Z = 0x0000022c >> 2,
NV4097_INVALIDATE_ZCULL = 0x00000234 >> 2,
NV4097_SET_CYLINDRICAL_WRAP = 0x00000238 >> 2,
NV4097_SET_CYLINDRICAL_WRAP1 = 0x0000023c >> 2,
NV4097_SET_SURFACE_PITCH_C = 0x00000280 >> 2,
NV4097_SET_SURFACE_PITCH_D = 0x00000284 >> 2,
NV4097_SET_SURFACE_COLOR_COFFSET = 0x00000288 >> 2,
NV4097_SET_SURFACE_COLOR_DOFFSET = 0x0000028c >> 2,
NV4097_SET_WINDOW_OFFSET = 0x000002b8 >> 2,
NV4097_SET_WINDOW_CLIP_TYPE = 0x000002bc >> 2,
NV4097_SET_WINDOW_CLIP_HORIZONTAL = 0x000002c0 >> 2,
NV4097_SET_WINDOW_CLIP_VERTICAL = 0x000002c4 >> 2,
NV4097_SET_DITHER_ENABLE = 0x00000300 >> 2,
NV4097_SET_ALPHA_TEST_ENABLE = 0x00000304 >> 2,
NV4097_SET_ALPHA_FUNC = 0x00000308 >> 2,
NV4097_SET_ALPHA_REF = 0x0000030c >> 2,
NV4097_SET_BLEND_ENABLE = 0x00000310 >> 2,
NV4097_SET_BLEND_FUNC_SFACTOR = 0x00000314 >> 2,
NV4097_SET_BLEND_FUNC_DFACTOR = 0x00000318 >> 2,
NV4097_SET_BLEND_COLOR = 0x0000031c >> 2,
NV4097_SET_BLEND_EQUATION = 0x00000320 >> 2,
NV4097_SET_COLOR_MASK = 0x00000324 >> 2,
NV4097_SET_STENCIL_TEST_ENABLE = 0x00000328 >> 2,
NV4097_SET_STENCIL_MASK = 0x0000032c >> 2,
NV4097_SET_STENCIL_FUNC = 0x00000330 >> 2,
NV4097_SET_STENCIL_FUNC_REF = 0x00000334 >> 2,
NV4097_SET_STENCIL_FUNC_MASK = 0x00000338 >> 2,
NV4097_SET_STENCIL_OP_FAIL = 0x0000033c >> 2,
NV4097_SET_STENCIL_OP_ZFAIL = 0x00000340 >> 2,
NV4097_SET_STENCIL_OP_ZPASS = 0x00000344 >> 2,
NV4097_SET_TWO_SIDED_STENCIL_TEST_ENABLE = 0x00000348 >> 2,
NV4097_SET_BACK_STENCIL_MASK = 0x0000034c >> 2,
NV4097_SET_BACK_STENCIL_FUNC = 0x00000350 >> 2,
NV4097_SET_BACK_STENCIL_FUNC_REF = 0x00000354 >> 2,
NV4097_SET_BACK_STENCIL_FUNC_MASK = 0x00000358 >> 2,
NV4097_SET_BACK_STENCIL_OP_FAIL = 0x0000035c >> 2,
NV4097_SET_BACK_STENCIL_OP_ZFAIL = 0x00000360 >> 2,
NV4097_SET_BACK_STENCIL_OP_ZPASS = 0x00000364 >> 2,
NV4097_SET_SHADE_MODE = 0x00000368 >> 2,
NV4097_SET_BLEND_ENABLE_MRT = 0x0000036c >> 2,
NV4097_SET_COLOR_MASK_MRT = 0x00000370 >> 2,
NV4097_SET_LOGIC_OP_ENABLE = 0x00000374 >> 2,
NV4097_SET_LOGIC_OP = 0x00000378 >> 2,
NV4097_SET_BLEND_COLOR2 = 0x0000037c >> 2,
NV4097_SET_DEPTH_BOUNDS_TEST_ENABLE = 0x00000380 >> 2,
NV4097_SET_DEPTH_BOUNDS_MIN = 0x00000384 >> 2,
NV4097_SET_DEPTH_BOUNDS_MAX = 0x00000388 >> 2,
NV4097_SET_CLIP_MIN = 0x00000394 >> 2,
NV4097_SET_CLIP_MAX = 0x00000398 >> 2,
NV4097_SET_CONTROL0 = 0x000003b0 >> 2,
NV4097_SET_LINE_WIDTH = 0x000003b8 >> 2,
NV4097_SET_LINE_SMOOTH_ENABLE = 0x000003bc >> 2,
NV4097_SET_ANISO_SPREAD = 0x000003c0 >> 2,
NV4097_SET_SCISSOR_HORIZONTAL = 0x000008c0 >> 2,
NV4097_SET_SCISSOR_VERTICAL = 0x000008c4 >> 2,
NV4097_SET_FOG_MODE = 0x000008cc >> 2,
NV4097_SET_FOG_PARAMS = 0x000008d0 >> 2,
NV4097_SET_SHADER_PROGRAM = 0x000008e4 >> 2,
NV4097_SET_VERTEX_TEXTURE_OFFSET = 0x00000900 >> 2,
NV4097_SET_VERTEX_TEXTURE_FORMAT = 0x00000904 >> 2,
NV4097_SET_VERTEX_TEXTURE_ADDRESS = 0x00000908 >> 2,
NV4097_SET_VERTEX_TEXTURE_CONTROL0 = 0x0000090c >> 2,
NV4097_SET_VERTEX_TEXTURE_CONTROL3 = 0x00000910 >> 2,
NV4097_SET_VERTEX_TEXTURE_FILTER = 0x00000914 >> 2,
NV4097_SET_VERTEX_TEXTURE_IMAGE_RECT = 0x00000918 >> 2,
NV4097_SET_VERTEX_TEXTURE_BORDER_COLOR = 0x0000091c >> 2,
NV4097_SET_VIEWPORT_HORIZONTAL = 0x00000a00 >> 2,
NV4097_SET_VIEWPORT_VERTICAL = 0x00000a04 >> 2,
NV4097_SET_POINT_CENTER_MODE = 0x00000a0c >> 2,
NV4097_ZCULL_SYNC = 0x00000a1c >> 2,
NV4097_SET_VIEWPORT_OFFSET = 0x00000a20 >> 2,
NV4097_SET_VIEWPORT_SCALE = 0x00000a30 >> 2,
NV4097_SET_POLY_OFFSET_POINT_ENABLE = 0x00000a60 >> 2,
NV4097_SET_POLY_OFFSET_LINE_ENABLE = 0x00000a64 >> 2,
NV4097_SET_POLY_OFFSET_FILL_ENABLE = 0x00000a68 >> 2,
NV4097_SET_DEPTH_FUNC = 0x00000a6c >> 2,
NV4097_SET_DEPTH_MASK = 0x00000a70 >> 2,
NV4097_SET_DEPTH_TEST_ENABLE = 0x00000a74 >> 2,
NV4097_SET_POLYGON_OFFSET_SCALE_FACTOR = 0x00000a78 >> 2,
NV4097_SET_POLYGON_OFFSET_BIAS = 0x00000a7c >> 2,
NV4097_SET_VERTEX_DATA_SCALED4S_M = 0x00000a80 >> 2,
NV4097_SET_TEXTURE_CONTROL2 = 0x00000b00 >> 2,
NV4097_SET_TEX_COORD_CONTROL = 0x00000b40 >> 2,
NV4097_SET_TRANSFORM_PROGRAM = 0x00000b80 >> 2,
NV4097_SET_SPECULAR_ENABLE = 0x00001428 >> 2,
NV4097_SET_TWO_SIDE_LIGHT_EN = 0x0000142c >> 2,
NV4097_CLEAR_ZCULL_SURFACE = 0x00001438 >> 2,
NV4097_SET_PERFORMANCE_PARAMS = 0x00001450 >> 2,
NV4097_SET_FLAT_SHADE_OP = 0x00001454 >> 2,
NV4097_SET_EDGE_FLAG = 0x0000145c >> 2,
NV4097_SET_USER_CLIP_PLANE_CONTROL = 0x00001478 >> 2,
NV4097_SET_POLYGON_STIPPLE = 0x0000147c >> 2,
NV4097_SET_POLYGON_STIPPLE_PATTERN = 0x00001480 >> 2,
NV4097_SET_VERTEX_DATA3F_M = 0x00001500 >> 2,
NV4097_SET_VERTEX_DATA_ARRAY_OFFSET = 0x00001680 >> 2,
NV4097_INVALIDATE_VERTEX_CACHE_FILE = 0x00001710 >> 2,
NV4097_INVALIDATE_VERTEX_FILE = 0x00001714 >> 2,
NV4097_PIPE_NOP = 0x00001718 >> 2,
NV4097_SET_VERTEX_DATA_BASE_OFFSET = 0x00001738 >> 2,
NV4097_SET_VERTEX_DATA_BASE_INDEX = 0x0000173c >> 2,
NV4097_SET_VERTEX_DATA_ARRAY_FORMAT = 0x00001740 >> 2,
NV4097_CLEAR_REPORT_VALUE = 0x000017c8 >> 2,
NV4097_SET_ZPASS_PIXEL_COUNT_ENABLE = 0x000017cc >> 2,
NV4097_GET_REPORT = 0x00001800 >> 2,
NV4097_SET_ZCULL_STATS_ENABLE = 0x00001804 >> 2,
NV4097_SET_BEGIN_END = 0x00001808 >> 2,
NV4097_ARRAY_ELEMENT16 = 0x0000180c >> 2,
NV4097_ARRAY_ELEMENT32 = 0x00001810 >> 2,
NV4097_DRAW_ARRAYS = 0x00001814 >> 2,
NV4097_INLINE_ARRAY = 0x00001818 >> 2,
NV4097_SET_INDEX_ARRAY_ADDRESS = 0x0000181c >> 2,
NV4097_SET_INDEX_ARRAY_DMA = 0x00001820 >> 2,
NV4097_DRAW_INDEX_ARRAY = 0x00001824 >> 2,
NV4097_SET_FRONT_POLYGON_MODE = 0x00001828 >> 2,
NV4097_SET_BACK_POLYGON_MODE = 0x0000182c >> 2,
NV4097_SET_CULL_FACE = 0x00001830 >> 2,
NV4097_SET_FRONT_FACE = 0x00001834 >> 2,
NV4097_SET_POLY_SMOOTH_ENABLE = 0x00001838 >> 2,
NV4097_SET_CULL_FACE_ENABLE = 0x0000183c >> 2,
NV4097_SET_TEXTURE_CONTROL3 = 0x00001840 >> 2,
NV4097_SET_VERTEX_DATA2F_M = 0x00001880 >> 2,
NV4097_SET_VERTEX_DATA2S_M = 0x00001900 >> 2,
NV4097_SET_VERTEX_DATA4UB_M = 0x00001940 >> 2,
NV4097_SET_VERTEX_DATA4S_M = 0x00001980 >> 2,
NV4097_SET_TEXTURE_OFFSET = 0x00001a00 >> 2,
NV4097_SET_TEXTURE_FORMAT = 0x00001a04 >> 2,
NV4097_SET_TEXTURE_ADDRESS = 0x00001a08 >> 2,
NV4097_SET_TEXTURE_CONTROL0 = 0x00001a0c >> 2,
NV4097_SET_TEXTURE_CONTROL1 = 0x00001a10 >> 2,
NV4097_SET_TEXTURE_FILTER = 0x00001a14 >> 2,
NV4097_SET_TEXTURE_IMAGE_RECT = 0x00001a18 >> 2,
NV4097_SET_TEXTURE_BORDER_COLOR = 0x00001a1c >> 2,
NV4097_SET_VERTEX_DATA4F_M = 0x00001c00 >> 2,
NV4097_SET_COLOR_KEY_COLOR = 0x00001d00 >> 2,
NV4097_SET_SHADER_CONTROL = 0x00001d60 >> 2,
NV4097_SET_INDEXED_CONSTANT_READ_LIMITS = 0x00001d64 >> 2,
NV4097_SET_SEMAPHORE_OFFSET = 0x00001d6c >> 2,
NV4097_BACK_END_WRITE_SEMAPHORE_RELEASE = 0x00001d70 >> 2,
NV4097_TEXTURE_READ_SEMAPHORE_RELEASE = 0x00001d74 >> 2,
NV4097_SET_ZMIN_MAX_CONTROL = 0x00001d78 >> 2,
NV4097_SET_ANTI_ALIASING_CONTROL = 0x00001d7c >> 2,
NV4097_SET_SURFACE_COMPRESSION = 0x00001d80 >> 2,
NV4097_SET_ZCULL_EN = 0x00001d84 >> 2,
NV4097_SET_SHADER_WINDOW = 0x00001d88 >> 2,
NV4097_SET_ZSTENCIL_CLEAR_VALUE = 0x00001d8c >> 2,
NV4097_SET_COLOR_CLEAR_VALUE = 0x00001d90 >> 2,
NV4097_CLEAR_SURFACE = 0x00001d94 >> 2,
NV4097_SET_CLEAR_RECT_HORIZONTAL = 0x00001d98 >> 2,
NV4097_SET_CLEAR_RECT_VERTICAL = 0x00001d9c >> 2,
NV4097_SET_CLIP_ID_TEST_ENABLE = 0x00001da4 >> 2,
NV4097_SET_RESTART_INDEX_ENABLE = 0x00001dac >> 2,
NV4097_SET_RESTART_INDEX = 0x00001db0 >> 2,
NV4097_SET_LINE_STIPPLE = 0x00001db4 >> 2,
NV4097_SET_LINE_STIPPLE_PATTERN = 0x00001db8 >> 2,
NV4097_SET_VERTEX_DATA1F_M = 0x00001e40 >> 2,
NV4097_SET_TRANSFORM_EXECUTION_MODE = 0x00001e94 >> 2,
NV4097_SET_RENDER_ENABLE = 0x00001e98 >> 2,
NV4097_SET_TRANSFORM_PROGRAM_LOAD = 0x00001e9c >> 2,
NV4097_SET_TRANSFORM_PROGRAM_START = 0x00001ea0 >> 2,
NV4097_SET_ZCULL_CONTROL0 = 0x00001ea4 >> 2,
NV4097_SET_ZCULL_CONTROL1 = 0x00001ea8 >> 2,
NV4097_SET_SCULL_CONTROL = 0x00001eac >> 2,
NV4097_SET_POINT_SIZE = 0x00001ee0 >> 2,
NV4097_SET_POINT_PARAMS_ENABLE = 0x00001ee4 >> 2,
NV4097_SET_POINT_SPRITE_CONTROL = 0x00001ee8 >> 2,
NV4097_SET_TRANSFORM_TIMEOUT = 0x00001ef8 >> 2,
NV4097_SET_TRANSFORM_CONSTANT_LOAD = 0x00001efc >> 2,
NV4097_SET_TRANSFORM_CONSTANT = 0x00001f00 >> 2,
NV4097_SET_FREQUENCY_DIVIDER_OPERATION = 0x00001fc0 >> 2,
NV4097_SET_ATTRIB_COLOR = 0x00001fc4 >> 2,
NV4097_SET_ATTRIB_TEX_COORD = 0x00001fc8 >> 2,
NV4097_SET_ATTRIB_TEX_COORD_EX = 0x00001fcc >> 2,
NV4097_SET_ATTRIB_UCLIP0 = 0x00001fd0 >> 2,
NV4097_SET_ATTRIB_UCLIP1 = 0x00001fd4 >> 2,
NV4097_INVALIDATE_L2 = 0x00001fd8 >> 2,
NV4097_SET_REDUCE_DST_COLOR = 0x00001fe0 >> 2,
NV4097_SET_NO_PARANOID_TEXTURE_FETCHES = 0x00001fe8 >> 2,
NV4097_SET_SHADER_PACKER = 0x00001fec >> 2,
NV4097_SET_VERTEX_ATTRIB_INPUT_MASK = 0x00001ff0 >> 2,
NV4097_SET_VERTEX_ATTRIB_OUTPUT_MASK = 0x00001ff4 >> 2,
NV4097_SET_TRANSFORM_BRANCH_BITS = 0x00001ff8 >> 2,
// NV03_MEMORY_TO_MEMORY_FORMAT (NV0039)
NV0039_SET_OBJECT = 0x00002000 >> 2,
NV0039_SET_CONTEXT_DMA_NOTIFIES = 0x00002180 >> 2,
NV0039_SET_CONTEXT_DMA_BUFFER_IN = 0x00002184 >> 2,
NV0039_SET_CONTEXT_DMA_BUFFER_OUT = 0x00002188 >> 2,
NV0039_OFFSET_IN = 0x0000230C >> 2,
NV0039_OFFSET_OUT = 0x00002310 >> 2,
NV0039_PITCH_IN = 0x00002314 >> 2,
NV0039_PITCH_OUT = 0x00002318 >> 2,
NV0039_LINE_LENGTH_IN = 0x0000231C >> 2,
NV0039_LINE_COUNT = 0x00002320 >> 2,
NV0039_FORMAT = 0x00002324 >> 2,
NV0039_BUFFER_NOTIFY = 0x00002328 >> 2,
// NV30_CONTEXT_SURFACES_2D (NV3062)
NV3062_SET_OBJECT = 0x00006000 >> 2,
NV3062_SET_CONTEXT_DMA_NOTIFIES = 0x00006180 >> 2,
NV3062_SET_CONTEXT_DMA_IMAGE_SOURCE = 0x00006184 >> 2,
NV3062_SET_CONTEXT_DMA_IMAGE_DESTIN = 0x00006188 >> 2,
NV3062_SET_COLOR_FORMAT = 0x00006300 >> 2,
NV3062_SET_PITCH = 0x00006304 >> 2,
NV3062_SET_OFFSET_SOURCE = 0x00006308 >> 2,
NV3062_SET_OFFSET_DESTIN = 0x0000630C >> 2,
// NV30_CONTEXT_SURFACE_SWIZZLED (NV309E)
NV309E_SET_OBJECT = 0x00008000 >> 2,
NV309E_SET_CONTEXT_DMA_NOTIFIES = 0x00008180 >> 2,
NV309E_SET_CONTEXT_DMA_IMAGE = 0x00008184 >> 2,
NV309E_SET_FORMAT = 0x00008300 >> 2,
NV309E_SET_OFFSET = 0x00008304 >> 2,
// NV30_IMAGE_FROM_CPU (NV308A)
NV308A_SET_OBJECT = 0x0000A000 >> 2,
NV308A_SET_CONTEXT_DMA_NOTIFIES = 0x0000A180 >> 2,
NV308A_SET_CONTEXT_COLOR_KEY = 0x0000A184 >> 2,
NV308A_SET_CONTEXT_CLIP_RECTANGLE = 0x0000A188 >> 2,
NV308A_SET_CONTEXT_PATTERN = 0x0000A18C >> 2,
NV308A_SET_CONTEXT_ROP = 0x0000A190 >> 2,
NV308A_SET_CONTEXT_BETA1 = 0x0000A194 >> 2,
NV308A_SET_CONTEXT_BETA4 = 0x0000A198 >> 2,
NV308A_SET_CONTEXT_SURFACE = 0x0000A19C >> 2,
NV308A_SET_COLOR_CONVERSION = 0x0000A2F8 >> 2,
NV308A_SET_OPERATION = 0x0000A2FC >> 2,
NV308A_SET_COLOR_FORMAT = 0x0000A300 >> 2,
NV308A_POINT = 0x0000A304 >> 2,
NV308A_SIZE_OUT = 0x0000A308 >> 2,
NV308A_SIZE_IN = 0x0000A30C >> 2,
NV308A_COLOR = 0x0000A400 >> 2,
// NV30_SCALED_IMAGE_FROM_MEMORY (NV3089)
NV3089_SET_OBJECT = 0x0000C000 >> 2,
NV3089_SET_CONTEXT_DMA_NOTIFIES = 0x0000C180 >> 2,
NV3089_SET_CONTEXT_DMA_IMAGE = 0x0000C184 >> 2,
NV3089_SET_CONTEXT_PATTERN = 0x0000C188 >> 2,
NV3089_SET_CONTEXT_ROP = 0x0000C18C >> 2,
NV3089_SET_CONTEXT_BETA1 = 0x0000C190 >> 2,
NV3089_SET_CONTEXT_BETA4 = 0x0000C194 >> 2,
NV3089_SET_CONTEXT_SURFACE = 0x0000C198 >> 2,
NV3089_SET_COLOR_CONVERSION = 0x0000C2FC >> 2,
NV3089_SET_COLOR_FORMAT = 0x0000C300 >> 2,
NV3089_SET_OPERATION = 0x0000C304 >> 2,
NV3089_CLIP_POINT = 0x0000C308 >> 2,
NV3089_CLIP_SIZE = 0x0000C30C >> 2,
NV3089_IMAGE_OUT_POINT = 0x0000C310 >> 2,
NV3089_IMAGE_OUT_SIZE = 0x0000C314 >> 2,
NV3089_DS_DX = 0x0000C318 >> 2,
NV3089_DT_DY = 0x0000C31C >> 2,
NV3089_IMAGE_IN_SIZE = 0x0000C400 >> 2,
NV3089_IMAGE_IN_FORMAT = 0x0000C404 >> 2,
NV3089_IMAGE_IN_OFFSET = 0x0000C408 >> 2,
NV3089_IMAGE_IN = 0x0000C40C >> 2,
//lv1 hypervisor commands
GCM_SET_DRIVER_OBJECT = 0x0000E000 >> 2,
GCM_FLIP_HEAD = 0x0000E920 >> 2, //0xE920:0xE924: Flip head 0 or 1
GCM_DRIVER_QUEUE = 0x0000E940 >> 2, //0xE940:0xE95C: First two indices prepare display buffers, rest unknown
GCM_SET_USER_COMMAND = 0x0000EB00 >> 2, //0xEB00:0xEB04: User interrupt
GCM_FLIP_COMMAND = 0x0000FEAC >> 2
};
enum Method : u32
{
/*
CELL_GCM_METHOD_FLAG_NON_INCREMENT = 0x40000000,
CELL_GCM_METHOD_FLAG_JUMP = 0x20000000,
CELL_GCM_METHOD_FLAG_CALL = 0x00000002,
CELL_GCM_METHOD_FLAG_RETURN = 0x00020000,
*/
RSX_METHOD_OLD_JUMP_CMD_MASK = 0xe0000003,
RSX_METHOD_OLD_JUMP_CMD = 0x20000000,
RSX_METHOD_OLD_JUMP_OFFSET_MASK = 0x1ffffffc,
RSX_METHOD_INCREMENT_CMD_MASK = 0xe0030003,
RSX_METHOD_INCREMENT_CMD = 0,
RSX_METHOD_NON_INCREMENT_CMD_MASK = 0xe0030003,
RSX_METHOD_NON_INCREMENT_CMD = 0x40000000,
RSX_METHOD_COUNT_MASK = 0x1ffc0000,
RSX_METHOD_COUNT_SHIFT = 18,
RSX_METHOD_METHOD_MASK = 0x0000fffc,
RSX_METHOD_NEW_JUMP_CMD_MASK = 0xe0000003,
RSX_METHOD_NEW_JUMP_CMD = 0x00000001,
RSX_METHOD_NEW_JUMP_OFFSET_MASK = 0xfffffffc,
RSX_METHOD_CALL_CMD_MASK = 0x00000003,
RSX_METHOD_CALL_CMD = 0x00000002,
RSX_METHOD_CALL_OFFSET_MASK = 0x1ffffffc,
RSX_METHOD_NON_METHOD_CMD_MASK = 0xa0030003,
RSX_METHOD_RETURN_CMD = 0x00020000,
RSX_METHOD_RETURN_MASK = 0xffff0003,
RSX_METHOD_NOP_CMD = 0x00000000,
RSX_METHOD_NOP_MASK = 0xbfff0003,
// Stack is empty (invalid value)
RSX_CALL_STACK_EMPTY = 0x00000003,
};
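// Illustrative sketch, not part of the original header: pulling the register
// index and argument count out of a raw FIFO command word using the masks
// above. 'cmd' is a hypothetical 32-bit word read from the command buffer;
// the helper names are invented for this example.
constexpr u32 rsx_method_register(u32 cmd)
{
// Byte offset of the register, divided by 4 to match the CellGcmMethod values
return (cmd & RSX_METHOD_METHOD_MASK) >> 2;
}
constexpr u32 rsx_method_count(u32 cmd)
{
return (cmd & RSX_METHOD_COUNT_MASK) >> RSX_METHOD_COUNT_SHIFT;
}
constexpr bool rsx_method_is_non_incrementing(u32 cmd)
{
return (cmd & RSX_METHOD_NON_INCREMENT_CMD_MASK) == RSX_METHOD_NON_INCREMENT_CMD;
}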
// Fog
enum
{
CELL_GCM_FOG_MODE_LINEAR = 0x2601,
CELL_GCM_FOG_MODE_EXP = 0x0800,
CELL_GCM_FOG_MODE_EXP2 = 0x0801,
CELL_GCM_FOG_MODE_EXP_ABS = 0x0802,
CELL_GCM_FOG_MODE_EXP2_ABS = 0x0803,
CELL_GCM_FOG_MODE_LINEAR_ABS = 0x0804,
};
// ISO
enum
{
CELL_GCM_TEXTURE_ISO_LOW = 0,
CELL_GCM_TEXTURE_ISO_HIGH = 1,
CELL_GCM_TEXTURE_ANISO_LOW = 0,
CELL_GCM_TEXTURE_ANISO_HIGH = 1,
};
// Depth format
enum
{
CELL_GCM_DEPTH_FORMAT_FIXED = 0,
CELL_GCM_DEPTH_FORMAT_FLOAT = 1,
};
// Surface clear bitfields (aggregates)
enum
{
RSX_GCM_CLEAR_DEPTH_BIT = 0x01,
RSX_GCM_CLEAR_STENCIL_BIT = 0x02,
RSX_GCM_CLEAR_RED_BIT = 0x10,
RSX_GCM_CLEAR_GREEN_BIT = 0x20,
RSX_GCM_CLEAR_BLUE_BIT = 0x40,
RSX_GCM_CLEAR_ALPHA_BIT = 0x80,
RSX_GCM_CLEAR_COLOR_RG_MASK = (RSX_GCM_CLEAR_RED_BIT | RSX_GCM_CLEAR_GREEN_BIT),
RSX_GCM_CLEAR_COLOR_RGB_MASK = (RSX_GCM_CLEAR_RED_BIT | RSX_GCM_CLEAR_GREEN_BIT | RSX_GCM_CLEAR_BLUE_BIT),
RSX_GCM_CLEAR_COLOR_RGBA_MASK = (RSX_GCM_CLEAR_COLOR_RGB_MASK | RSX_GCM_CLEAR_ALPHA_BIT),
RSX_GCM_CLEAR_DEPTH_STENCIL_MASK = (RSX_GCM_CLEAR_DEPTH_BIT | RSX_GCM_CLEAR_STENCIL_BIT),
RSX_GCM_CLEAR_ANY_MASK = (RSX_GCM_CLEAR_COLOR_RGBA_MASK | RSX_GCM_CLEAR_DEPTH_STENCIL_MASK)
};
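// Illustrative sketch, not part of the original header: classifying a surface
// clear mask built from the bits above. 'clear_arg' is a hypothetical raw mask
// value and the helper names are invented for this example.
constexpr bool rsx_clear_touches_color(u32 clear_arg)
{
return (clear_arg & RSX_GCM_CLEAR_COLOR_RGBA_MASK) != 0;
}
constexpr bool rsx_clear_touches_depth_stencil(u32 clear_arg)
{
return (clear_arg & RSX_GCM_CLEAR_DEPTH_STENCIL_MASK) != 0;
}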
enum
{
// Surface Target
CELL_GCM_SURFACE_TARGET_NONE = 0,
CELL_GCM_SURFACE_TARGET_0 = 1,
CELL_GCM_SURFACE_TARGET_1 = 2,
CELL_GCM_SURFACE_TARGET_MRT1 = 0x13,
CELL_GCM_SURFACE_TARGET_MRT2 = 0x17,
CELL_GCM_SURFACE_TARGET_MRT3 = 0x1f,
// Surface Depth
CELL_GCM_SURFACE_Z16 = 1,
CELL_GCM_SURFACE_Z24S8 = 2,
// Surface Antialias
CELL_GCM_SURFACE_CENTER_1 = 0,
CELL_GCM_SURFACE_DIAGONAL_CENTERED_2 = 3,
CELL_GCM_SURFACE_SQUARE_CENTERED_4 = 4,
CELL_GCM_SURFACE_SQUARE_ROTATED_4 = 5,
// Surface format
CELL_GCM_SURFACE_X1R5G5B5_Z1R5G5B5 = 1,
CELL_GCM_SURFACE_X1R5G5B5_O1R5G5B5 = 2,
CELL_GCM_SURFACE_R5G6B5 = 3,
CELL_GCM_SURFACE_X8R8G8B8_Z8R8G8B8 = 4,
CELL_GCM_SURFACE_X8R8G8B8_O8R8G8B8 = 5,
CELL_GCM_SURFACE_A8R8G8B8 = 8,
CELL_GCM_SURFACE_B8 = 9,
CELL_GCM_SURFACE_G8B8 = 10,
CELL_GCM_SURFACE_F_W16Z16Y16X16 = 11,
CELL_GCM_SURFACE_F_W32Z32Y32X32 = 12,
CELL_GCM_SURFACE_F_X32 = 13,
CELL_GCM_SURFACE_X8B8G8R8_Z8B8G8R8 = 14,
CELL_GCM_SURFACE_X8B8G8R8_O8B8G8R8 = 15,
CELL_GCM_SURFACE_A8B8G8R8 = 16,
// Wrap
CELL_GCM_TEXTURE_WRAP = 1,
CELL_GCM_TEXTURE_MIRROR = 2,
CELL_GCM_TEXTURE_CLAMP_TO_EDGE = 3,
CELL_GCM_TEXTURE_BORDER = 4,
CELL_GCM_TEXTURE_CLAMP = 5,
CELL_GCM_TEXTURE_MIRROR_ONCE_CLAMP_TO_EDGE = 6,
CELL_GCM_TEXTURE_MIRROR_ONCE_BORDER = 7,
CELL_GCM_TEXTURE_MIRROR_ONCE_CLAMP = 8,
// Max Anisotropy
CELL_GCM_TEXTURE_MAX_ANISO_1 = 0,
CELL_GCM_TEXTURE_MAX_ANISO_2 = 1,
CELL_GCM_TEXTURE_MAX_ANISO_4 = 2,
CELL_GCM_TEXTURE_MAX_ANISO_6 = 3,
CELL_GCM_TEXTURE_MAX_ANISO_8 = 4,
CELL_GCM_TEXTURE_MAX_ANISO_10 = 5,
CELL_GCM_TEXTURE_MAX_ANISO_12 = 6,
CELL_GCM_TEXTURE_MAX_ANISO_16 = 7,
// Texture Filter
CELL_GCM_TEXTURE_NEAREST = 1,
CELL_GCM_TEXTURE_LINEAR = 2,
CELL_GCM_TEXTURE_NEAREST_NEAREST = 3,
CELL_GCM_TEXTURE_LINEAR_NEAREST = 4,
CELL_GCM_TEXTURE_NEAREST_LINEAR = 5,
CELL_GCM_TEXTURE_LINEAR_LINEAR = 6,
CELL_GCM_TEXTURE_CONVOLUTION_MIN = 7,
CELL_GCM_TEXTURE_CONVOLUTION_MAG = 4,
CELL_GCM_TEXTURE_CONVOLUTION_QUINCUNX = 1,
CELL_GCM_TEXTURE_CONVOLUTION_GAUSSIAN = 2,
CELL_GCM_TEXTURE_CONVOLUTION_QUINCUNX_ALT = 3,
};
enum
{
CELL_GCM_CLEAR = 0x1500,
CELL_GCM_AND = 0x1501,
CELL_GCM_AND_REVERSE = 0x1502,
CELL_GCM_COPY = 0x1503,
CELL_GCM_AND_INVERTED = 0x1504,
CELL_GCM_NOOP = 0x1505,
CELL_GCM_XOR = 0x1506,
CELL_GCM_OR = 0x1507,
CELL_GCM_NOR = 0x1508,
CELL_GCM_EQUIV = 0x1509,
CELL_GCM_OR_REVERSE = 0x150B,
CELL_GCM_COPY_INVERTED = 0x150C,
CELL_GCM_OR_INVERTED = 0x150D,
CELL_GCM_NAND = 0x150E,
CELL_GCM_SET = 0x150F,
};
enum
{
CELL_GCM_TRANSFER_ORIGIN_CENTER = 1,
CELL_GCM_TRANSFER_ORIGIN_CORNER = 2,
CELL_GCM_TRANSFER_INTERPOLATOR_ZOH = 0,
CELL_GCM_TRANSFER_INTERPOLATOR_FOH = 1,
};
enum
{
CELL_GCM_TRANSFER_OPERATION_SRCCOPY_AND = 0,
CELL_GCM_TRANSFER_OPERATION_ROP_AND = 1,
CELL_GCM_TRANSFER_OPERATION_BLEND_AND = 2,
CELL_GCM_TRANSFER_OPERATION_SRCCOPY = 3,
CELL_GCM_TRANSFER_OPERATION_SRCCOPY_PREMULT = 4,
CELL_GCM_TRANSFER_OPERATION_BLEND_PREMULT = 5,
};
enum
{
CELL_GCM_TRANSFER_CONVERSION_DITHER = 0,
CELL_GCM_TRANSFER_CONVERSION_TRUNCATE = 1,
CELL_GCM_TRANSFER_CONVERSION_SUBTRACT_TRUNCATE = 2,
};
enum
{
CELL_GCM_TRANSFER_SCALE_FORMAT_A1R5G5B5 = 1,
CELL_GCM_TRANSFER_SCALE_FORMAT_X1R5G5B5 = 2,
CELL_GCM_TRANSFER_SCALE_FORMAT_A8R8G8B8 = 3,
CELL_GCM_TRANSFER_SCALE_FORMAT_X8R8G8B8 = 4,
CELL_GCM_TRANSFER_SCALE_FORMAT_CR8YB8CB8YA8 = 5,
CELL_GCM_TRANSFER_SCALE_FORMAT_YB8CR8YA8CB8 = 6,
CELL_GCM_TRANSFER_SCALE_FORMAT_R5G6B5 = 7,
CELL_GCM_TRANSFER_SCALE_FORMAT_Y8 = 8,
CELL_GCM_TRANSFER_SCALE_FORMAT_AY8 = 9,
CELL_GCM_TRANSFER_SCALE_FORMAT_EYB8ECR8EYA8ECB8 = 10,
CELL_GCM_TRANSFER_SCALE_FORMAT_ECR8EYB8ECB8EYA8 = 11,
CELL_GCM_TRANSFER_SCALE_FORMAT_A8B8G8R8 = 12,
CELL_GCM_TRANSFER_SCALE_FORMAT_X8B8G8R8 = 13,
};
enum
{
// Destination Format conversions
CELL_GCM_TRANSFER_SURFACE_FORMAT_R5G6B5 = 4,
CELL_GCM_TRANSFER_SURFACE_FORMAT_A8R8G8B8 = 10,
CELL_GCM_TRANSFER_SURFACE_FORMAT_Y32 = 11,
};
enum
{
CELL_GCM_TRANSFER_SURFACE = 0,
CELL_GCM_TRANSFER_SWIZZLE = 1,
};
enum
{
CELL_GCM_SHIFT_SET_SHADER_CONTROL_CONTROL_TXP = 15,
CELL_GCM_MASK_SET_SHADER_CONTROL_CONTROL_TXP = 0x00008000,
CELL_GCM_IOMAP_FLAG_STRICT_ORDERING = 1 << 1,
};
enum
{
CELL_GCM_CONTEXT_SURFACE2D = 0x313371C3,
CELL_GCM_CONTEXT_SWIZZLE2D = 0x31337A73,
};
enum
{
CELL_GCM_INDEX_RANGE_LABEL_MIN = 64,
CELL_GCM_INDEX_RANGE_LABEL_MAX = 255,
CELL_GCM_INDEX_RANGE_LABEL_COUNT = (256 - 64),
CELL_GCM_INDEX_RANGE_NOTIFY_MAIN_MIN = 0,
CELL_GCM_INDEX_RANGE_NOTIFY_MAIN_MAX = 255,
CELL_GCM_INDEX_RANGE_NOTIFY_MAIN_COUNT = 256,
CELL_GCM_INDEX_RANGE_REPORT_MAIN_MIN = 0,
CELL_GCM_INDEX_RANGE_REPORT_MAIN_MAX = (1024 * 1024 - 1),
CELL_GCM_INDEX_RANGE_REPORT_MAIN_COUNT = (1024 * 1024),
CELL_GCM_INDEX_RANGE_REPORT_LOCAL_MIN = 0,
CELL_GCM_INDEX_RANGE_REPORT_LOCAL_MAX = 2047,
CELL_GCM_INDEX_RANGE_REPORT_LOCAL_COUNT = 2048,
CELL_GCM_INDEX_RANGE_TILE_MIN = 0,
CELL_GCM_INDEX_RANGE_TILE_MAX = 14,
CELL_GCM_INDEX_RANGE_TILE_COUNT = 15,
CELL_GCM_INDEX_RANGE_ZCULL_MIN = 0,
CELL_GCM_INDEX_RANGE_ZCULL_MAX = 7,
CELL_GCM_INDEX_RANGE_ZCULL_COUNT = 8,
};
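// Illustrative sketch, not part of the original header: validating a label
// index against the documented range above. The helper name is invented for
// this example.
constexpr bool is_valid_gcm_label_index(u32 index)
{
return index >= CELL_GCM_INDEX_RANGE_LABEL_MIN && index <= CELL_GCM_INDEX_RANGE_LABEL_MAX;
}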
enum
{
CELL_GCM_DISPLAY_FIELD_TOP = 1,
CELL_GCM_DISPLAY_FIELD_BOTTOM = 0,
};
enum
{
CELL_GCM_USER_CLIP_PLANE_DISABLE = 0,
CELL_GCM_USER_CLIP_PLANE_ENABLE_LT = 1,
CELL_GCM_USER_CLIP_PLANE_ENABLE_GE = 2,
};
enum
{
CELL_GCM_FLAT = 0x1D00,
CELL_GCM_SMOOTH = 0x1D01,
};
enum
{
CELL_GCM_POLYGON_MODE_POINT = 0x1B00,
CELL_GCM_POLYGON_MODE_LINE = 0x1B01,
CELL_GCM_POLYGON_MODE_FILL = 0x1B02,
};
enum
{
CELL_GCM_CLEAR_Z = 1 << 0,
CELL_GCM_CLEAR_S = 1 << 1,
CELL_GCM_CLEAR_R = 1 << 4,
CELL_GCM_CLEAR_G = 1 << 5,
CELL_GCM_CLEAR_B = 1 << 6,
CELL_GCM_CLEAR_A = 1 << 7,
CELL_GCM_CLEAR_M = 0xf3
};
enum
{
CELL_GCM_VERTEX_S1 = 1,
CELL_GCM_VERTEX_F = 2,
CELL_GCM_VERTEX_SF = 3,
CELL_GCM_VERTEX_UB = 4,
CELL_GCM_VERTEX_S32K = 5,
CELL_GCM_VERTEX_CMP = 6,
CELL_GCM_VERTEX_UB256 = 7,
CELL_GCM_VERTEX_S16_NR = 1,
CELL_GCM_VERTEX_F32 = 2,
CELL_GCM_VERTEX_F16 = 3,
CELL_GCM_VERTEX_U8_NR = 4,
CELL_GCM_VERTEX_S16_UN = 5,
CELL_GCM_VERTEX_S11_11_10_NR = 6,
CELL_GCM_VERTEX_U8_UN = 7,
};
enum
{
CELL_GCM_WINDOW_ORIGIN_TOP = 0,
CELL_GCM_WINDOW_ORIGIN_BOTTOM = 1,
CELL_GCM_WINDOW_PIXEL_CENTER_HALF = 0,
CELL_GCM_WINDOW_PIXEL_CENTER_INTEGER = 1,
};
enum
{
RSX_VERTEX_BASE_TYPE_UNDEFINED = 0,
RSX_VERTEX_BASE_TYPE_SNORM16,
RSX_VERTEX_BASE_TYPE_FLOAT,
RSX_VERTEX_BASE_TYPE_HALF_FLOAT,
RSX_VERTEX_BASE_TYPE_UNORM8,
RSX_VERTEX_BASE_TYPE_SINT16,
RSX_VERTEX_BASE_TYPE_CMP32,
RSX_VERTEX_BASE_TYPE_UINT8,
};
}
// Public export
// TODO: Don't leak namespaces
using namespace gcm;
namespace rsx
{
template <typename T, u32 min_val, u32 max_val>
expected<T> gcm_enum_cast(u32 value)
{
if (value >= min_val && value <= max_val)
{
return static_cast<T>(value);
}
return exception_utils::soft_exception_t{ rsx::exception_utils::invalid_enum };
}
template <typename T>
expected<T> gcm_enum_cast(u32 value, std::initializer_list<u32> allowed)
{
for (const auto v : allowed)
{
if (value == v)
{
return static_cast<T>(value);
}
}
return exception_utils::soft_exception_t{ rsx::exception_utils::invalid_enum };
}
template <typename T>
expected<T> gcm_enum_cast(u32 value, std::initializer_list<const std::array<u32, 2>> allowed)
{
for (const auto& range : allowed)
{
if (value >= range[0] && value <= range[1])
{
return static_cast<T>(value);
}
}
return exception_utils::soft_exception_t{ rsx::exception_utils::invalid_enum };
}
enum class vertex_base_type : u8
{
s1 = RSX_VERTEX_BASE_TYPE_SNORM16, ///< signed normalized 16-bit int
f = RSX_VERTEX_BASE_TYPE_FLOAT, ///< float
sf = RSX_VERTEX_BASE_TYPE_HALF_FLOAT, ///< half float
ub = RSX_VERTEX_BASE_TYPE_UNORM8, ///< unsigned byte normalized to the 0.f to 1.f range
s32k = RSX_VERTEX_BASE_TYPE_SINT16, ///< signed 16-bit int
cmp = RSX_VERTEX_BASE_TYPE_CMP32, ///< compressed format, aka X11G11Z10; W is always 1
ub256 = RSX_VERTEX_BASE_TYPE_UINT8, ///< unsigned byte interpreted as an integer between 0 and 255
};
static inline auto to_vertex_base_type(u32 in)
{
return in
? gcm_enum_cast<
vertex_base_type,
RSX_VERTEX_BASE_TYPE_SNORM16,
RSX_VERTEX_BASE_TYPE_UINT8>(in)
: expected(vertex_base_type::ub256);
}
enum class index_array_type : u8
{
u32 = CELL_GCM_DRAW_INDEX_ARRAY_TYPE_32,
u16 = CELL_GCM_DRAW_INDEX_ARRAY_TYPE_16
};
enum class primitive_type : u8
{
points = CELL_GCM_PRIMITIVE_POINTS,
lines = CELL_GCM_PRIMITIVE_LINES,
line_loop = CELL_GCM_PRIMITIVE_LINE_LOOP, // line strip whose last vertex is joined back to the first
line_strip = CELL_GCM_PRIMITIVE_LINE_STRIP,
triangles = CELL_GCM_PRIMITIVE_TRIANGLES,
triangle_strip = CELL_GCM_PRIMITIVE_TRIANGLE_STRIP,
triangle_fan = CELL_GCM_PRIMITIVE_TRIANGLE_FAN, // like a strip, except that every triangle shares the first vertex and only one vertex (instead of two) from the previous triangle
quads = CELL_GCM_PRIMITIVE_QUADS,
quad_strip = CELL_GCM_PRIMITIVE_QUAD_STRIP,
polygon = CELL_GCM_PRIMITIVE_POLYGON, // convex polygon
};
static inline auto to_primitive_type(u32 in)
{
return gcm_enum_cast<
primitive_type,
CELL_GCM_PRIMITIVE_POINTS,
CELL_GCM_PRIMITIVE_POLYGON>(in);
}
enum class surface_target : u8
{
none = CELL_GCM_SURFACE_TARGET_NONE,
surface_a = CELL_GCM_SURFACE_TARGET_0,
surface_b = CELL_GCM_SURFACE_TARGET_1,
surfaces_a_b = CELL_GCM_SURFACE_TARGET_MRT1,
surfaces_a_b_c = CELL_GCM_SURFACE_TARGET_MRT2,
surfaces_a_b_c_d = CELL_GCM_SURFACE_TARGET_MRT3,
};
static inline auto to_surface_target(u32 in)
{
return gcm_enum_cast<surface_target>(in, {
CELL_GCM_SURFACE_TARGET_0,
CELL_GCM_SURFACE_TARGET_MRT1,
CELL_GCM_SURFACE_TARGET_NONE,
CELL_GCM_SURFACE_TARGET_1,
CELL_GCM_SURFACE_TARGET_MRT2,
CELL_GCM_SURFACE_TARGET_MRT3
});
}
enum class surface_depth_format : u8
{
z16 = CELL_GCM_SURFACE_Z16, // typeless 16-bit depth
z24s8 = CELL_GCM_SURFACE_Z24S8, // typeless 24-bit depth + 8-bit stencil
};
enum class surface_depth_format2 : u8
{
z16_uint = CELL_GCM_SURFACE_Z16, // unsigned 16-bit depth
z24s8_uint = CELL_GCM_SURFACE_Z24S8, // unsigned 24-bit depth + 8-bit stencil
z16_float, // floating point 16-bit depth
z24s8_float, // floating point 24-bit depth + 8-bit stencil
};
static inline auto to_surface_depth_format(u32 in)
{
return gcm_enum_cast<
surface_depth_format,
CELL_GCM_SURFACE_Z16,
CELL_GCM_SURFACE_Z24S8>(in);
}
constexpr bool operator ==(surface_depth_format2 rhs, surface_depth_format lhs)
{
switch (lhs)
{
case surface_depth_format::z16:
return (rhs == surface_depth_format2::z16_uint || rhs == surface_depth_format2::z16_float);
case surface_depth_format::z24s8:
return (rhs == surface_depth_format2::z24s8_uint || rhs == surface_depth_format2::z24s8_float);
[[unlikely]] default:
return false;
}
}
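// Illustrative sketch, not part of the original header: the constexpr
// comparison above lets the precise formats be checked against the coarse
// GCM format at compile time.
static_assert(surface_depth_format2::z24s8_float == surface_depth_format::z24s8);
static_assert(!(surface_depth_format2::z16_uint == surface_depth_format::z24s8));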
enum class surface_raster_type : u8
{
undefined = CELL_GCM_ZERO, // TODO: Drop this (used in surface cache for optional args)
linear = CELL_GCM_SURFACE_PITCH,
swizzle = CELL_GCM_SURFACE_SWIZZLE,
};
static inline auto to_surface_raster_type(u32 in)
{
return gcm_enum_cast<
surface_raster_type,
CELL_GCM_SURFACE_PITCH,
CELL_GCM_SURFACE_SWIZZLE>(in);
}
enum class surface_antialiasing : u8
{
center_1_sample = CELL_GCM_SURFACE_CENTER_1,
diagonal_centered_2_samples = CELL_GCM_SURFACE_DIAGONAL_CENTERED_2,
square_centered_4_samples = CELL_GCM_SURFACE_SQUARE_CENTERED_4,
square_rotated_4_samples = CELL_GCM_SURFACE_SQUARE_ROTATED_4,
};
static inline auto to_surface_antialiasing(u32 in)
{
return gcm_enum_cast<surface_antialiasing>(in,
{
{ CELL_GCM_SURFACE_CENTER_1, CELL_GCM_SURFACE_CENTER_1 },
{ CELL_GCM_SURFACE_DIAGONAL_CENTERED_2, CELL_GCM_SURFACE_SQUARE_ROTATED_4 }
});
}
enum class surface_color_format : u8
{
x1r5g5b5_z1r5g5b5 = CELL_GCM_SURFACE_X1R5G5B5_Z1R5G5B5,
x1r5g5b5_o1r5g5b5 = CELL_GCM_SURFACE_X1R5G5B5_O1R5G5B5,
r5g6b5 = CELL_GCM_SURFACE_R5G6B5,
x8r8g8b8_z8r8g8b8 = CELL_GCM_SURFACE_X8R8G8B8_Z8R8G8B8,
x8r8g8b8_o8r8g8b8 = CELL_GCM_SURFACE_X8R8G8B8_O8R8G8B8,
a8r8g8b8 = CELL_GCM_SURFACE_A8R8G8B8,
b8 = CELL_GCM_SURFACE_B8,
g8b8 = CELL_GCM_SURFACE_G8B8,
x8b8g8r8_z8b8g8r8 = CELL_GCM_SURFACE_X8B8G8R8_Z8B8G8R8,
x8b8g8r8_o8b8g8r8 = CELL_GCM_SURFACE_X8B8G8R8_O8B8G8R8,
a8b8g8r8 = CELL_GCM_SURFACE_A8B8G8R8,
w16z16y16x16 = CELL_GCM_SURFACE_F_W16Z16Y16X16,
w32z32y32x32 = CELL_GCM_SURFACE_F_W32Z32Y32X32,
x32 = CELL_GCM_SURFACE_F_X32
};
static inline auto to_surface_color_format(u32 in)
{
return gcm_enum_cast<
surface_color_format,
CELL_GCM_SURFACE_X1R5G5B5_Z1R5G5B5,
CELL_GCM_SURFACE_A8B8G8R8>(in);
}
enum class window_origin : u8
{
top = CELL_GCM_WINDOW_ORIGIN_TOP,
bottom = CELL_GCM_WINDOW_ORIGIN_BOTTOM
};
static inline auto to_window_origin(u32 in)
{
return gcm_enum_cast<
window_origin,
CELL_GCM_WINDOW_ORIGIN_TOP,
CELL_GCM_WINDOW_ORIGIN_BOTTOM>(in);
}
enum class window_pixel_center : u8
{
half = CELL_GCM_WINDOW_PIXEL_CENTER_HALF,
integer = CELL_GCM_WINDOW_PIXEL_CENTER_INTEGER
};
static inline auto to_window_pixel_center(u32 in)
{
return gcm_enum_cast<
window_pixel_center,
CELL_GCM_WINDOW_PIXEL_CENTER_HALF,
CELL_GCM_WINDOW_PIXEL_CENTER_INTEGER>(in);
}
enum class comparison_function : u16
{
never = CELL_GCM_NEVER,
less = CELL_GCM_LESS,
equal = CELL_GCM_EQUAL,
less_or_equal = CELL_GCM_LEQUAL,
greater = CELL_GCM_GREATER,
not_equal = CELL_GCM_NOTEQUAL,
greater_or_equal = CELL_GCM_GEQUAL,
always = CELL_GCM_ALWAYS,
};
static inline auto to_comparison_function(u32 in)
{
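// Added note: the OR with 0x200 below maps raw 0x0..0x7 inputs onto the
// GL-style CELL_GCM_NEVER..CELL_GCM_ALWAYS range before the range check.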
return gcm_enum_cast<
comparison_function,
CELL_GCM_NEVER,
CELL_GCM_ALWAYS>(in | 0x200);
}
enum class fog_mode : u16
{
linear = CELL_GCM_FOG_MODE_LINEAR,
exponential = CELL_GCM_FOG_MODE_EXP,
exponential2 = CELL_GCM_FOG_MODE_EXP2,
exponential_abs = CELL_GCM_FOG_MODE_EXP_ABS,
exponential2_abs = CELL_GCM_FOG_MODE_EXP2_ABS,
linear_abs = CELL_GCM_FOG_MODE_LINEAR_ABS
};
static inline auto to_fog_mode(u32 in)
{
if (in == CELL_GCM_FOG_MODE_LINEAR)
{
return expected(fog_mode::linear);
}
return gcm_enum_cast<
fog_mode,
CELL_GCM_FOG_MODE_EXP,
CELL_GCM_FOG_MODE_LINEAR_ABS>(in);
}
/**
* Texture dimension type including an extra cubemap entry
*/
enum class texture_dimension_extended : u8
{
texture_dimension_1d = 0,
texture_dimension_2d = 1,
texture_dimension_cubemap = 2,
texture_dimension_3d = 3,
};
enum class texture_dimension : u8
{
dimension1d = 1,
dimension2d = 2,
dimension3d = 3,
};
static inline auto to_texture_dimension(u32 in)
{
return gcm_enum_cast<texture_dimension, 1, 3>(in);
}
enum class texture_wrap_mode : u8
{
wrap = CELL_GCM_TEXTURE_WRAP,
mirror = CELL_GCM_TEXTURE_MIRROR,
clamp_to_edge = CELL_GCM_TEXTURE_CLAMP_TO_EDGE,
border = CELL_GCM_TEXTURE_BORDER,
clamp = CELL_GCM_TEXTURE_CLAMP,
mirror_once_clamp_to_edge = CELL_GCM_TEXTURE_MIRROR_ONCE_CLAMP_TO_EDGE,
mirror_once_border = CELL_GCM_TEXTURE_MIRROR_ONCE_BORDER,
mirror_once_clamp = CELL_GCM_TEXTURE_MIRROR_ONCE_CLAMP,
};
static inline auto to_texture_wrap_mode(u32 in)
{
return gcm_enum_cast<
texture_wrap_mode,
CELL_GCM_TEXTURE_WRAP,
CELL_GCM_TEXTURE_MIRROR_ONCE_CLAMP>(in);
}
enum class texture_max_anisotropy : u8
{
x1 = CELL_GCM_TEXTURE_MAX_ANISO_1,
x2 = CELL_GCM_TEXTURE_MAX_ANISO_2,
x4 = CELL_GCM_TEXTURE_MAX_ANISO_4,
x6 = CELL_GCM_TEXTURE_MAX_ANISO_6,
x8 = CELL_GCM_TEXTURE_MAX_ANISO_8,
x10 = CELL_GCM_TEXTURE_MAX_ANISO_10,
x12 = CELL_GCM_TEXTURE_MAX_ANISO_12,
x16 = CELL_GCM_TEXTURE_MAX_ANISO_16,
};
static inline auto to_texture_max_anisotropy(u32 in)
{
return gcm_enum_cast<
texture_max_anisotropy,
CELL_GCM_TEXTURE_MAX_ANISO_1,
CELL_GCM_TEXTURE_MAX_ANISO_16>(in);
}
enum class texture_minify_filter : u8
{
nearest = CELL_GCM_TEXTURE_NEAREST, ///< no filtering, mipmap base level
linear = CELL_GCM_TEXTURE_LINEAR, ///< linear filtering, mipmap base level
nearest_nearest = CELL_GCM_TEXTURE_NEAREST_NEAREST, ///< no filtering, closest mipmap level
linear_nearest = CELL_GCM_TEXTURE_LINEAR_NEAREST, ///< linear filtering, closest mipmap level
nearest_linear = CELL_GCM_TEXTURE_NEAREST_LINEAR, ///< no filtering, linear mix between closest mipmap levels
linear_linear = CELL_GCM_TEXTURE_LINEAR_LINEAR, ///< linear filtering, linear mix between closest mipmap levels
convolution_min = CELL_GCM_TEXTURE_CONVOLUTION_MIN, ///< Unknown mode but looks close to linear_linear
};
static inline auto to_texture_minify_filter(u32 in)
{
return gcm_enum_cast<
texture_minify_filter,
CELL_GCM_TEXTURE_NEAREST,
CELL_GCM_TEXTURE_CONVOLUTION_MIN>(in);
}
enum class texture_magnify_filter : u8
{
nearest = CELL_GCM_TEXTURE_NEAREST, ///< no filtering
linear = CELL_GCM_TEXTURE_LINEAR, ///< linear filtering
convolution_mag = CELL_GCM_TEXTURE_CONVOLUTION_MAG, ///< Unknown mode but looks close to linear
};
static inline auto to_texture_magnify_filter(u32 in)
{
return gcm_enum_cast<texture_magnify_filter>(in, { CELL_GCM_TEXTURE_LINEAR, CELL_GCM_TEXTURE_NEAREST, CELL_GCM_TEXTURE_CONVOLUTION_MAG });
}
enum class stencil_op : u16
{
keep = CELL_GCM_KEEP,
zero = CELL_GCM_ZERO,
replace = CELL_GCM_REPLACE,
incr = CELL_GCM_INCR,
decr = CELL_GCM_DECR,
invert = CELL_GCM_INVERT,
incr_wrap = CELL_GCM_INCR_WRAP,
decr_wrap = CELL_GCM_DECR_WRAP,
};
static inline auto to_stencil_op(u32 in)
{
return gcm_enum_cast<stencil_op>(in,
{
CELL_GCM_KEEP, CELL_GCM_ZERO, CELL_GCM_REPLACE,
CELL_GCM_INCR, CELL_GCM_DECR, CELL_GCM_INVERT,
CELL_GCM_INCR_WRAP, CELL_GCM_DECR_WRAP
});
}
enum class blend_equation : u16
{
add = CELL_GCM_FUNC_ADD,
min = CELL_GCM_MIN,
max = CELL_GCM_MAX,
subtract = CELL_GCM_FUNC_SUBTRACT,
reverse_subtract = CELL_GCM_FUNC_REVERSE_SUBTRACT,
reverse_subtract_signed = CELL_GCM_FUNC_REVERSE_SUBTRACT_SIGNED,
add_signed = CELL_GCM_FUNC_ADD_SIGNED,
reverse_add_signed = CELL_GCM_FUNC_REVERSE_ADD_SIGNED,
};
static inline auto to_blend_equation(u32 in)
{
return gcm_enum_cast<blend_equation>(in,
{
{ CELL_GCM_FUNC_ADD, CELL_GCM_FUNC_REVERSE_SUBTRACT },
{ CELL_GCM_FUNC_REVERSE_SUBTRACT_SIGNED, CELL_GCM_FUNC_REVERSE_ADD_SIGNED }
});
}
enum class blend_factor : u16
{
zero = CELL_GCM_ZERO,
one = CELL_GCM_ONE,
src_color = CELL_GCM_SRC_COLOR,
one_minus_src_color = CELL_GCM_ONE_MINUS_SRC_COLOR,
dst_color = CELL_GCM_DST_COLOR,
one_minus_dst_color = CELL_GCM_ONE_MINUS_DST_COLOR,
src_alpha = CELL_GCM_SRC_ALPHA,
one_minus_src_alpha = CELL_GCM_ONE_MINUS_SRC_ALPHA,
dst_alpha = CELL_GCM_DST_ALPHA,
one_minus_dst_alpha = CELL_GCM_ONE_MINUS_DST_ALPHA,
src_alpha_saturate = CELL_GCM_SRC_ALPHA_SATURATE,
constant_color = CELL_GCM_CONSTANT_COLOR,
one_minus_constant_color = CELL_GCM_ONE_MINUS_CONSTANT_COLOR,
constant_alpha = CELL_GCM_CONSTANT_ALPHA,
one_minus_constant_alpha = CELL_GCM_ONE_MINUS_CONSTANT_ALPHA,
};
static inline auto to_blend_factor(u32 in)
{
return gcm_enum_cast<blend_factor>(in,
{
{ CELL_GCM_SRC_COLOR, CELL_GCM_ONE_MINUS_CONSTANT_ALPHA },
{ CELL_GCM_ZERO, CELL_GCM_ONE }
});
}
enum class logic_op : u16
{
logic_clear = CELL_GCM_CLEAR,
logic_and = CELL_GCM_AND,
logic_and_reverse = CELL_GCM_AND_REVERSE,
logic_copy = CELL_GCM_COPY,
logic_and_inverted = CELL_GCM_AND_INVERTED,
logic_noop = CELL_GCM_NOOP,
logic_xor = CELL_GCM_XOR,
logic_or = CELL_GCM_OR,
logic_nor = CELL_GCM_NOR,
logic_equiv = CELL_GCM_EQUIV,
logic_invert = CELL_GCM_INVERT,
logic_or_reverse = CELL_GCM_OR_REVERSE,
logic_copy_inverted = CELL_GCM_COPY_INVERTED,
logic_or_inverted = CELL_GCM_OR_INVERTED,
logic_nand = CELL_GCM_NAND,
logic_set = CELL_GCM_SET,
};
static inline auto to_logic_op(u32 in)
{
return gcm_enum_cast<
logic_op,
CELL_GCM_CLEAR,
CELL_GCM_SET>(in);
}
enum class front_face : u16
{
cw = CELL_GCM_CW, ///< clockwise
ccw = CELL_GCM_CCW, ///< counter-clockwise
};
static inline auto to_front_face(u32 in)
{
return gcm_enum_cast<
front_face,
CELL_GCM_CW,
CELL_GCM_CCW>(in);
}
enum class cull_face : u16
{
front = CELL_GCM_FRONT,
back = CELL_GCM_BACK,
front_and_back = CELL_GCM_FRONT_AND_BACK
};
static inline auto to_cull_face(u32 in)
{
return gcm_enum_cast<cull_face>(in,
{
CELL_GCM_FRONT,
CELL_GCM_BACK,
CELL_GCM_FRONT_AND_BACK
});
}
enum class user_clip_plane_op : u8
{
disable = CELL_GCM_USER_CLIP_PLANE_DISABLE,
less_than = CELL_GCM_USER_CLIP_PLANE_ENABLE_LT,
greater_or_equal = CELL_GCM_USER_CLIP_PLANE_ENABLE_GE,
};
static inline auto to_user_clip_plane_op(u32 in)
{
return gcm_enum_cast<
user_clip_plane_op,
CELL_GCM_USER_CLIP_PLANE_DISABLE,
CELL_GCM_USER_CLIP_PLANE_ENABLE_GE>(in);
}
enum class shading_mode : u16
{
smooth = CELL_GCM_SMOOTH,
flat = CELL_GCM_FLAT,
};
static inline auto to_shading_mode(u32 in)
{
return gcm_enum_cast<
shading_mode,
CELL_GCM_FLAT,
CELL_GCM_SMOOTH>(in);
}
enum class polygon_mode : u16
{
point = CELL_GCM_POLYGON_MODE_POINT,
line = CELL_GCM_POLYGON_MODE_LINE,
fill = CELL_GCM_POLYGON_MODE_FILL,
};
static inline auto to_polygon_mode(u32 in)
{
return gcm_enum_cast<
polygon_mode,
CELL_GCM_POLYGON_MODE_POINT,
CELL_GCM_POLYGON_MODE_FILL>(in);
}
namespace blit_engine
{
enum class transfer_origin : u8
{
center = CELL_GCM_TRANSFER_ORIGIN_CENTER,
corner = CELL_GCM_TRANSFER_ORIGIN_CORNER,
};
static inline auto to_transfer_origin(u32 in)
{
return gcm_enum_cast<
transfer_origin,
CELL_GCM_TRANSFER_ORIGIN_CENTER,
CELL_GCM_TRANSFER_ORIGIN_CORNER>(in);
}
enum class transfer_interpolator : u8
{
zoh = CELL_GCM_TRANSFER_INTERPOLATOR_ZOH,
foh = CELL_GCM_TRANSFER_INTERPOLATOR_FOH,
};
static inline auto to_transfer_interpolator(u32 in)
{
return gcm_enum_cast<
transfer_interpolator,
CELL_GCM_TRANSFER_INTERPOLATOR_ZOH,
CELL_GCM_TRANSFER_INTERPOLATOR_FOH>(in);
}
enum class transfer_operation : u8
{
srccopy_and = CELL_GCM_TRANSFER_OPERATION_SRCCOPY_AND,
rop_and = CELL_GCM_TRANSFER_OPERATION_ROP_AND,
blend_and = CELL_GCM_TRANSFER_OPERATION_BLEND_AND,
srccopy = CELL_GCM_TRANSFER_OPERATION_SRCCOPY,
srccopy_premult = CELL_GCM_TRANSFER_OPERATION_SRCCOPY_PREMULT,
blend_premult = CELL_GCM_TRANSFER_OPERATION_BLEND_PREMULT,
};
static inline auto to_transfer_operation(u32 in)
{
return gcm_enum_cast<
transfer_operation,
CELL_GCM_TRANSFER_OPERATION_SRCCOPY_AND,
CELL_GCM_TRANSFER_OPERATION_BLEND_PREMULT>(in);
}
enum class transfer_source_format : u8
{
a1r5g5b5 = CELL_GCM_TRANSFER_SCALE_FORMAT_A1R5G5B5,
x1r5g5b5 = CELL_GCM_TRANSFER_SCALE_FORMAT_X1R5G5B5,
a8r8g8b8 = CELL_GCM_TRANSFER_SCALE_FORMAT_A8R8G8B8,
x8r8g8b8 = CELL_GCM_TRANSFER_SCALE_FORMAT_X8R8G8B8,
cr8yb8cb8ya8 = CELL_GCM_TRANSFER_SCALE_FORMAT_CR8YB8CB8YA8,
yb8cr8ya8cb8 = CELL_GCM_TRANSFER_SCALE_FORMAT_YB8CR8YA8CB8,
r5g6b5 = CELL_GCM_TRANSFER_SCALE_FORMAT_R5G6B5,
y8 = CELL_GCM_TRANSFER_SCALE_FORMAT_Y8,
ay8 = CELL_GCM_TRANSFER_SCALE_FORMAT_AY8,
eyb8ecr8eya8ecb8 = CELL_GCM_TRANSFER_SCALE_FORMAT_EYB8ECR8EYA8ECB8,
ecr8eyb8ecb8eya8 = CELL_GCM_TRANSFER_SCALE_FORMAT_ECR8EYB8ECB8EYA8,
a8b8g8r8 = CELL_GCM_TRANSFER_SCALE_FORMAT_A8B8G8R8,
x8b8g8r8 = CELL_GCM_TRANSFER_SCALE_FORMAT_X8B8G8R8,
};
static inline auto to_transfer_source_format(u32 in)
{
return gcm_enum_cast<
transfer_source_format,
CELL_GCM_TRANSFER_SCALE_FORMAT_A1R5G5B5,
CELL_GCM_TRANSFER_SCALE_FORMAT_X8B8G8R8>(in);
}
enum class transfer_destination_format : u8
{
r5g6b5 = CELL_GCM_TRANSFER_SURFACE_FORMAT_R5G6B5,
a8r8g8b8 = CELL_GCM_TRANSFER_SURFACE_FORMAT_A8R8G8B8,
y32 = CELL_GCM_TRANSFER_SURFACE_FORMAT_Y32,
};
static inline auto to_transfer_destination_format(u32 in)
{
return gcm_enum_cast<transfer_destination_format>(in,
{
CELL_GCM_TRANSFER_SURFACE_FORMAT_A8R8G8B8,
CELL_GCM_TRANSFER_SURFACE_FORMAT_R5G6B5,
CELL_GCM_TRANSFER_SURFACE_FORMAT_Y32
});
}
enum class context_surface : u32
{
surface2d = CELL_GCM_CONTEXT_SURFACE2D,
swizzle2d = CELL_GCM_CONTEXT_SWIZZLE2D,
};
static inline auto to_context_surface(u32 in)
{
return gcm_enum_cast<context_surface>(in,
{
CELL_GCM_CONTEXT_SURFACE2D,
CELL_GCM_CONTEXT_SWIZZLE2D
});
}
enum class context_dma : u32
{
to_memory_get_report = CELL_GCM_CONTEXT_DMA_REPORT_LOCATION_LOCAL,
report_location_main = CELL_GCM_CONTEXT_DMA_REPORT_LOCATION_MAIN,
memory_host_buffer = CELL_GCM_CONTEXT_DMA_MEMORY_HOST_BUFFER,
};
static inline auto to_context_dma(u32 in)
{
return gcm_enum_cast<context_dma>(in,
{
CELL_GCM_CONTEXT_DMA_REPORT_LOCATION_LOCAL,
CELL_GCM_CONTEXT_DMA_REPORT_LOCATION_MAIN,
CELL_GCM_CONTEXT_DMA_MEMORY_HOST_BUFFER
});
}
}
}
| 58,054
|
C++
|
.h
| 1,627
| 32.673632
| 158
| 0.715277
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
5,999
|
RSXZCULL.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/RSXZCULL.h
|
#pragma once
#include <util/types.hpp>
#include <util/vm.hpp>
#include <Emu/Memory/vm.h>
#include "rsx_utils.h"
#include <vector>
#include <stack>
#include <unordered_map>
namespace rsx
{
class thread;
static inline std::string_view location_tostring(u32 location)
{
ensure(location < 2);
constexpr const char* location_names[2] = { "CELL_GCM_LOCATION_LOCAL", "CELL_GCM_LOCATION_MAIN" };
return location_names[location];
}
static inline u32 classify_location(u32 address)
{
return (address >= rsx::constants::local_mem_base) ? CELL_GCM_LOCATION_LOCAL : CELL_GCM_LOCATION_MAIN;
}
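// Illustrative sketch, not part of the original header: the two helpers above
// are naturally combined when reporting where a guest address resides. The
// helper name is invented for this example.
static inline std::string_view location_name_of(u32 address)
{
return location_tostring(classify_location(address));
}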
namespace reports
{
struct occlusion_query_info
{
u32 driver_handle;
u32 result;
u32 num_draws;
u32 data_type;
u64 sync_tag;
u64 timestamp;
bool pending;
bool active;
bool owned;
};
struct queued_report_write
{
u32 type = CELL_GCM_ZPASS_PIXEL_CNT;
u32 counter_tag;
occlusion_query_info* query;
queued_report_write* forwarder;
vm::addr_t sink; // Memory location of the report
std::vector<vm::addr_t> sink_alias; // Aliased memory addresses
};
struct query_search_result
{
bool found;
u32 raw_zpass_result;
std::vector<occlusion_query_info*> queries;
};
struct query_stat_counter
{
u32 result;
u32 flags;
};
struct sync_hint_payload_t
{
occlusion_query_info* query;
vm::addr_t address;
void* other_params;
};
struct MMIO_page_data_t : public rsx::ref_counted
{
utils::protection prot = utils::protection::rw;
};
enum sync_control
{
sync_none = 0,
sync_defer_copy = 1, // If set, return a zcull intr code instead of forcefully reading zcull data
sync_no_notify = 2 // If set, backend hint notifications will not be made
};
enum constants
{
max_zcull_delay_us = 300, // Delay before a report update operation is forced to retire
min_zcull_tick_us = 100, // Default tick duration. To avoid hardware spam, we schedule peeks in multiples of this.
occlusion_query_count = 2048, // Number of occlusion query slots available. Real hardware actually has far fewer units before choking
max_safe_queue_depth = 1792, // Number of in-flight queries before we start forcefully flushing data from the GPU device.
max_stat_registers = 8192 // Size of the statistics cache
};
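// Illustrative sketch, not part of the original header: rounding a requested
// delay up to a multiple of min_zcull_tick_us, as described by the comments
// above. The helper name is invented for this example.
constexpr u64 align_to_zcull_tick(u64 requested_delay_us)
{
return ((requested_delay_us + min_zcull_tick_us - 1) / min_zcull_tick_us) * min_zcull_tick_us;
}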
class ZCULL_control
{
private:
std::unordered_map<u32, MMIO_page_data_t> m_locked_pages[2];
atomic_t<bool> m_pages_accessed[2] = { false, false };
atomic_t<s32> m_critical_reports_in_flight = { 0 };
shared_mutex m_pages_mutex;
void on_report_enqueued(vm::addr_t address);
void on_report_completed(vm::addr_t address);
void disable_optimizations(class ::rsx::thread* ptimer, u32 location);
protected:
bool unit_enabled = false; // The ZCULL unit is on
bool write_enabled = false; // A surface in the ZCULL-monitored tile region has been loaded for rasterization
bool stats_enabled = false; // Collecting of ZCULL statistics is enabled (not the same as pixels passing the Z test!)
bool zpass_count_enabled = false; // Collecting of ZPASS statistics is enabled. If this is off, the counter does not increment
bool host_queries_active = false; // The backend/host is gathering Z data for the ZCULL unit
std::array<occlusion_query_info, 2048> m_occlusion_query_data = {};
std::stack<occlusion_query_info*> m_free_occlusion_pool{};
occlusion_query_info* m_current_task = nullptr;
u32 m_statistics_tag_id = 0;
// Scheduling clock. Granularity is the min_zcull_tick value.
u64 m_tsc = 0;
u64 m_next_tsc = 0;
// Incremental tag used for tracking sync events. Hardware clock resolution is too low for the job.
u64 m_sync_tag = 0;
u64 m_timer = 0;
std::vector<queued_report_write> m_pending_writes{};
std::array<query_stat_counter, max_stat_registers> m_statistics_map{};
// Enables/disables the ZCULL unit
void set_active(class ::rsx::thread* ptimer, bool state, bool flush_queue);
// Checks current state of the unit and applies changes
void check_state(class ::rsx::thread* ptimer, bool flush_queue);
// Sets up a new query slot and sets it to the current task
void allocate_new_query(class ::rsx::thread* ptimer);
// Free a query slot in use
void free_query(occlusion_query_info* query);
// Write report to memory
void write(vm::addr_t sink, u64 timestamp, u32 type, u32 value);
void write(queued_report_write* writer, u64 timestamp, u32 value);
// Retire operation
void retire(class ::rsx::thread* ptimer, queued_report_write* writer, u32 result);
public:
ZCULL_control();
virtual ~ZCULL_control();
ZCULL_control(const ZCULL_control&) = delete;
ZCULL_control& operator=(const ZCULL_control&) = delete;
void set_enabled(class ::rsx::thread* ptimer, bool state, bool flush_queue = false);
void set_status(class ::rsx::thread* ptimer, bool surface_active, bool zpass_active, bool zcull_stats_active, bool flush_queue = false);
// Read current zcull statistics into the address provided
void read_report(class ::rsx::thread* ptimer, vm::addr_t sink, u32 type);
// Clears current stat block and increments stat_tag_id
void clear(class ::rsx::thread* ptimer, u32 type);
// Forcefully flushes all pending writes
void sync(class ::rsx::thread* ptimer);
// Conditionally sync any pending writes if range overlaps
flags32_t read_barrier(class ::rsx::thread* ptimer, u32 memory_address, u32 memory_range, flags32_t flags);
flags32_t read_barrier(class ::rsx::thread* ptimer, u32 memory_address, occlusion_query_info* query);
// Call once every 'tick' to update; an optional address can be provided to partially sync until that address is processed
void update(class ::rsx::thread* ptimer, u32 sync_address = 0, bool hint = false);
// Draw call notification
void on_draw();
// Sync hint notification
void on_sync_hint(sync_hint_payload_t payload);
// Check for pending writes
bool has_pending() const { return !m_pending_writes.empty(); }
// Search for query synchronized at address
query_search_result find_query(vm::addr_t sink_address, bool all);
// Copies queries in range rebased from source range to destination range
u32 copy_reports_to(u32 start, u32 range, u32 dest);
// Check paging issues
bool on_access_violation(u32 address);
// Optimization check
bool is_query_result_urgent(u32 address) const { return m_pages_accessed[rsx::classify_location(address)]; }
// Backend methods (optional, will return everything as always visible by default)
virtual void begin_occlusion_query(occlusion_query_info* /*query*/) {}
virtual void end_occlusion_query(occlusion_query_info* /*query*/) {}
virtual bool check_occlusion_query_status(occlusion_query_info* /*query*/) { return true; }
virtual void get_occlusion_query_result(occlusion_query_info* query) { query->result = -1; }
virtual void discard_occlusion_query(occlusion_query_info* /*query*/) {}
};
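// Illustrative sketch, not part of the original header: a backend specializes
// ZCULL_control by overriding the optional occlusion query hooks declared
// above. The class name is invented; it reports every query as complete and
// fully visible, mirroring the documented defaults.
class always_visible_zcull final : public ZCULL_control
{
void begin_occlusion_query(occlusion_query_info* /*query*/) override {}
void end_occlusion_query(occlusion_query_info* /*query*/) override {}
bool check_occlusion_query_status(occlusion_query_info* /*query*/) override { return true; }
void get_occlusion_query_result(occlusion_query_info* query) override { query->result = -1; }
void discard_occlusion_query(occlusion_query_info* /*query*/) override {}
};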
// Helper class for conditional rendering
struct conditional_render_eval
{
bool enabled = false;
bool eval_failed = false;
bool hw_cond_active = false;
bool reserved = false;
std::vector<occlusion_query_info*> eval_sources;
u64 eval_sync_tag = 0;
u32 eval_address = 0;
// Resets common data
void reset();
// Returns true if rendering is disabled as per conditional render test
bool disable_rendering() const;
// Returns true if a conditional render is active but not yet evaluated
bool eval_pending() const;
// Enable conditional rendering
void enable_conditional_render(thread* pthr, u32 address);
// Disable conditional rendering
void disable_conditional_render(thread* pthr);
// Sets data sources for predicate evaluation
void set_eval_sources(std::vector<occlusion_query_info*>& sources);
// Sets evaluation result. Result is true if conditional evaluation failed
void set_eval_result(thread* pthr, bool failed);
// Evaluates the condition by accessing memory directly
void eval_result(thread* pthr);
};
}
}
| 8,398
|
C++
|
.h
| 188
| 39.547872
| 140
| 0.701828
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,000
|
gcm_printing.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/gcm_printing.h
|
#pragma once
#include "util/types.hpp"
#include <string>
#include <functional>
namespace rsx
{
std::pair<std::string_view, std::string_view> get_method_name(u32 id, std::string& result_str);
std::add_pointer_t<void(std::string&, u32, u32)> get_pretty_printing_function(u32 id);
}
| 285
|
C++
|
.h
| 9
| 30.111111
| 96
| 0.74359
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,001
|
RSXOffload.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/RSXOffload.h
|
#pragma once
#include "util/types.hpp"
#include "Utilities/address_range.h"
#include "gcm_enums.h"
#include <vector>
template <typename T>
class named_thread;
namespace rsx
{
class dma_manager
{
enum op
{
raw_copy = 0,
vector_copy = 1,
index_emulate = 2,
callback = 3
};
struct transport_packet
{
op type{};
std::vector<u8> opt_storage{};
void* src{};
void* dst{};
u32 length{};
u32 aux_param0{};
u32 aux_param1{};
transport_packet(void *_dst, void *_src, u32 len)
: type(op::raw_copy), src(_src), dst(_dst), length(len)
{}
transport_packet(void *_dst, std::vector<u8>& _src, u32 len)
: type(op::vector_copy), opt_storage(std::move(_src)), dst(_dst), length(len)
{}
transport_packet(void *_dst, rsx::primitive_type prim, u32 len)
: type(op::index_emulate), dst(_dst), length(len), aux_param0(static_cast<u8>(prim))
{}
transport_packet(u32 command, void* args)
: type(op::callback), src(args), aux_param0(command)
{}
transport_packet(const transport_packet&) = delete;
transport_packet& operator=(const transport_packet&) = delete;
};
atomic_t<bool> m_mem_fault_flag = false;
struct offload_thread;
std::shared_ptr<named_thread<offload_thread>> m_thread;
// TODO: Improved benchmarks here; value determined by profiling on a Ryzen CPU, rounded to the nearest 512 bytes
const u32 max_immediate_transfer_size = 3584;
public:
dma_manager() = default;
// initialization
void init();
// General transport
void copy(void *dst, std::vector<u8>& src, u32 length) const;
void copy(void *dst, void *src, u32 length) const;
// Vertex utilities
void emulate_as_indexed(void *dst, rsx::primitive_type primitive, u32 count);
// Renderer callback
void backend_ctrl(u32 request_code, void* args);
// Synchronization
bool is_current_thread() const;
bool sync() const;
void join();
void set_mem_fault_flag();
void clear_mem_fault_flag();
// Fault recovery
utils::address_range get_fault_range(bool writing) const;
};
}
| 2,060
|
C++
|
.h
| 68
| 26.926471
| 115
| 0.689655
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,002
|
NullGSRender.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Null/NullGSRender.h
|
#pragma once
#include "Emu/RSX/GSRender.h"
class NullGSRender : public GSRender
{
public:
u64 get_cycles() final;
NullGSRender(utils::serial* ar) noexcept;
NullGSRender() noexcept : NullGSRender(nullptr) {}
private:
void end() override;
};
| 247
|
C++
|
.h
| 11
| 20.818182
| 51
| 0.772532
|
RPCS3/rpcs3
| 15,204
| 1,895
| 1,021
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,003
|
texture_cache.h
|
RPCS3_rpcs3/rpcs3/Emu/RSX/Common/texture_cache.h
|
#pragma once
#include "Emu/RSX/Common/simple_array.hpp"
#include "Emu/RSX/Core/RSXContext.h"
#include "Emu/RSX/RSXThread.h"
#include "texture_cache_utils.h"
#include "texture_cache_predictor.h"
#include "texture_cache_helpers.h"
#include <unordered_map>
#include "Emu/Cell/timers.hpp"
#define RSX_GCM_FORMAT_IGNORED 0
namespace rsx
{
namespace helpers = rsx::texture_cache_helpers;
template <typename derived_type, typename _traits>
class texture_cache
{
public:
using traits = _traits;
using commandbuffer_type = typename traits::commandbuffer_type;
using section_storage_type = typename traits::section_storage_type;
using image_resource_type = typename traits::image_resource_type;
using image_view_type = typename traits::image_view_type;
using image_storage_type = typename traits::image_storage_type;
using texture_format = typename traits::texture_format;
using viewable_image_type = typename traits::viewable_image_type;
using predictor_type = texture_cache_predictor<traits>;
using ranged_storage = rsx::ranged_storage<traits>;
using ranged_storage_block = typename ranged_storage::block_type;
using copy_region_descriptor = copy_region_descriptor_base<typename traits::image_resource_type>;
private:
static_assert(std::is_base_of_v<rsx::cached_texture_section<section_storage_type, traits>, section_storage_type>, "section_storage_type must derive from rsx::cached_texture_section");
/**
* Helper structs/enums
*/
// Keep track of cache misses to pre-emptively flush some addresses
struct framebuffer_memory_characteristics
{
u32 misses;
texture_format format;
};
public:
// Struct to hold data on sections to be paged back onto CPU memory
struct thrashed_set
{
bool violation_handled = false;
bool flushed = false;
bool invalidate_samplers = false;
invalidation_cause cause;
std::vector<section_storage_type*> sections_to_flush; // Sections to be flushed
std::vector<section_storage_type*> sections_to_unprotect; // These sections are to be unprotected and discarded by the caller
std::vector<section_storage_type*> sections_to_exclude; // These sections are to be excluded from protection manipulation (subtracted from other sections)
u32 num_flushable = 0;
u32 num_excluded = 0; // Sections-to-exclude + sections that would have been excluded but are false positives
u32 num_discarded = 0;
u64 cache_tag = 0;
address_range fault_range;
address_range invalidate_range;
void clear_sections()
{
sections_to_flush = {};
sections_to_unprotect = {};
sections_to_exclude = {};
num_flushable = 0;
}
bool empty() const
{
return sections_to_flush.empty() && sections_to_unprotect.empty() && sections_to_exclude.empty();
}
bool is_flushed() const
{
return flushed || sections_to_flush.empty();
}
#ifdef TEXTURE_CACHE_DEBUG
void check_pre_sanity() const
{
usz flush_and_unprotect_count = sections_to_flush.size() + sections_to_unprotect.size();
usz exclude_count = sections_to_exclude.size();
//-------------------------
// It is illegal to have only exclusions except when reading from a range with only RO sections
ensure(flush_and_unprotect_count > 0 || exclude_count == 0 || cause.is_read());
if (flush_and_unprotect_count == 0 && exclude_count > 0)
{
// double-check that only RO sections exist
for (auto *tex : sections_to_exclude)
ensure(tex->get_protection() == utils::protection::ro);
}
//-------------------------
// Check that the number of sections we "found" matches the sections known to be in the fault range
const auto min_overlap_fault_no_ro = tex_cache_checker.get_minimum_number_of_sections(fault_range);
const auto min_overlap_invalidate_no_ro = tex_cache_checker.get_minimum_number_of_sections(invalidate_range);
const u16 min_overlap_fault = min_overlap_fault_no_ro.first + (cause.is_read() ? 0 : min_overlap_fault_no_ro.second);
const u16 min_overlap_invalidate = min_overlap_invalidate_no_ro.first + (cause.is_read() ? 0 : min_overlap_invalidate_no_ro.second);
AUDIT(min_overlap_fault <= min_overlap_invalidate);
const u16 min_flush_or_unprotect = min_overlap_fault;
// we must flush or unprotect *all* sections that partially overlap the fault range
ensure(flush_and_unprotect_count >= min_flush_or_unprotect);
// result must contain *all* sections that overlap (completely or partially) the invalidation range
ensure(flush_and_unprotect_count + exclude_count >= min_overlap_invalidate);
}
void check_post_sanity() const
{
AUDIT(is_flushed());
// Check that the number of sections we "found" matches the sections known to be in the fault range
tex_cache_checker.check_unprotected(fault_range, cause.is_read() && invalidation_keep_ro_during_read, true);
// Check that the cache has the correct protections
tex_cache_checker.verify();
}
#endif // TEXTURE_CACHE_DEBUG
};
struct intersecting_set
{
std::vector<section_storage_type*> sections = {};
address_range invalidate_range = {};
bool has_flushables = false;
};
struct deferred_subresource : image_section_attributes_t
{
image_resource_type external_handle = 0;
std::vector<copy_region_descriptor> sections_to_copy;
texture_channel_remap_t remap;
deferred_request_command op = deferred_request_command::nop;
u32 external_ref_addr = 0;
u16 x = 0;
u16 y = 0;
utils::address_range cache_range;
bool do_not_cache = false;
deferred_subresource() = default;
deferred_subresource(image_resource_type _res, deferred_request_command _op,
const image_section_attributes_t& attr, position2u offset,
texture_channel_remap_t _remap)
: external_handle(_res)
, remap(std::move(_remap))
, op(_op)
, x(offset.x)
, y(offset.y)
{
static_cast<image_section_attributes_t&>(*this) = attr;
}
viewable_image_type as_viewable() const
{
return static_cast<viewable_image_type>(external_handle);
}
image_resource_type src0() const
{
if (external_handle)
{
return external_handle;
}
if (!sections_to_copy.empty())
{
return sections_to_copy[0].src;
}
// Return typed null
return external_handle;
}
};
struct sampled_image_descriptor : public sampled_image_descriptor_base
{
image_view_type image_handle = 0;
deferred_subresource external_subresource_desc = {};
bool flag = false;
sampled_image_descriptor() = default;
sampled_image_descriptor(image_view_type handle, texture_upload_context ctx, rsx::format_class ftype,
size3f scale, rsx::texture_dimension_extended type, bool cyclic_reference = false,
u8 msaa_samples = 1)
{
image_handle = handle;
upload_context = ctx;
format_class = ftype;
is_cyclic_reference = cyclic_reference;
image_type = type;
samples = msaa_samples;
texcoord_xform.scale[0] = scale.width;
texcoord_xform.scale[1] = scale.height;
texcoord_xform.scale[2] = scale.depth;
texcoord_xform.bias[0] = 0.;
texcoord_xform.bias[1] = 0.;
texcoord_xform.bias[2] = 0.;
texcoord_xform.clamp = false;
}
sampled_image_descriptor(image_resource_type external_handle, deferred_request_command reason,
const image_section_attributes_t& attr, position2u src_offset,
texture_upload_context ctx, rsx::format_class ftype, size3f scale,
rsx::texture_dimension_extended type, const texture_channel_remap_t& remap)
{
external_subresource_desc = { external_handle, reason, attr, src_offset, remap };
image_handle = 0;
upload_context = ctx;
format_class = ftype;
image_type = type;
texcoord_xform.scale[0] = scale.width;
texcoord_xform.scale[1] = scale.height;
texcoord_xform.scale[2] = scale.depth;
texcoord_xform.bias[0] = 0.;
texcoord_xform.bias[1] = 0.;
texcoord_xform.bias[2] = 0.;
texcoord_xform.clamp = false;
}
inline bool section_fills_target(const copy_region_descriptor& cpy) const
{
return cpy.dst_x == 0 && cpy.dst_y == 0 &&
cpy.dst_w == external_subresource_desc.width && cpy.dst_h == external_subresource_desc.height;
}
inline bool section_is_transfer_only(const copy_region_descriptor& cpy) const
{
return cpy.src_w == cpy.dst_w && cpy.src_h == cpy.dst_h;
}
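// Reduce an atlas_gather request: drop copies hidden behind a later full-coverage section and, if a single full-coverage section remains, downgrade to a plain static copy or blit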
void simplify()
{
if (external_subresource_desc.op != deferred_request_command::atlas_gather)
{
// Only atlas simplification supported for now
return;
}
auto& sections = external_subresource_desc.sections_to_copy;
if (sections.size() > 1)
{
// GPU image copies are expensive, cull unnecessary transfers if possible
for (auto idx = sections.size() - 1; idx >= 1; idx--)
{
if (section_fills_target(sections[idx]))
{
const auto remaining = sections.size() - idx;
std::memcpy(
sections.data(),
&sections[idx],
remaining * sizeof(sections[0])
);
sections.resize(remaining);
break;
}
}
}
// Optimizations in the straightforward methods copy_image_static and copy_image_dynamic make them preferred over the atlas method
if (sections.size() == 1 && section_fills_target(sections[0]))
{
const auto cpy = sections[0];
external_subresource_desc.external_ref_addr = cpy.base_addr;
if (section_is_transfer_only(cpy))
{
// Change the command to copy_image_static
external_subresource_desc.external_handle = cpy.src;
external_subresource_desc.x = cpy.src_x;
external_subresource_desc.y = cpy.src_y;
external_subresource_desc.width = cpy.src_w;
external_subresource_desc.height = cpy.src_h;
external_subresource_desc.op = deferred_request_command::copy_image_static;
}
else
{
// Blit op is a semantic variant of the copy and atlas ops.
// We can simply reuse the atlas handler for it for now; tagging it separately still allows this simplification.
external_subresource_desc.op = deferred_request_command::blit_image_static;
}
}
}
// Returns true if at least threshold% is covered in pixels
bool atlas_covers_target_area(int threshold) const
{
const int target_area = (external_subresource_desc.width * external_subresource_desc.height * external_subresource_desc.depth * threshold) / 100;
int covered_area = 0;
areai bbox{smax, smax, 0, 0};
for (const auto& section : external_subresource_desc.sections_to_copy)
{
if (section.level != 0)
{
// Ignore slices other than mip0
continue;
}
// Calculate virtual Y coordinate
const auto dst_y = (section.dst_z * external_subresource_desc.height) + section.dst_y;
// Add this slice's dimensions to the total
covered_area += section.dst_w * section.dst_h;
// Extend the covered bbox
bbox.x1 = std::min<int>(section.dst_x, bbox.x1);
bbox.x2 = std::max<int>(section.dst_x + section.dst_w, bbox.x2);
bbox.y1 = std::min<int>(dst_y, bbox.y1);
bbox.y2 = std::max<int>(dst_y + section.dst_h, bbox.y2);
}
if (covered_area < target_area)
{
return false;
}
if (const auto bounds_area = bbox.width() * bbox.height();
bounds_area < target_area)
{
return false;
}
return true;
}
u32 encoded_component_map() const override
{
if (image_handle)
{
return image_handle->encoded_component_map();
}
return 0;
}
bool validate() const
{
return (image_handle || external_subresource_desc.op != deferred_request_command::nop);
}
/**
* Returns a boolean true/false if the descriptor is expired
* Optionally returns a second variable that contains the surface reference.
* The surface reference can be used to insert a texture barrier or inject a deferred resource
*/
template <typename surface_store_type, typename surface_type = typename surface_store_type::surface_type>
std::pair<bool, surface_type> is_expired(surface_store_type& surface_cache)
{
if (upload_context != rsx::texture_upload_context::framebuffer_storage)
{
return {};
}
// Expired, but may still be valid. Check if the texture is still accessible
auto ref_image = image_handle ? image_handle->image() : external_subresource_desc.external_handle;
surface_type surface = dynamic_cast<surface_type>(ref_image);
// Try and grab a cache reference in case of MSAA resolve target or compositing op
if (!surface)
{
if (!(surface = surface_cache.get_surface_at(ref_address)))
{
// Compositing op. Just ignore expiry for now
ensure(!ref_image);
return {};
}
}
ensure(surface);
if (!ref_image || surface->get_surface(rsx::surface_access::gpu_reference) == ref_image)
{
// Same image, so configuration did not change.
if (surface_cache.cache_tag <= surface_cache_tag &&
surface->last_use_tag <= surface_cache_tag)
{
external_subresource_desc.do_not_cache = false;
return {};
}
// Image was written to since last bind. Insert texture barrier.
surface_cache_tag = surface->last_use_tag;
is_cyclic_reference = surface_cache.address_is_bound(ref_address);
external_subresource_desc.do_not_cache = is_cyclic_reference;
switch (external_subresource_desc.op)
{
case deferred_request_command::copy_image_dynamic:
case deferred_request_command::copy_image_static:
external_subresource_desc.op = (is_cyclic_reference) ? deferred_request_command::copy_image_dynamic : deferred_request_command::copy_image_static;
[[ fallthrough ]];
default:
return { false, surface };
}
}
// Reupload
return { true, nullptr };
}
};
protected:
/**
* Variable declarations
*/
shared_mutex m_cache_mutex;
ranged_storage m_storage;
std::unordered_multimap<u32, std::pair<deferred_subresource, image_view_type>> m_temporary_subresource_cache;
std::vector<image_view_type> m_uncached_subresources;
predictor_type m_predictor;
atomic_t<u64> m_cache_update_tag = {0};
address_range read_only_range;
address_range no_access_range;
// Set of messages to only emit once
std::unordered_set<std::string> m_once_only_messages_set;
//Set when a shader read-only texture data suddenly becomes contested, usually by fbo memory
bool read_only_tex_invalidate = false;
//Store of all objects in a flush_always state. A lazy readback is attempted every draw call
std::unordered_map<address_range, section_storage_type*> m_flush_always_cache;
u64 m_flush_always_update_timestamp = 0;
//Memory usage
const u32 m_max_zombie_objects = 64; //Limit on how many texture objects to keep around for reuse after they are invalidated
//Other statistics
atomic_t<u32> m_flushes_this_frame = { 0 };
atomic_t<u32> m_misses_this_frame = { 0 };
atomic_t<u32> m_speculations_this_frame = { 0 };
atomic_t<u32> m_unavoidable_hard_faults_this_frame = { 0 };
atomic_t<u32> m_texture_upload_calls_this_frame = { 0 };
atomic_t<u32> m_texture_upload_misses_this_frame = { 0 };
atomic_t<u32> m_texture_copies_ellided_this_frame = { 0 };
static const u32 m_predict_max_flushes_per_frame = 50; // Above this number the predictions are disabled
// Invalidation
static const bool invalidation_ignore_unsynchronized = true; // If true, unsynchronized sections don't get forcefully flushed unless they overlap the fault range
static const bool invalidation_keep_ro_during_read = true; // If true, RO sections are not invalidated during read faults
/**
* Virtual Methods
*/
virtual image_view_type create_temporary_subresource_view(commandbuffer_type&, image_resource_type* src, u32 gcm_format, u16 x, u16 y, u16 w, u16 h, const texture_channel_remap_t& remap_vector) = 0;
virtual image_view_type create_temporary_subresource_view(commandbuffer_type&, image_storage_type* src, u32 gcm_format, u16 x, u16 y, u16 w, u16 h, const texture_channel_remap_t& remap_vector) = 0;
virtual void release_temporary_subresource(image_view_type rsc) = 0;
virtual section_storage_type* create_new_texture(commandbuffer_type&, const address_range &rsx_range, u16 width, u16 height, u16 depth, u16 mipmaps, u32 pitch, u32 gcm_format,
rsx::texture_upload_context context, rsx::texture_dimension_extended type, bool swizzled, component_order swizzle_flags, rsx::flags32_t flags) = 0;
virtual section_storage_type* upload_image_from_cpu(commandbuffer_type&, const address_range &rsx_range, u16 width, u16 height, u16 depth, u16 mipmaps, u32 pitch, u32 gcm_format, texture_upload_context context,
const std::vector<rsx::subresource_layout>& subresource_layout, rsx::texture_dimension_extended type, bool swizzled) = 0;
virtual section_storage_type* create_nul_section(commandbuffer_type&, const address_range &rsx_range, const image_section_attributes_t& attrs, const GCM_tile_reference& tile, bool memory_load) = 0;
virtual void set_component_order(section_storage_type& section, u32 gcm_format, component_order expected) = 0;
virtual void insert_texture_barrier(commandbuffer_type&, image_storage_type* tex, bool strong_ordering = true) = 0;
virtual image_view_type generate_cubemap_from_images(commandbuffer_type&, u32 gcm_format, u16 size, const std::vector<copy_region_descriptor>& sources, const texture_channel_remap_t& remap_vector) = 0;
virtual image_view_type generate_3d_from_2d_images(commandbuffer_type&, u32 gcm_format, u16 width, u16 height, u16 depth, const std::vector<copy_region_descriptor>& sources, const texture_channel_remap_t& remap_vector) = 0;
virtual image_view_type generate_atlas_from_images(commandbuffer_type&, u32 gcm_format, u16 width, u16 height, const std::vector<copy_region_descriptor>& sections_to_copy, const texture_channel_remap_t& remap_vector) = 0;
virtual image_view_type generate_2d_mipmaps_from_images(commandbuffer_type&, u32 gcm_format, u16 width, u16 height, const std::vector<copy_region_descriptor>& sections_to_copy, const texture_channel_remap_t& remap_vector) = 0;
virtual void update_image_contents(commandbuffer_type&, image_view_type dst, image_resource_type src, u16 width, u16 height) = 0;
virtual bool render_target_format_is_compatible(image_storage_type* tex, u32 gcm_format) = 0;
virtual void prepare_for_dma_transfers(commandbuffer_type&) = 0;
virtual void cleanup_after_dma_transfers(commandbuffer_type&) = 0;
public:
virtual void destroy() = 0;
virtual bool is_depth_texture(u32, u32) = 0;
virtual void on_section_destroyed(section_storage_type& /*section*/)
{}
protected:
/**
* Helpers
*/
inline void update_cache_tag()
{
m_cache_update_tag = rsx::get_shared_tag();
}
template <typename CharT, usz N, typename... Args>
void emit_once(bool error, const CharT(&fmt)[N], const Args&... params)
{
const auto result = m_once_only_messages_set.emplace(fmt::format(fmt, params...));
if (!result.second)
return;
if (error)
rsx_log.error("%s", *result.first);
else
rsx_log.warning("%s", *result.first);
}
template <typename CharT, usz N, typename... Args>
void err_once(const CharT(&fmt)[N], const Args&... params)
{
emit_once(true, fmt, params...);
}
template <typename CharT, usz N, typename... Args>
void warn_once(const CharT(&fmt)[N], const Args&... params)
{
emit_once(false, fmt, params...);
}
/**
* Internal implementation methods and helpers
*/
inline bool region_intersects_cache(const address_range &test_range, bool is_writing)
{
AUDIT(test_range.valid());
// Quick range overlaps with cache tests
if (!is_writing)
{
if (!no_access_range.valid() || !test_range.overlaps(no_access_range))
return false;
}
else
{
if (!read_only_range.valid() || !test_range.overlaps(read_only_range))
{
// Doesn't fall in the read_only textures range; check render targets
if (!no_access_range.valid() || !test_range.overlaps(no_access_range))
return false;
}
}
// Check that there is at least one valid (locked) section in the test_range
reader_lock lock(m_cache_mutex);
if (m_storage.range_begin(test_range, locked_range, true) == m_storage.range_end())
return false;
// We do intersect the cache
return true;
}
/**
* Section invalidation
*/
private:
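// Write back all sections in data.sections_to_flush to guest memory.
// Unsynchronized sections (and flush_always regions written to since their last sync) are first batched through a single DMA readback pass.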
template <typename ...Args>
void flush_set(commandbuffer_type& cmd, thrashed_set& data, std::function<void()> on_data_transfer_completed, Args&&... extras)
{
AUDIT(!data.flushed);
if (data.sections_to_flush.size() > 1)
{
// Sort with oldest data first
// Ensures that new data tramples older data
std::sort(data.sections_to_flush.begin(), data.sections_to_flush.end(), FN(x->last_write_tag < y->last_write_tag));
}
rsx::simple_array<section_storage_type*> sections_to_transfer;
for (auto &surface : data.sections_to_flush)
{
if (!surface->is_synchronized())
{
sections_to_transfer.push_back(surface);
}
else if (surface->get_memory_read_flags() == rsx::memory_read_flags::flush_always)
{
// This region is set to always read from itself (unavoidable hard sync)
const auto ROP_timestamp = rsx::get_current_renderer()->ROP_sync_timestamp;
if (ROP_timestamp > surface->get_sync_timestamp())
{
sections_to_transfer.push_back(surface);
}
}
}
if (!sections_to_transfer.empty())
{
// Batch all hard faults together
prepare_for_dma_transfers(cmd);
for (auto &surface : sections_to_transfer)
{
surface->copy_texture(cmd, true, std::forward<Args>(extras)...);
}
cleanup_after_dma_transfers(cmd);
}
if (on_data_transfer_completed)
{
on_data_transfer_completed();
}
for (auto &surface : data.sections_to_flush)
{
surface->flush();
// Exclude this region when flushing other sections that should not trample it
// If we overlap an excluded RO, set it as dirty
for (auto &other : data.sections_to_exclude)
{
AUDIT(other != surface);
if (!other->is_flushable())
{
if (other->overlaps(*surface, section_bounds::confirmed_range))
{
// This should never happen. It will raise exceptions later due to a dirty region being locked
rsx_log.error("Excluded region overlaps with flushed surface!");
other->set_dirty(true);
}
}
else if (surface->last_write_tag > other->last_write_tag)
{
other->add_flush_exclusion(surface->get_confirmed_range());
}
}
}
// Resync any exclusions that do not require flushing
std::vector<section_storage_type*> surfaces_to_inherit;
for (auto& surface : data.sections_to_exclude)
{
if (surface->get_context() != texture_upload_context::framebuffer_storage)
{
continue;
}
// Check for any 'newer' flushed overlaps. Memory must be re-acquired to avoid holding stale contents
// Note that the surface cache inheritance will minimize the impact
surfaces_to_inherit.clear();
for (auto& flushed_surface : data.sections_to_flush)
{
if (flushed_surface->get_context() != texture_upload_context::framebuffer_storage ||
flushed_surface->last_write_tag <= surface->last_write_tag ||
!flushed_surface->get_confirmed_range().overlaps(surface->get_confirmed_range()))
{
continue;
}
surfaces_to_inherit.push_back(flushed_surface);
}
surface->sync_surface_memory(surfaces_to_inherit);
}
data.flushed = true;
update_cache_tag();
}
// Merges the protected ranges of the sections in "sections" into "result"
void merge_protected_ranges(address_range_vector &result, const std::vector<section_storage_type*> &sections)
{
result.reserve(result.size() + sections.size());
// Copy ranges to result, merging them if possible
for (const auto &section : sections)
{
ensure(section->is_locked(true));
const auto &new_range = section->get_locked_range();
AUDIT(new_range.is_page_range());
result.merge(new_range);
}
}
// NOTE: It is *very* important that data contains exclusions for *all* sections that overlap sections_to_unprotect/flush
// Otherwise the page protections will end up incorrect and things will break!
void unprotect_set(thrashed_set& data)
{
auto protect_ranges = [](address_range_vector& _set, utils::protection _prot)
{
//u32 count = 0;
for (auto &range : _set)
{
if (range.valid())
{
rsx::memory_protect(range, _prot);
//count++;
}
}
//rsx_log.error("Set protection of %d blocks to 0x%x", count, static_cast<u32>(prot));
};
auto discard_set = [](std::vector<section_storage_type*>& _set)
{
for (auto* section : _set)
{
ensure(section->is_flushed() || section->is_dirty());
section->discard(/*set_dirty*/ false);
}
};
// Sanity checks
AUDIT(data.fault_range.is_page_range());
AUDIT(data.invalidate_range.is_page_range());
AUDIT(data.is_flushed());
// Merge ranges to unprotect
address_range_vector ranges_to_unprotect;
address_range_vector ranges_to_protect_ro;
ranges_to_unprotect.reserve(data.sections_to_unprotect.size() + data.sections_to_flush.size() + data.sections_to_exclude.size());
merge_protected_ranges(ranges_to_unprotect, data.sections_to_unprotect);
merge_protected_ranges(ranges_to_unprotect, data.sections_to_flush);
AUDIT(!ranges_to_unprotect.empty());
// Apply exclusions and collect ranges of excluded pages that need to be reprotected RO (i.e. only overlap RO regions)
if (!data.sections_to_exclude.empty())
{
ranges_to_protect_ro.reserve(data.sections_to_exclude.size());
u32 no_access_count = 0;
for (const auto &excluded : data.sections_to_exclude)
{
ensure(excluded->is_locked(true));
address_range exclusion_range = excluded->get_locked_range();
// We need to make sure that the exclusion range is *inside* invalidate range
exclusion_range.intersect(data.invalidate_range);
// Sanity checks
AUDIT(exclusion_range.is_page_range());
AUDIT((data.cause.is_read() && !excluded->is_flushable()) || data.cause.skip_fbos() || !exclusion_range.overlaps(data.fault_range));
// Apply exclusion
ranges_to_unprotect.exclude(exclusion_range);
// Keep track of RO exclusions
// TODO ruipin: Bug here, we cannot add the whole exclusion range to ranges_to_reprotect, only the part inside invalidate_range
utils::protection prot = excluded->get_protection();
if (prot == utils::protection::ro)
{
ranges_to_protect_ro.merge(exclusion_range);
}
else if (prot == utils::protection::no)
{
no_access_count++;
}
else
{
fmt::throw_exception("Unreachable");
}
}
// Exclude NA ranges from ranges_to_protect_ro
if (no_access_count > 0 && !ranges_to_protect_ro.empty())
{
for (auto &exclusion : data.sections_to_exclude)
{
if (exclusion->get_protection() != utils::protection::ro)
{
ranges_to_protect_ro.exclude(exclusion->get_locked_range());
}
}
}
}
AUDIT(!ranges_to_unprotect.empty());
// Exclude the fault range if told to do so (this means the fault_range got unmapped or is otherwise invalid)
if (data.cause.keep_fault_range_protection())
{
ranges_to_unprotect.exclude(data.fault_range);
ranges_to_protect_ro.exclude(data.fault_range);
AUDIT(!ranges_to_unprotect.overlaps(data.fault_range));
AUDIT(!ranges_to_protect_ro.overlaps(data.fault_range));
}
else
{
AUDIT(ranges_to_unprotect.inside(data.invalidate_range));
AUDIT(ranges_to_protect_ro.inside(data.invalidate_range));
}
AUDIT(!ranges_to_protect_ro.overlaps(ranges_to_unprotect));
// Unprotect and discard
protect_ranges(ranges_to_unprotect, utils::protection::rw);
protect_ranges(ranges_to_protect_ro, utils::protection::ro);
discard_set(data.sections_to_unprotect);
discard_set(data.sections_to_flush);
#ifdef TEXTURE_CACHE_DEBUG
// Check that the cache looks sane
data.check_post_sanity();
#endif // TEXTURE_CACHE_DEBUG
}
atomic_t<u64> m_last_section_cache_tag = 0;
// Return a set containing all sections that should be flushed/unprotected/reprotected
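// Collects every locked section overlapping the invalidation range; sections that touch the fault range itself also extend the range, and the scan repeats until it stabilizes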
intersecting_set get_intersecting_set(const address_range &fault_range)
{
AUDIT(fault_range.is_page_range());
const u64 cache_tag = ++m_last_section_cache_tag;
intersecting_set result = {};
address_range &invalidate_range = result.invalidate_range;
invalidate_range = fault_range; // Sections fully inside this range will be invalidated, others will be deemed false positives
// Loop through cache and find pages that overlap the invalidate_range
u32 last_dirty_block = -1;
bool repeat_loop = false;
auto It = m_storage.range_begin(invalidate_range, locked_range, true); // will iterate through locked sections only
while (It != m_storage.range_end())
{
const u32 base = It.get_block().get_start();
// On the last loop, we stop once we're done with the last dirty block
if (!repeat_loop && base > last_dirty_block) // note: blocks are iterated in order from lowest to highest base address
break;
auto &tex = *It;
AUDIT(tex.is_locked()); // we should be iterating locked sections only, but just to make sure...
AUDIT(tex.cache_tag != cache_tag || last_dirty_block != umax); // cache tag should not match during the first loop
if (tex.cache_tag != cache_tag) //flushable sections can be 'clean' but unlocked. TODO: Handle this better
{
const rsx::section_bounds bounds = tex.get_overlap_test_bounds();
if (locked_range == bounds || tex.overlaps(invalidate_range, bounds))
{
const auto new_range = tex.get_min_max(invalidate_range, bounds).to_page_range();
AUDIT(new_range.is_page_range() && invalidate_range.inside(new_range));
// The various chaining policies behave differently
bool extend_invalidate_range = tex.overlaps(fault_range, bounds);
// Extend the various ranges
if (extend_invalidate_range && new_range != invalidate_range)
{
if (new_range.end > invalidate_range.end)
It.set_end(new_range.end);
invalidate_range = new_range;
repeat_loop = true; // we will need to repeat the loop again
last_dirty_block = base; // stop the repeat loop once we finish this block
}
// Add texture to result, and update its cache tag
tex.cache_tag = cache_tag;
result.sections.push_back(&tex);
if (tex.is_flushable())
{
result.has_flushables = true;
}
}
}
// Iterate
It++;
// repeat_loop==true means some blocks are still dirty and we need to repeat the loop again
if (repeat_loop && It == m_storage.range_end())
{
It = m_storage.range_begin(invalidate_range, locked_range, true);
repeat_loop = false;
}
}
AUDIT(result.invalidate_range.is_page_range());
#ifdef TEXTURE_CACHE_DEBUG
// naive check that sections are not duplicated in the results
for (auto &section1 : result.sections)
{
usz count = 0;
for (auto &section2 : result.sections)
{
if (section1 == section2) count++;
}
ensure(count == 1);
}
#endif //TEXTURE_CACHE_DEBUG
return result;
}
// Invalidate range base implementation: builds the intersecting set, decides which sections to flush/unprotect/exclude, and (unless the flush is deferred) performs the flush and unprotection
template <typename ...Args>
thrashed_set invalidate_range_impl_base(
commandbuffer_type& cmd,
const address_range &fault_range_in,
invalidation_cause cause,
std::function<void()> on_data_transfer_completed = {},
Args&&... extras)
{
#ifdef TEXTURE_CACHE_DEBUG
// Check that the cache has the correct protections
tex_cache_checker.verify();
#endif // TEXTURE_CACHE_DEBUG
AUDIT(cause.valid());
AUDIT(fault_range_in.valid());
address_range fault_range = fault_range_in.to_page_range();
const intersecting_set trampled_set = get_intersecting_set(fault_range);
thrashed_set result = {};
result.cause = cause;
result.fault_range = fault_range;
result.invalidate_range = trampled_set.invalidate_range;
// Fast code-path for keeping the fault range protection when not flushing anything
if (cause.keep_fault_range_protection() && cause.skip_flush() && !trampled_set.sections.empty())
{
ensure(cause != invalidation_cause::committed_as_fbo);
// We discard all sections fully inside fault_range
for (auto &obj : trampled_set.sections)
{
auto &tex = *obj;
if (tex.overlaps(fault_range, section_bounds::locked_range))
{
if (cause == invalidation_cause::superseded_by_fbo &&
tex.is_flushable() &&
tex.get_section_base() != fault_range_in.start)
{
// HACK: When being superseded by an fbo, we preserve overlapped flushables unless the start addresses match
continue;
}
else if (tex.inside(fault_range, section_bounds::locked_range))
{
// Discard - this section won't be needed any more
tex.discard(/* set_dirty */ true);
result.invalidate_samplers = true;
result.num_discarded++;
}
else if (g_cfg.video.strict_texture_flushing && tex.is_flushable())
{
tex.add_flush_exclusion(fault_range);
}
else
{
tex.set_dirty(true);
result.invalidate_samplers = true;
}
if (tex.is_dirty() && tex.get_context() == rsx::texture_upload_context::framebuffer_storage)
{
// Make sure the region is not going to get immediately reprotected
m_flush_always_cache.erase(tex.get_section_range());
}
}
}
#ifdef TEXTURE_CACHE_DEBUG
// Notify the checker that fault_range got discarded
tex_cache_checker.discard(fault_range);
#endif
// If invalidate_range is fault_range, we can stop now
const address_range invalidate_range = trampled_set.invalidate_range;
if (invalidate_range == fault_range)
{
result.violation_handled = true;
result.invalidate_samplers = true;
#ifdef TEXTURE_CACHE_DEBUG
// Post-check the result
result.check_post_sanity();
#endif
return result;
}
AUDIT(fault_range.inside(invalidate_range));
}
// Decide which sections to flush, unprotect, and exclude
if (!trampled_set.sections.empty())
{
update_cache_tag();
for (auto &obj : trampled_set.sections)
{
auto &tex = *obj;
if (!tex.is_locked())
continue;
const rsx::section_bounds bounds = tex.get_overlap_test_bounds();
const bool overlaps_fault_range = tex.overlaps(fault_range, bounds);
if (
// RO sections during a read invalidation can be ignored (unless there are flushables in trampled_set, since those could overwrite RO data)
(invalidation_keep_ro_during_read && !trampled_set.has_flushables && cause.is_read() && !tex.is_flushable()) ||
// Sections that are not fully contained in invalidate_range can be ignored
!tex.inside(trampled_set.invalidate_range, bounds) ||
// Unsynchronized sections (or any flushable when skipping flushes) that do not overlap the fault range directly can also be ignored
(invalidation_ignore_unsynchronized && tex.is_flushable() && (cause.skip_flush() || !tex.is_synchronized()) && !overlaps_fault_range) ||
// HACK: When being superseded by an fbo, we preserve other overlapped flushables unless the start addresses match
// If region is committed as fbo, all non-flushable data is removed but all flushables in the region must be preserved if possible
(overlaps_fault_range && tex.is_flushable() && cause.skip_fbos() && tex.get_section_base() != fault_range_in.start)
)
{
// False positive
if (tex.is_locked(true))
{
// Do not exclude hashed pages from unprotect! They will cause protection holes
result.sections_to_exclude.push_back(&tex);
}
result.num_excluded++;
continue;
}
if (tex.is_flushable())
{
// Write if and only if no one else has trashed section memory already
// TODO: Proper section management should prevent this from happening
// TODO: Blit engine section merge support and/or partial texture memory buffering
if (tex.is_dirty())
{
// Contents clobbered, destroy this
result.sections_to_unprotect.push_back(&tex);
}
else
{
result.sections_to_flush.push_back(&tex);
}
continue;
}
else
{
// deferred_flush = true and not synchronized
if (!tex.is_dirty())
{
AUDIT(tex.get_memory_read_flags() != memory_read_flags::flush_always);
tex.set_dirty(true);
}
if (tex.is_locked(true))
{
result.sections_to_unprotect.push_back(&tex);
}
else
{
// No need to waste resources on hashed section, just discard immediately
tex.discard(true);
result.invalidate_samplers = true;
result.num_discarded++;
}
continue;
}
fmt::throw_exception("Unreachable");
}
result.violation_handled = true;
#ifdef TEXTURE_CACHE_DEBUG
// Check that result makes sense
result.check_pre_sanity();
#endif // TEXTURE_CACHE_DEBUG
const bool has_flushables = !result.sections_to_flush.empty();
const bool has_unprotectables = !result.sections_to_unprotect.empty();
if (cause.deferred_flush() && has_flushables)
{
// There is something to flush, but we've been asked to defer it
result.num_flushable = static_cast<int>(result.sections_to_flush.size());
result.cache_tag = m_cache_update_tag.load();
return result;
}
else if (has_flushables || has_unprotectables)
{
AUDIT(!has_flushables || !cause.deferred_flush());
// We have something to flush and are allowed to flush now
// or there is nothing to flush but we have something to unprotect
if (has_flushables && !cause.skip_flush())
{
flush_set(cmd, result, on_data_transfer_completed, std::forward<Args>(extras)...);
}
unprotect_set(result);
// Everything has been handled
result.clear_sections();
}
else
{
// This is a read and all overlapping sections were RO and were excluded (except for cause == superseded_by_fbo)
// Can also happen when we have hash strat in use, since we "unlock" sections by just discarding
AUDIT(cause.skip_fbos() || (cause.is_read() && result.num_excluded > 0) || result.num_discarded > 0);
// We did not handle this violation
result.clear_sections();
result.violation_handled = false;
}
result.invalidate_samplers |= result.violation_handled;
#ifdef TEXTURE_CACHE_DEBUG
// Post-check the result
result.check_post_sanity();
#endif // TEXTURE_CACHE_DEBUG
return result;
}
return {};
}
public:
texture_cache() : m_storage(this), m_predictor(this) {}
~texture_cache() = default;
void clear()
{
// Release objects used for frame data
on_frame_end();
// Nuke the permanent storage pool
m_storage.clear();
m_predictor.clear();
}
virtual void on_frame_end()
{
// Must manually release each cached entry
for (auto& entry : m_temporary_subresource_cache)
{
release_temporary_subresource(entry.second.second);
}
m_temporary_subresource_cache.clear();
m_predictor.on_frame_end();
reset_frame_statistics();
}
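// Return every non-dirty section overlapping test_range whose upload context matches context_mask (and, if given, whose pitch is compatible)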
template <bool check_unlocked = false>
std::vector<section_storage_type*> find_texture_from_range(const address_range &test_range, u32 required_pitch = 0, u32 context_mask = 0xFF)
{
std::vector<section_storage_type*> results;
for (auto It = m_storage.range_begin(test_range, full_range, check_unlocked); It != m_storage.range_end(); It++)
{
auto &tex = *It;
if (!tex.is_dirty() && (context_mask & static_cast<u32>(tex.get_context())))
{
if (required_pitch && !rsx::pitch_compatible<false>(&tex, required_pitch, -1))
{
continue;
}
if (!tex.sync_protection())
{
continue;
}
results.push_back(&tex);
}
}
return results;
}
template <bool check_unlocked = false>
section_storage_type *find_texture_from_dimensions(u32 rsx_address, u32 format, u16 width = 0, u16 height = 0, u16 depth = 0, u16 mipmaps = 0)
{
auto &block = m_storage.block_for(rsx_address);
for (auto &tex : block)
{
if constexpr (check_unlocked)
{
if (!tex.is_locked())
{
continue;
}
}
if (!tex.is_dirty() &&
tex.matches(rsx_address, format, width, height, depth, mipmaps) &&
tex.sync_protection())
{
return &tex;
}
}
return nullptr;
}
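// Find a section whose range matches 'range' exactly. If none is usable and create_if_not_found is set, recycle a reusable range-matching entry, then any reusable entry in the block, and finally create a fresh section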
section_storage_type* find_cached_texture(const address_range &range, const image_section_attributes_t& attr, bool create_if_not_found, bool confirm_dimensions, bool allow_dirty)
{
auto &block = m_storage.block_for(range);
section_storage_type *dimensions_mismatch = nullptr;
section_storage_type *best_fit = nullptr;
section_storage_type *reuse = nullptr;
#ifdef TEXTURE_CACHE_DEBUG
section_storage_type *res = nullptr;
#endif
// Try to find match in block
for (auto &tex : block)
{
if (tex.matches(range))
{
// We must validate
tex.sync_protection();
if (allow_dirty || !tex.is_dirty())
{
if (!confirm_dimensions || tex.matches(attr.gcm_format, attr.width, attr.height, attr.depth, attr.mipmaps))
{
#ifndef TEXTURE_CACHE_DEBUG
return &tex;
#else
ensure(res == nullptr);
res = &tex;
#endif
}
else if (dimensions_mismatch == nullptr)
{
dimensions_mismatch = &tex;
}
}
else if (best_fit == nullptr && tex.can_be_reused())
{
// By grabbing a ref to a matching entry, duplicates are avoided
best_fit = &tex;
}
}
else if (reuse == nullptr && tex.can_be_reused())
{
reuse = &tex;
}
}
#ifdef TEXTURE_CACHE_DEBUG
if (res != nullptr)
return res;
#endif
if (dimensions_mismatch != nullptr)
{
auto &tex = *dimensions_mismatch;
rsx_log.warning("Cached object for address 0x%X was found, but it does not match stored parameters (width=%d vs %d; height=%d vs %d; depth=%d vs %d; mipmaps=%d vs %d)",
range.start, attr.width, tex.get_width(), attr.height, tex.get_height(), attr.depth, tex.get_depth(), attr.mipmaps, tex.get_mipmaps());
}
if (!create_if_not_found)
return nullptr;
// If found, use the best fitting section
if (best_fit != nullptr)
{
if (best_fit->exists())
{
best_fit->destroy();
}
return best_fit;
}
// Return the first dirty section found, if any
if (reuse != nullptr)
{
if (reuse->exists())
{
reuse->destroy();
}
return reuse;
}
// Create and return a new section
update_cache_tag();
auto tex = &block.create_section();
return tex;
}
section_storage_type* find_flushable_section(const address_range &memory_range)
{
auto &block = m_storage.block_for(memory_range);
for (auto &tex : block)
{
if (tex.is_dirty()) continue;
if (!tex.is_flushable() && !tex.is_flushed()) continue;
if (tex.matches(memory_range))
return &tex;
}
return nullptr;
}
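// Attach an address range to framebuffer storage: conflicting cache entries are invalidated, then the region is locked with no-access protection so CPU accesses fault back into the cache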
template <typename ...FlushArgs, typename ...Args>
void lock_memory_region(commandbuffer_type& cmd, image_storage_type* image, const address_range &rsx_range, bool is_active_surface, u16 width, u16 height, u32 pitch, Args&&... extras)
{
AUDIT(g_cfg.video.write_color_buffers || g_cfg.video.write_depth_buffer); // this method is only called when either WCB or WDB are enabled
std::lock_guard lock(m_cache_mutex);
// Find a cached section to use
image_section_attributes_t search_desc = { .gcm_format = RSX_GCM_FORMAT_IGNORED, .width = width, .height = height };
section_storage_type& region = *find_cached_texture(rsx_range, search_desc, true, true, false);
// Prepare and initialize fbo region
if (region.exists() && region.get_context() != texture_upload_context::framebuffer_storage)
{
// This space was being used for purposes other than framebuffer storage
// Delete used resources before attaching it to framebuffer memory
read_only_tex_invalidate = true;
}
if (!region.is_locked() || region.get_context() != texture_upload_context::framebuffer_storage)
{
// Invalidate sections from surface cache occupying same address range
invalidate_range_impl_base(cmd, rsx_range, invalidation_cause::superseded_by_fbo);
}
if (!region.is_locked() || region.can_be_reused())
{
// New region, we must prepare it
region.reset(rsx_range);
no_access_range = region.get_min_max(no_access_range, rsx::section_bounds::locked_range);
region.set_context(texture_upload_context::framebuffer_storage);
region.set_image_type(rsx::texture_dimension_extended::texture_dimension_2d);
}
else
{
// Re-using clean fbo region
ensure(region.matches(rsx_range));
ensure(region.get_context() == texture_upload_context::framebuffer_storage);
ensure(region.get_image_type() == rsx::texture_dimension_extended::texture_dimension_2d);
}
region.create(width, height, 1, 1, image, pitch, false, std::forward<Args>(extras)...);
region.reprotect(utils::protection::no, { 0, rsx_range.length() });
region.set_dirty(false);
region.touch(m_cache_update_tag);
if (is_active_surface)
{
// Add to flush always cache
if (region.get_memory_read_flags() != memory_read_flags::flush_always)
{
region.set_memory_read_flags(memory_read_flags::flush_always, false);
update_flush_always_cache(region, true);
}
else
{
AUDIT(m_flush_always_cache.find(region.get_section_range()) != m_flush_always_cache.end());
}
}
update_cache_tag();
#ifdef TEXTURE_CACHE_DEBUG
// Check that the cache makes sense
tex_cache_checker.verify();
#endif // TEXTURE_CACHE_DEBUG
}
template <typename ...Args>
void commit_framebuffer_memory_region(commandbuffer_type& cmd, const address_range &rsx_range, Args&&... extras)
{
AUDIT(!g_cfg.video.write_color_buffers || !g_cfg.video.write_depth_buffer);
if (!region_intersects_cache(rsx_range, true))
return;
std::lock_guard lock(m_cache_mutex);
invalidate_range_impl_base(cmd, rsx_range, invalidation_cause::committed_as_fbo, {}, std::forward<Args>(extras)...);
}
template <typename ...Args>
void discard_framebuffer_memory_region(commandbuffer_type& /*cmd*/, const address_range& rsx_range, Args&&... /*extras*/)
{
if (g_cfg.video.write_color_buffers || g_cfg.video.write_depth_buffer)
{
auto* region_ptr = find_cached_texture(rsx_range, { .gcm_format = RSX_GCM_FORMAT_IGNORED }, false, false, false);
if (region_ptr && region_ptr->is_locked() && region_ptr->get_context() == texture_upload_context::framebuffer_storage)
{
ensure(region_ptr->get_protection() == utils::protection::no);
region_ptr->discard(false);
}
}
}
void set_memory_read_flags(const address_range &memory_range, memory_read_flags flags)
{
std::lock_guard lock(m_cache_mutex);
auto* region_ptr = find_cached_texture(memory_range, { .gcm_format = RSX_GCM_FORMAT_IGNORED }, false, false, true);
if (region_ptr == nullptr)
{
AUDIT(m_flush_always_cache.find(memory_range) == m_flush_always_cache.end());
rsx_log.error("set_memory_flags(0x%x, 0x%x, %d): region_ptr == nullptr", memory_range.start, memory_range.end, static_cast<u32>(flags));
return;
}
if (region_ptr->is_dirty())
{
// Previously invalidated
return;
}
auto& region = *region_ptr;
if (!region.exists() || region.is_dirty() || region.get_context() != texture_upload_context::framebuffer_storage)
{
#ifdef TEXTURE_CACHE_DEBUG
if (!region.is_dirty())
{
if (flags == memory_read_flags::flush_once)
ensure(m_flush_always_cache.find(memory_range) == m_flush_always_cache.end());
else
ensure(m_flush_always_cache[memory_range] == &region);
}
#endif // TEXTURE_CACHE_DEBUG
return;
}
update_flush_always_cache(region, flags == memory_read_flags::flush_always);
region.set_memory_read_flags(flags, false);
}
virtual void on_memory_read_flags_changed(section_storage_type &section, rsx::memory_read_flags flags)
{
#ifdef TEXTURE_CACHE_DEBUG
const auto &memory_range = section.get_section_range();
if (flags == memory_read_flags::flush_once)
ensure(m_flush_always_cache[memory_range] == &section);
else
ensure(m_flush_always_cache.find(memory_range) == m_flush_always_cache.end());
#endif
update_flush_always_cache(section, flags == memory_read_flags::flush_always);
}
private:
inline void update_flush_always_cache(section_storage_type &section, bool add)
{
const address_range& range = section.get_section_range();
if (add)
{
// Add to m_flush_always_cache
AUDIT(m_flush_always_cache.find(range) == m_flush_always_cache.end());
m_flush_always_cache[range] = &section;
}
else
{
// Remove from m_flush_always_cache
AUDIT(m_flush_always_cache[range] == &section);
m_flush_always_cache.erase(range);
}
}
public:
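// Page-granular invalidation entry point for a single faulting address; returns an empty set when the page does not intersect the cache.
// Illustrative call from a fault handler (argument names and the cause value are examples only):
//   const auto result = invalidate_address(cmd, fault_addr, invalidation_cause::read);
//   if (result.violation_handled) { /* the violation belonged to the texture cache */ }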
template <typename ...Args>
thrashed_set invalidate_address(
commandbuffer_type& cmd,
u32 address,
invalidation_cause cause,
std::function<void()> on_data_transfer_completed = {},
Args&&... extras)
{
// Test before trying to acquire the lock
const auto range = page_for(address);
if (!region_intersects_cache(range, !cause.is_read()))
return{};
std::lock_guard lock(m_cache_mutex);
return invalidate_range_impl_base(cmd, range, cause, on_data_transfer_completed, std::forward<Args>(extras)...);
}
template <typename ...Args>
thrashed_set invalidate_range(
commandbuffer_type& cmd,
const address_range &range,
invalidation_cause cause,
std::function<void()> on_data_transfer_completed = {},
Args&&... extras)
{
// Test before trying to acquire the lock
if (!region_intersects_cache(range, !cause.is_read()))
return {};
std::lock_guard lock(m_cache_mutex);
return invalidate_range_impl_base(cmd, range, cause, on_data_transfer_completed, std::forward<Args>(extras)...);
}
template <typename ...Args>
bool flush_all(commandbuffer_type& cmd, thrashed_set& data, std::function<void()> on_data_transfer_completed = {}, Args&&... extras)
{
std::lock_guard lock(m_cache_mutex);
AUDIT(data.cause.deferred_flush());
AUDIT(!data.flushed);
if (m_cache_update_tag.load() == data.cache_tag)
{
//1. Write memory to cpu side
flush_set(cmd, data, on_data_transfer_completed, std::forward<Args>(extras)...);
//2. Release all obsolete sections
unprotect_set(data);
}
else
{
// The cache contents have changed between the two readings. This means the data held is useless
invalidate_range_impl_base(cmd, data.fault_range, data.cause.undefer(), on_data_transfer_completed, std::forward<Args>(extras)...);
}
return true;
}
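// Speculative readback: if the predictor expects a region to be flushed soon, synchronize it ahead of time, up to the per-frame flush budget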
template <typename ...Args>
bool flush_if_cache_miss_likely(commandbuffer_type& cmd, const address_range &range, Args&&... extras)
{
u32 cur_flushes_this_frame = (m_flushes_this_frame + m_speculations_this_frame);
if (cur_flushes_this_frame > m_predict_max_flushes_per_frame)
return false;
auto& block = m_storage.block_for(range);
if (block.empty())
return false;
reader_lock lock(m_cache_mutex);
// Try to find matching regions
bool result = false;
for (auto &region : block)
{
if (region.is_dirty() || region.is_synchronized() || !region.is_flushable())
continue;
if (!region.matches(range))
continue;
if (!region.tracked_by_predictor())
continue;
if (!m_predictor.predict(region))
continue;
lock.upgrade();
region.copy_texture(cmd, false, std::forward<Args>(extras)...);
result = true;
cur_flushes_this_frame++;
if (cur_flushes_this_frame > m_predict_max_flushes_per_frame)
return result;
}
return result;
}
void purge_unreleased_sections()
{
std::lock_guard lock(m_cache_mutex);
m_storage.purge_unreleased_sections();
}
virtual bool handle_memory_pressure(problem_severity severity)
{
if (m_storage.m_unreleased_texture_objects)
{
m_storage.purge_unreleased_sections();
return true;
}
if (severity >= problem_severity::severe)
{
// Things are bad, previous check should have released 'unreleased' pool
return m_storage.purge_unlocked_sections();
}
return false;
}
void trim_sections()
{
std::lock_guard lock(m_cache_mutex);
m_storage.trim_sections();
}
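// Evict shader-read and blit-source sections whose base address is not in exclusion_list. Used under memory pressure; must be called from the RSX thread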
bool evict_unused(const std::set<u32>& exclusion_list)
{
// Some sanity checks. Do not evict if the cache is currently in use.
ensure(rsx::get_current_renderer()->is_current_thread());
std::unique_lock lock(m_cache_mutex, std::defer_lock);
if (!lock.try_lock())
{
rsx_log.warning("Unable to evict the texture cache because we're faulting from within in the texture cache!");
return false;
}
rsx_log.warning("[PERFORMANCE WARNING] Texture cache is running eviction routine. This will affect performance.");
thrashed_set evicted_set;
const u32 type_to_evict = rsx::texture_upload_context::shader_read | rsx::texture_upload_context::blit_engine_src;
for (auto It = m_storage.begin(); It != m_storage.end(); ++It)
{
auto& block = *It;
if (block.empty())
{
continue;
}
for (auto& region : block)
{
if (region.is_dirty() || !(region.get_context() & type_to_evict))
{
continue;
}
ensure(region.is_locked());
const u32 this_address = region.get_section_base();
if (exclusion_list.contains(this_address))
{
continue;
}
evicted_set.violation_handled = true;
region.set_dirty(true);
if (region.is_locked(true))
{
evicted_set.sections_to_unprotect.push_back(&region);
}
else
{
region.discard(true);
}
}
}
unprotect_set(evicted_set);
return evicted_set.violation_handled;
}
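// Realize a deferred_subresource into an actual image view, reusing a matching entry from the temporary subresource cache when caching is allowed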
image_view_type create_temporary_subresource(commandbuffer_type &cmd, deferred_subresource& desc)
{
if (!desc.do_not_cache) [[likely]]
{
const auto found = m_temporary_subresource_cache.equal_range(desc.address);
for (auto It = found.first; It != found.second; ++It)
{
const auto& found_desc = It->second.first;
if (found_desc.external_handle != desc.external_handle ||
found_desc.op != desc.op ||
found_desc.x != desc.x || found_desc.y != desc.y ||
found_desc.width != desc.width || found_desc.height != desc.height)
continue;
if (desc.op == deferred_request_command::copy_image_dynamic)
update_image_contents(cmd, It->second.second, desc.external_handle, desc.width, desc.height);
return It->second.second;
}
}
std::lock_guard lock(m_cache_mutex);
image_view_type result = 0;
switch (desc.op)
{
case deferred_request_command::cubemap_gather:
{
result = generate_cubemap_from_images(cmd, desc.gcm_format, desc.width, desc.sections_to_copy, desc.remap);
break;
}
case deferred_request_command::cubemap_unwrap:
{
std::vector<copy_region_descriptor> sections(6);
for (u16 n = 0; n < 6; ++n)
{
sections[n] =
{
.src = desc.external_handle,
.xform = surface_transform::coordinate_transform,
.level = 0,
.src_x = 0,
.src_y = static_cast<u16>(desc.slice_h * n),
.dst_x = 0,
.dst_y = 0,
.dst_z = n,
.src_w = desc.width,
.src_h = desc.height,
.dst_w = desc.width,
.dst_h = desc.height
};
}
result = generate_cubemap_from_images(cmd, desc.gcm_format, desc.width, sections, desc.remap);
break;
}
case deferred_request_command::_3d_gather:
{
result = generate_3d_from_2d_images(cmd, desc.gcm_format, desc.width, desc.height, desc.depth, desc.sections_to_copy, desc.remap);
break;
}
case deferred_request_command::_3d_unwrap:
{
std::vector<copy_region_descriptor> sections;
sections.resize(desc.depth);
for (u16 n = 0; n < desc.depth; ++n)
{
sections[n] =
{
.src = desc.external_handle,
.xform = surface_transform::coordinate_transform,
.level = 0,
.src_x = 0,
.src_y = static_cast<u16>(desc.slice_h * n),
.dst_x = 0,
.dst_y = 0,
.dst_z = n,
.src_w = desc.width,
.src_h = desc.height,
.dst_w = desc.width,
.dst_h = desc.height
};
}
result = generate_3d_from_2d_images(cmd, desc.gcm_format, desc.width, desc.height, desc.depth, sections, desc.remap);
break;
}
case deferred_request_command::atlas_gather:
case deferred_request_command::blit_image_static:
{
result = generate_atlas_from_images(cmd, desc.gcm_format, desc.width, desc.height, desc.sections_to_copy, desc.remap);
break;
}
case deferred_request_command::copy_image_static:
case deferred_request_command::copy_image_dynamic:
{
result = create_temporary_subresource_view(cmd, &desc.external_handle, desc.gcm_format, desc.x, desc.y, desc.width, desc.height, desc.remap);
break;
}
case deferred_request_command::mipmap_gather:
{
result = generate_2d_mipmaps_from_images(cmd, desc.gcm_format, desc.width, desc.height, desc.sections_to_copy, desc.remap);
break;
}
default:
{
//Throw
fmt::throw_exception("Invalid deferred command op 0x%X", static_cast<u32>(desc.op));
}
}
if (result) [[likely]]
{
if (!desc.do_not_cache) [[likely]]
{
m_temporary_subresource_cache.insert({ desc.address,{ desc, result } });
}
else
{
m_uncached_subresources.push_back(result);
}
}
return result;
}
void release_uncached_temporary_subresources()
{
for (auto& view : m_uncached_subresources)
{
release_temporary_subresource(view);
}
m_uncached_subresources.clear();
}
void notify_surface_changed(const utils::address_range& range)
{
for (auto It = m_temporary_subresource_cache.begin(); It != m_temporary_subresource_cache.end();)
{
const auto& desc = It->second.first;
if (range.overlaps(desc.cache_range))
{
release_temporary_subresource(It->second.second);
It = m_temporary_subresource_cache.erase(It);
}
else
{
++It;
}
}
}
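// Core sampler lookup: exact local-cache hits (fast path for compressed formats), bound render targets for cyclic references, then overlapping surface-cache and local sections merged into a deferred compositing request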
template <typename SurfaceStoreType, typename... Args>
sampled_image_descriptor fast_texture_search(
commandbuffer_type& cmd,
const image_section_attributes_t& attr,
const size3f& scale,
const texture_channel_remap_t& remap,
const texture_cache_search_options& options,
const utils::address_range& memory_range,
rsx::texture_dimension_extended extended_dimension,
SurfaceStoreType& m_rtts, Args&&... /*extras*/)
{
if (options.is_compressed_format) [[likely]]
{
// Most mesh textures are stored as compressed to make the most of the limited memory
if (auto cached_texture = find_texture_from_dimensions(attr.address, attr.gcm_format, attr.width, attr.height, attr.depth))
{
return{ cached_texture->get_view(remap), cached_texture->get_context(), cached_texture->get_format_class(), scale, cached_texture->get_image_type() };
}
}
else
{
// Fast lookup for cyclic reference
if (m_rtts.address_is_bound(attr.address)) [[unlikely]]
{
if (auto texptr = m_rtts.get_surface_at(attr.address);
helpers::check_framebuffer_resource(texptr, attr, extended_dimension))
{
const bool force_convert = !render_target_format_is_compatible(texptr, attr.gcm_format);
auto result = helpers::process_framebuffer_resource_fast<sampled_image_descriptor>(
cmd, texptr, attr, scale, extended_dimension, remap, true, force_convert);
if (!options.skip_texture_barriers && result.is_cyclic_reference)
{
// A texture barrier is only necessary when the rendertarget is going to be bound as a shader input.
// If a temporary copy is to be made, this should not be invoked
insert_texture_barrier(cmd, texptr);
}
return result;
}
}
std::vector<typename SurfaceStoreType::surface_overlap_info> overlapping_fbos;
std::vector<section_storage_type*> overlapping_locals;
auto fast_fbo_check = [&]() -> sampled_image_descriptor
{
const auto& last = overlapping_fbos.back();
if (last.src_area.x == 0 && last.src_area.y == 0 && !last.is_clipped)
{
const bool force_convert = !render_target_format_is_compatible(last.surface, attr.gcm_format);
return helpers::process_framebuffer_resource_fast<sampled_image_descriptor>(
cmd, last.surface, attr, scale, extended_dimension, remap, false, force_convert);
}
return {};
};
// Check surface cache early if the option is enabled
if (options.prefer_surface_cache)
{
const u16 block_h = (attr.depth * attr.slice_h);
overlapping_fbos = m_rtts.get_merged_texture_memory_region(cmd, attr.address, attr.width, block_h, attr.pitch, attr.bpp, rsx::surface_access::shader_read);
if (!overlapping_fbos.empty())
{
if (auto result = fast_fbo_check(); result.validate())
{
return result;
}
if (options.skip_texture_merge)
{
overlapping_fbos.clear();
}
}
}
// Check shader_read storage. In a given scene, reads from local memory far outnumber reads from the surface cache
const u32 lookup_mask = rsx::texture_upload_context::shader_read | rsx::texture_upload_context::blit_engine_dst | rsx::texture_upload_context::blit_engine_src;
overlapping_locals = find_texture_from_range<true>(memory_range, attr.height > 1 ? attr.pitch : 0, lookup_mask & options.lookup_mask);
// Search for exact match if possible
for (auto& cached_texture : overlapping_locals)
{
if (cached_texture->matches(attr.address, attr.gcm_format, attr.width, attr.height, attr.depth, 0))
{
#ifdef TEXTURE_CACHE_DEBUG
if (!memory_range.inside(cached_texture->get_confirmed_range()))
{
// TODO. This is easily possible for blit_dst textures if the blit is incomplete in Y
// The possibility that a texture will be split into parts on the CPU like this is very rare
continue;
}
#endif
if (attr.swizzled != cached_texture->is_swizzled())
{
// We can have the correct data in cached_texture but it needs decoding before it can be sampled.
// Usually a sign of a game bug where the developer forgot to mark the texture correctly the first time we see it.
// TODO: This section should execute under an exclusive lock, but we're not actually modifying any object references, only flags
rsx_log.warning("A texture was found in cache for address 0x%x, but swizzle flag does not match", attr.address);
cached_texture->unprotect();
cached_texture->set_dirty(true);
break;
}
return{ cached_texture->get_view(remap), cached_texture->get_context(), cached_texture->get_format_class(), scale, cached_texture->get_image_type() };
}
}
if (!overlapping_locals.empty())
{
// Remove everything that is not a transfer target
overlapping_locals.erase
(
std::remove_if(overlapping_locals.begin(), overlapping_locals.end(), [](const auto& e)
{
return e->is_dirty() || (e->get_context() != rsx::texture_upload_context::blit_engine_dst);
}),
overlapping_locals.end()
);
}
if (!options.prefer_surface_cache)
{
// Now check for surface cache hits
const u16 block_h = (attr.depth * attr.slice_h);
overlapping_fbos = m_rtts.get_merged_texture_memory_region(cmd, attr.address, attr.width, block_h, attr.pitch, attr.bpp, rsx::surface_access::shader_read);
}
if (!overlapping_fbos.empty() || !overlapping_locals.empty())
{
int _pool = -1;
if (overlapping_locals.empty()) [[likely]]
{
_pool = 0;
}
else if (overlapping_fbos.empty())
{
_pool = 1;
}
else
{
_pool = (overlapping_locals.back()->last_write_tag < overlapping_fbos.back().surface->last_use_tag) ? 0 : 1;
}
if (_pool == 0)
{
// Surface cache data is newer, check if this thing fits our search parameters
if (!options.prefer_surface_cache)
{
if (auto result = fast_fbo_check(); result.validate())
{
return result;
}
}
}
else if (extended_dimension <= rsx::texture_dimension_extended::texture_dimension_2d)
{
const auto last = overlapping_locals.back();
const auto normalized_width = u16(last->get_width() * get_format_block_size_in_bytes(last->get_gcm_format())) / attr.bpp;
if (last->get_section_base() == attr.address &&
normalized_width >= attr.width && last->get_height() >= attr.height)
{
u32 gcm_format = attr.gcm_format;
const bool gcm_format_is_depth = helpers::is_gcm_depth_format(attr.gcm_format);
if (!gcm_format_is_depth && last->is_depth_texture())
{
// While the copy routines can perform a typeless cast, prefer to not cross the aspect barrier if possible
gcm_format = helpers::get_compatible_depth_format(attr.gcm_format);
}
auto new_attr = attr;
new_attr.gcm_format = gcm_format;
if (last->get_gcm_format() == attr.gcm_format && attr.edge_clamped)
{
// Clipped view
auto viewed_image = last->get_raw_texture();
sampled_image_descriptor result = { viewed_image->get_view(remap), last->get_context(),
viewed_image->format_class(), scale, extended_dimension, false, viewed_image->samples() };
helpers::calculate_sample_clip_parameters(result, position2i(0, 0), size2i(attr.width, attr.height), size2i(normalized_width, last->get_height()));
return result;
}
return { last->get_raw_texture(), deferred_request_command::copy_image_static, new_attr, {},
last->get_context(), classify_format(gcm_format), scale, extended_dimension, remap };
}
}
auto result = helpers::merge_cache_resources<sampled_image_descriptor>(
cmd, overlapping_fbos, overlapping_locals, attr, scale, extended_dimension, remap, _pool);
const bool is_simple_subresource_copy =
(result.external_subresource_desc.op == deferred_request_command::copy_image_static) ||
(result.external_subresource_desc.op == deferred_request_command::copy_image_dynamic) ||
(result.external_subresource_desc.op == deferred_request_command::blit_image_static);
if (attr.edge_clamped &&
!g_cfg.video.strict_rendering_mode &&
is_simple_subresource_copy &&
render_target_format_is_compatible(result.external_subresource_desc.src0(), attr.gcm_format))
{
if (result.external_subresource_desc.op != deferred_request_command::blit_image_static) [[ likely ]]
{
helpers::convert_image_copy_to_clip_descriptor(
result,
position2i(result.external_subresource_desc.x, result.external_subresource_desc.y),
size2i(result.external_subresource_desc.width, result.external_subresource_desc.height),
size2i(result.external_subresource_desc.external_handle->width(), result.external_subresource_desc.external_handle->height()),
remap, false);
}
else
{
helpers::convert_image_blit_to_clip_descriptor(
result,
remap,
false);
}
if (!!result.ref_address && m_rtts.address_is_bound(result.ref_address))
{
result.is_cyclic_reference = true;
auto texptr = ensure(m_rtts.get_surface_at(result.ref_address));
insert_texture_barrier(cmd, texptr);
}
return result;
}
if (options.skip_texture_merge)
{
if (is_simple_subresource_copy)
{
return result;
}
return {};
}
if (const auto section_count = result.external_subresource_desc.sections_to_copy.size();
section_count > 0)
{
bool result_is_valid;
if (_pool == 0 && !g_cfg.video.write_color_buffers && !g_cfg.video.write_depth_buffer)
{
// HACK: Avoid WCB requirement for some games with wrongly declared sampler dimensions.
// TODO: Some games may render a small region (e.g 1024x256x2) and sample a huge texture (e.g 1024x1024).
// Seen in APF2k8 - this causes missing bits to be reuploaded from CPU which can cause WCB requirement.
// Properly fix this by introducing partial data upload into the surface cache in such cases and making RCB/RDB
// enabled by default. Blit engine already handles this correctly.
result_is_valid = true;
}
else
{
result_is_valid = result.atlas_covers_target_area(section_count == 1 ? 99 : 90);
}
if (result_is_valid)
{
// Check for possible duplicates
usz max_overdraw_ratio = u32{ umax };
usz max_safe_sections = u32{ umax };
switch (result.external_subresource_desc.op)
{
case deferred_request_command::atlas_gather:
max_overdraw_ratio = 150;
max_safe_sections = 8 + 2 * attr.mipmaps;
break;
case deferred_request_command::cubemap_gather:
max_overdraw_ratio = 150;
max_safe_sections = 6 * 2 * attr.mipmaps;
break;
case deferred_request_command::_3d_gather:
// 3D gather can have very many input sections, try to keep section count low
max_overdraw_ratio = 125;
max_safe_sections = (attr.depth * attr.mipmaps * 110) / 100;
break;
default:
break;
}
if (overlapping_fbos.size() > max_safe_sections)
{
// Are we really over-budget?
u32 coverage_size = 0;
for (const auto& section : overlapping_fbos)
{
const auto area = section.surface->get_native_pitch() * section.surface->template get_surface_height<rsx::surface_metrics::bytes>();
coverage_size += area;
}
if (const auto coverage_ratio = (coverage_size * 100ull) / memory_range.length();
coverage_ratio > max_overdraw_ratio)
{
rsx_log.warning("[Performance warning] Texture gather routine encountered too many objects! Operation=%d, Mipmaps=%d, Depth=%d, Sections=%zu, Ratio=%llu%",
static_cast<int>(result.external_subresource_desc.op), attr.mipmaps, attr.depth, overlapping_fbos.size(), coverage_ratio);
m_rtts.check_for_duplicates(overlapping_fbos);
}
}
// Optionally disallow caching if resource is being written to as it is being read from
for (const auto& section : overlapping_fbos)
{
if (m_rtts.address_is_bound(section.base_address))
{
if (result.external_subresource_desc.op == deferred_request_command::copy_image_static)
{
result.external_subresource_desc.op = deferred_request_command::copy_image_dynamic;
}
else
{
result.external_subresource_desc.do_not_cache = true;
}
break;
}
}
return result;
}
}
}
}
return {};
}
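// Checks whether a previously sampled descriptor still refers to valid surface data.
// For cyclic references to a bound render target the descriptor is refreshed in place,
// either by re-inserting a texture barrier or by rewriting it as an uncached dynamic copy.
// Returns true when the caller must discard the descriptor and redo the full upload path.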
template <typename surface_store_type, typename RsxTextureType>
bool test_if_descriptor_expired(commandbuffer_type& cmd, surface_store_type& surface_cache, sampled_image_descriptor* descriptor, const RsxTextureType& tex)
{
auto result = descriptor->is_expired(surface_cache);
if (result.second && descriptor->is_cyclic_reference)
{
/* NOTE: All cyclic descriptors updated via fast update must have a barrier check
* It is possible for the following sequence of events to break common-sense tests
* 1. Cyclic ref occurs normally in upload_texture
* 2. Surface is swapped out, but texture is not updated
* 3. Surface is swapped back in. Surface cache resets layout to optimal rasterization layout
* 4. During bind, the surface is converted to shader layout because it is not in GENERAL layout
*/
if (!texture_cache_helpers::force_strict_fbo_sampling(result.second->samples()))
{
insert_texture_barrier(cmd, result.second, false);
}
else if (descriptor->image_handle)
{
// Rebuild duplicate surface
auto src = descriptor->image_handle->image();
rsx::image_section_attributes_t attr;
attr.address = descriptor->ref_address;
attr.gcm_format = tex.format() & ~(CELL_GCM_TEXTURE_LN | CELL_GCM_TEXTURE_UN);
attr.width = src->width();
attr.height = src->height();
attr.depth = 1;
attr.mipmaps = 1;
attr.pitch = 0; // Unused
attr.slice_h = src->height();
attr.bpp = get_format_block_size_in_bytes(attr.gcm_format);
attr.swizzled = false;
// Sanity checks
const bool gcm_format_is_depth = helpers::is_gcm_depth_format(attr.gcm_format);
const bool bound_surface_is_depth = surface_cache.m_bound_depth_stencil.first == attr.address;
if (!gcm_format_is_depth && bound_surface_is_depth)
{
// While the copy routines can perform a typeless cast, prefer to not cross the aspect barrier if possible
// This avoids messing with other solutions such as texture redirection as well
attr.gcm_format = helpers::get_compatible_depth_format(attr.gcm_format);
}
descriptor->external_subresource_desc =
{
src,
rsx::deferred_request_command::copy_image_dynamic,
attr,
{},
rsx::default_remap_vector
};
descriptor->external_subresource_desc.do_not_cache = true;
descriptor->image_handle = nullptr;
}
else
{
// Force reupload
return true;
}
}
return result.first;
}
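// Main texture sampling entry point. Decodes the RSX texture state into image_section_attributes_t,
// performs a fast cache/surface-store lookup, and for GPU-resident 2D textures walks the mipmap
// chain so the levels can be reassembled through a deferred mipmap_gather command.
// Falls back to a full upload from CPU memory if no usable cached data is found.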
template <typename RsxTextureType, typename surface_store_type, typename ...Args>
sampled_image_descriptor upload_texture(commandbuffer_type& cmd, const RsxTextureType& tex, surface_store_type& m_rtts, Args&&... extras)
{
m_texture_upload_calls_this_frame++;
image_section_attributes_t attributes{};
texture_cache_search_options options{};
attributes.address = rsx::get_address(tex.offset(), tex.location());
attributes.gcm_format = tex.format() & ~(CELL_GCM_TEXTURE_LN | CELL_GCM_TEXTURE_UN);
attributes.bpp = get_format_block_size_in_bytes(attributes.gcm_format);
attributes.width = tex.width();
attributes.height = tex.height();
attributes.mipmaps = tex.get_exact_mipmap_count();
attributes.swizzled = !(tex.format() & CELL_GCM_TEXTURE_LN);
const bool is_unnormalized = !!(tex.format() & CELL_GCM_TEXTURE_UN);
auto extended_dimension = tex.get_extended_texture_dimension();
options.is_compressed_format = helpers::is_compressed_gcm_format(attributes.gcm_format);
u32 tex_size = 0, required_surface_height = 1;
u8 subsurface_count = 1;
size3f scale{ 1.f, 1.f, 1.f };
if (is_unnormalized)
{
switch (extended_dimension)
{
case rsx::texture_dimension_extended::texture_dimension_3d:
case rsx::texture_dimension_extended::texture_dimension_cubemap:
scale.depth /= attributes.depth;
[[ fallthrough ]];
case rsx::texture_dimension_extended::texture_dimension_2d:
scale.height /= attributes.height;
[[ fallthrough ]];
default:
scale.width /= attributes.width;
break;
}
}
const auto packed_pitch = get_format_packed_pitch(attributes.gcm_format, attributes.width, !tex.border_type(), attributes.swizzled);
if (!attributes.swizzled) [[likely]]
{
if (attributes.pitch = tex.pitch(); !attributes.pitch)
{
attributes.pitch = packed_pitch;
scale = { 1.f, 0.f, 0.f };
}
else if (packed_pitch > attributes.pitch && !options.is_compressed_format)
{
scale.width *= f32(packed_pitch) / attributes.pitch;
attributes.width = attributes.pitch / attributes.bpp;
}
}
else
{
attributes.pitch = packed_pitch;
}
switch (extended_dimension)
{
case rsx::texture_dimension_extended::texture_dimension_1d:
attributes.depth = 1;
attributes.height = 1;
attributes.slice_h = 1;
attributes.edge_clamped = (tex.wrap_s() == rsx::texture_wrap_mode::clamp_to_edge);
scale.height = scale.depth = 0.f;
subsurface_count = 1;
required_surface_height = 1;
break;
case rsx::texture_dimension_extended::texture_dimension_2d:
attributes.depth = 1;
attributes.edge_clamped = (tex.wrap_s() == rsx::texture_wrap_mode::clamp_to_edge && tex.wrap_t() == rsx::texture_wrap_mode::clamp_to_edge);
scale.depth = 0.f;
subsurface_count = options.is_compressed_format? 1 : tex.get_exact_mipmap_count();
attributes.slice_h = required_surface_height = attributes.height;
break;
case rsx::texture_dimension_extended::texture_dimension_cubemap:
attributes.depth = 6;
subsurface_count = 1;
tex_size = static_cast<u32>(get_texture_size(tex));
required_surface_height = tex_size / attributes.pitch;
attributes.slice_h = required_surface_height / attributes.depth;
break;
case rsx::texture_dimension_extended::texture_dimension_3d:
attributes.depth = tex.depth();
subsurface_count = 1;
tex_size = static_cast<u32>(get_texture_size(tex));
required_surface_height = tex_size / attributes.pitch;
attributes.slice_h = required_surface_height / attributes.depth;
break;
default:
fmt::throw_exception("Unsupported texture dimension %d", static_cast<int>(extended_dimension));
}
// Validation
if (!attributes.width || !attributes.height || !attributes.depth)
{
rsx_log.warning("Image at address 0x%x has invalid dimensions. Type=%d, Dims=%dx%dx%d",
attributes.address, static_cast<s32>(extended_dimension),
attributes.width, attributes.height, attributes.depth);
return {};
}
if (options.is_compressed_format)
{
// Compressed textures cannot be 1D in some APIs
extended_dimension = std::max(extended_dimension, rsx::texture_dimension_extended::texture_dimension_2d);
}
const auto lookup_range = utils::address_range::start_length(attributes.address, attributes.pitch * required_surface_height);
reader_lock lock(m_cache_mutex);
auto result = fast_texture_search(cmd, attributes, scale, tex.decoded_remap(),
options, lookup_range, extended_dimension, m_rtts,
std::forward<Args>(extras)...);
if (result.validate())
{
if (!result.image_handle) [[unlikely]]
{
// Deferred reconstruct
result.external_subresource_desc.cache_range = lookup_range;
}
else if (result.texcoord_xform.clamp)
{
m_texture_copies_ellided_this_frame++;
}
if (!result.ref_address)
{
result.ref_address = attributes.address;
}
result.surface_cache_tag = m_rtts.write_tag;
if (subsurface_count == 1)
{
return result;
}
switch (result.upload_context)
{
case rsx::texture_upload_context::blit_engine_dst:
case rsx::texture_upload_context::framebuffer_storage:
break;
case rsx::texture_upload_context::shader_read:
if (!result.image_handle)
break;
[[fallthrough]];
default:
return result;
}
// Traverse the mipmap tree
// Some guarantees here include:
// 1. Only 2D images will invoke this routine
// 2. The image has to have been generated on the GPU (fbo or blit target only)
std::vector<copy_region_descriptor> sections;
const bool use_upscaling = (result.upload_context == rsx::texture_upload_context::framebuffer_storage && g_cfg.video.resolution_scale_percent != 100);
if (!helpers::append_mipmap_level(sections, result, attributes, 0, use_upscaling, attributes)) [[unlikely]]
{
// Abort if mip0 is not compatible
return result;
}
auto attr2 = attributes;
sections.reserve(subsurface_count);
options.skip_texture_merge = true;
options.skip_texture_barriers = true;
options.prefer_surface_cache = (result.upload_context == rsx::texture_upload_context::framebuffer_storage);
for (u8 subsurface = 1; subsurface < subsurface_count; ++subsurface)
{
attr2.address += (attr2.pitch * attr2.height);
attr2.width = std::max(attr2.width / 2, 1);
attr2.height = std::max(attr2.height / 2, 1);
attr2.slice_h = attr2.height;
if (attributes.swizzled)
{
attr2.pitch = attr2.width * attr2.bpp;
}
const auto range = utils::address_range::start_length(attr2.address, attr2.pitch * attr2.height);
auto ret = fast_texture_search(cmd, attr2, scale, tex.decoded_remap(),
options, range, extended_dimension, m_rtts, std::forward<Args>(extras)...);
if (!ret.validate() ||
!helpers::append_mipmap_level(sections, ret, attr2, subsurface, use_upscaling, attributes))
{
// Abort
break;
}
}
if (sections.size() == 1) [[unlikely]]
{
return result;
}
else
{
// NOTE: Do not disable 'cyclic ref' since the texture_barrier may have already been issued!
result.image_handle = 0;
result.external_subresource_desc = { 0, deferred_request_command::mipmap_gather, attributes, {}, tex.decoded_remap() };
result.format_class = rsx::classify_format(attributes.gcm_format);
if (result.texcoord_xform.clamp)
{
// Revert clamp configuration
result.pop_texcoord_xform();
}
if (use_upscaling)
{
// Grab the correct image dimensions from the base mipmap level
const auto& mip0 = sections.front();
result.external_subresource_desc.width = mip0.dst_w;
result.external_subresource_desc.height = mip0.dst_h;
}
const u32 cache_end = attr2.address + (attr2.pitch * attr2.height);
result.external_subresource_desc.cache_range = utils::address_range::start_end(attributes.address, cache_end);
result.external_subresource_desc.sections_to_copy = std::move(sections);
return result;
}
}
// Do direct upload from CPU as the last resort
m_texture_upload_misses_this_frame++;
const auto subresources_layout = get_subresources_layout(tex);
const auto format_class = classify_format(attributes.gcm_format);
if (!tex_size)
{
tex_size = static_cast<u32>(get_texture_size(tex));
}
lock.upgrade();
// Invalidate
const address_range tex_range = address_range::start_length(attributes.address, tex_size);
invalidate_range_impl_base(cmd, tex_range, invalidation_cause::read, {}, std::forward<Args>(extras)...);
// Upload from CPU. Note that sRGB conversion is handled in the FS
auto uploaded = upload_image_from_cpu(cmd, tex_range, attributes.width, attributes.height, attributes.depth, tex.get_exact_mipmap_count(), attributes.pitch, attributes.gcm_format,
texture_upload_context::shader_read, subresources_layout, extended_dimension, attributes.swizzled);
return{ uploaded->get_view(tex.decoded_remap()),
texture_upload_context::shader_read, format_class, scale, extended_dimension };
}
// FIXME: This function is way too large and needs an urgent refactor.
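// Services blit engine (scaled image transfer) requests through the texture cache.
// Source and destination may each resolve to a render target, an existing cached section or
// plain guest memory; missing resources are created on demand and the transfer is carried out
// either as a GPU blit or, for trivial copies outside the surface store, as a DMA (null section) write.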
template <typename surface_store_type, typename blitter_type, typename ...Args>
blit_op_result upload_scaled_image(const rsx::blit_src_info& src_info, const rsx::blit_dst_info& dst_info, bool interpolate, commandbuffer_type& cmd, surface_store_type& m_rtts, blitter_type& blitter, Args&&... extras)
{
// Local working copy. We may modify the descriptors for optimization purposes
auto src = src_info;
auto dst = dst_info;
bool src_is_render_target = false;
bool dst_is_render_target = false;
const bool dst_is_argb8 = (dst.format == rsx::blit_engine::transfer_destination_format::a8r8g8b8);
const bool src_is_argb8 = (src.format == rsx::blit_engine::transfer_source_format::a8r8g8b8);
const u8 src_bpp = src_is_argb8 ? 4 : 2;
const u8 dst_bpp = dst_is_argb8 ? 4 : 2;
typeless_xfer typeless_info = {};
image_resource_type vram_texture = 0;
image_resource_type dest_texture = 0;
const u32 dst_address = vm::get_addr(dst.pixels);
u32 src_address = vm::get_addr(src.pixels);
const f32 scale_x = fabsf(dst.scale_x);
const f32 scale_y = fabsf(dst.scale_y);
const bool is_copy_op = (fcmp(scale_x, 1.f) && fcmp(scale_y, 1.f));
const bool is_format_convert = (dst_is_argb8 != src_is_argb8);
bool skip_if_collision_exists = false;
// Offset in x and y for src is 0 (it is already accounted for when getting pixels_src)
// Reproject final clip onto source...
u16 src_w = static_cast<u16>(dst.clip_width / scale_x);
u16 src_h = static_cast<u16>(dst.clip_height / scale_y);
u16 dst_w = dst.clip_width;
u16 dst_h = dst.clip_height;
if (true) // This block is a debug/sanity check and should be optionally disabled with a config option
{
// Do subpixel correction in the special case of reverse scanning
// When reverse scanning, pixel0 is at offset = (dimension - 1)
if (dst.scale_y < 0.f && src.offset_y)
{
if (src.offset_y = (src.height - src.offset_y);
src.offset_y == 1)
{
src.offset_y = 0;
}
}
if (dst.scale_x < 0.f && src.offset_x)
{
if (src.offset_x = (src.width - src.offset_x);
src.offset_x == 1)
{
src.offset_x = 0;
}
}
if ((src_h + src.offset_y) > src.height) [[unlikely]]
{
// TODO: Special case that needs wrapping around (custom blit)
rsx_log.error("Transfer cropped in Y, src_h=%d, offset_y=%d, block_h=%d", src_h, src.offset_y, src.height);
src_h = src.height - src.offset_y;
}
if ((src_w + src.offset_x) > src.width) [[unlikely]]
{
// TODO: Special case that needs wrapping around (custom blit)
rsx_log.error("Transfer cropped in X, src_w=%d, offset_x=%d, block_w=%d", src_w, src.offset_x, src.width);
src_w = src.width - src.offset_x;
}
}
if (dst.scale_y < 0.f)
{
typeless_info.flip_vertical = true;
src_address -= (src.pitch * (src_h - 1));
}
if (dst.scale_x < 0.f)
{
typeless_info.flip_horizontal = true;
src_address += (src.width - src_w) * src_bpp;
}
const auto get_tiled_region = [&](const utils::address_range& range)
{
auto rsxthr = rsx::get_current_renderer();
return rsxthr->get_tiled_memory_region(range);
};
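// Looks up a render target in the surface store that overlaps the requested transfer region.
// Surfaces that were never used as attachments are skipped, and clipped matches are only
// returned when the caller allows them; otherwise the lookup gives up so the data can be
// re-uploaded from CPU instead.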
auto rtt_lookup = [&m_rtts, &cmd, &scale_x, &scale_y](u32 address, u32 width, u32 height, u32 pitch, u8 bpp, rsx::flags32_t access, bool allow_clipped) -> typename surface_store_type::surface_overlap_info
{
const auto list = m_rtts.get_merged_texture_memory_region(cmd, address, width, height, pitch, bpp, access);
if (list.empty())
{
return {};
}
for (auto It = list.rbegin(); It != list.rend(); ++It)
{
if (!(It->surface->memory_usage_flags & rsx::surface_usage_flags::attachment))
{
// HACK
// TODO: Properly analyse the input here to determine if it can properly fit what we need
// This is a problem due to chunked transfer
// First 2 512x720 blocks go into a cpu-side buffer but suddenly when it's time to render the final 256x720
// it falls onto some storage buffer in surface cache that has bad dimensions
// Proper solution is to always merge when a cpu resource is created (it should absorb the render targets in range)
// We then should not have any 'dst-is-rendertarget' surfaces in use
// Option 2: Make surfaces here part of surface cache and do not pad them for optimization
// Surface cache is good at merging for resolve operations. This keeps integrity even when drawing to the render targets
// This option needs a lot more work
continue;
}
if (!It->is_clipped || allow_clipped)
{
return *It;
}
const auto _w = It->dst_area.width;
const auto _h = It->dst_area.height;
if (_w < width)
{
if ((_w * scale_x) <= 1.f)
continue;
}
if (_h < height)
{
if ((_h * scale_y) <= 1.f)
continue;
}
// Some surface exists, but its size is questionable
// Opt to re-upload (needs WCB/WDB to work properly)
break;
}
return {};
};
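// Checks that a heuristically extended section end is actually backed by memory:
// local (VRAM) addresses must stay within the mapped VRAM range, while main memory
// pages past the guaranteed write end must be readable.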
auto validate_memory_range = [](u32 base_address, u32 write_end, u32 heuristic_end)
{
if (heuristic_end <= write_end)
{
return true;
}
// Confirm if the pages actually exist in vm
if (get_location(base_address) == CELL_GCM_LOCATION_LOCAL)
{
const auto vram_end = rsx::get_current_renderer()->local_mem_size + rsx::constants::local_mem_base;
if (heuristic_end > vram_end)
{
// Outside available VRAM area
return false;
}
}
else
{
if (!vm::check_addr(write_end, vm::page_readable, (heuristic_end - write_end)))
{
// Enforce strict allocation size!
return false;
}
}
return true;
};
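// Sanity check for GPU-resident source/destination data. The result only matters when both the
// read and write buffer options for this surface type are enabled; in that case the backing
// cache section must still be locked, otherwise the CPU copy is assumed to have diverged.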
auto validate_fbo_integrity = [&](const utils::address_range& range, bool is_depth_texture)
{
const bool will_upload = is_depth_texture ? !!g_cfg.video.read_depth_buffer : !!g_cfg.video.read_color_buffers;
if (!will_upload)
{
// Give a pass. The data is lost anyway.
return true;
}
const bool should_be_locked = is_depth_texture ? !!g_cfg.video.write_depth_buffer : !!g_cfg.video.write_color_buffers;
if (!should_be_locked)
{
// Data is lost anyway.
return true;
}
// Optimal setup. We have ideal conditions presented so we can correctly decide what to do here.
const auto section = find_cached_texture(range, { .gcm_format = RSX_GCM_FORMAT_IGNORED }, false, false, false);
return section && section->is_locked();
};
// Check tiled mem
const auto dst_tile = get_tiled_region(utils::address_range::start_length(dst_address, dst.pitch * dst.clip_height));
const auto src_tile = get_tiled_region(utils::address_range::start_length(src_address, src.pitch * src.height));
const auto dst_is_tiled = !!dst_tile;
const auto src_is_tiled = !!src_tile;
// Check if src/dst are parts of render targets
typename surface_store_type::surface_overlap_info dst_subres;
bool use_null_region = false;
// TODO: Handle cases where src or dst can be a depth texture while the other is a color texture - requires a render pass to emulate
// NOTE: Grab the src first as requirements for reading are more strict than requirements for writing
auto src_subres = rtt_lookup(src_address, src_w, src_h, src.pitch, src_bpp, surface_access::transfer_read, false);
src_is_render_target = src_subres.surface != nullptr;
if (get_location(dst_address) == CELL_GCM_LOCATION_LOCAL)
{
// TODO: HACK
// After writing, it is required to lock the memory range from access!
dst_subres = rtt_lookup(dst_address, dst_w, dst_h, dst.pitch, dst_bpp, surface_access::transfer_write, false);
dst_is_render_target = dst_subres.surface != nullptr;
}
else
{
// Surface exists in main memory.
use_null_region = (is_copy_op && !is_format_convert);
// Invalidate surfaces in range. Sample tests should catch overlaps in theory.
m_rtts.invalidate_range(utils::address_range::start_length(dst_address, dst.pitch* dst_h));
}
// FBO re-validation. It is common for GPU and CPU data to desync as we do not have a way to share memory pages directly between the two (in most setups)
// To avoid losing data, we need to do some gymnastics
if (src_is_render_target && !validate_fbo_integrity(src_subres.surface->get_memory_range(), src_subres.is_depth))
{
src_is_render_target = false;
src_subres.surface = nullptr;
}
if (dst_is_render_target && !validate_fbo_integrity(dst_subres.surface->get_memory_range(), dst_subres.is_depth))
{
// This is a lot more serious than the src case. We have to signal surface cache to reload the memory and discard what we have GPU-side.
// Do the transfer CPU side and we should eventually "read" the data on RCB/RDB barrier.
dst_subres.surface->invalidate_GPU_memory();
return false;
}
if (src_is_render_target)
{
const auto surf = src_subres.surface;
const auto bpp = surf->get_bpp();
const bool typeless = (bpp != src_bpp || is_format_convert);
if (!typeless) [[likely]]
{
// Use format as-is
typeless_info.src_gcm_format = helpers::get_sized_blit_format(src_is_argb8, src_subres.is_depth, false);
}
else
{
// Enable type scaling in src
typeless_info.src_is_typeless = true;
typeless_info.src_scaling_hint = static_cast<f32>(bpp) / src_bpp;
typeless_info.src_gcm_format = helpers::get_sized_blit_format(src_is_argb8, false, is_format_convert);
}
if (surf->template get_surface_width<rsx::surface_metrics::pixels>() != surf->width() ||
surf->template get_surface_height<rsx::surface_metrics::pixels>() != surf->height())
{
// Must go through a scaling operation due to resolution scaling being present
ensure(g_cfg.video.resolution_scale_percent != 100);
use_null_region = false;
}
}
else
{
// Determine whether to perform this transfer on CPU or GPU (src data may not be graphical)
const bool is_trivial_copy = is_copy_op && !is_format_convert && !dst.swizzled && !dst_is_tiled && !src_is_tiled;
const bool is_block_transfer = (dst_w == src_w && dst_h == src_h && (src.pitch == dst.pitch || src_h == 1));
const bool is_mirror_op = (dst.scale_x < 0.f || dst.scale_y < 0.f);
if (dst_is_render_target)
{
if (is_trivial_copy && src_h == 1)
{
dst_is_render_target = false;
dst_subres = {};
}
}
// Always use GPU blit if src or dst is in the surface store
if (!dst_is_render_target)
{
if (is_trivial_copy)
{
// Check if trivial memcpy can perform the same task
// Used to copy programs and arbitrary data to the GPU in some cases
// NOTE: This case overrides the GPU texture scaling option
if (is_block_transfer && !is_mirror_op)
{
return false;
}
// If a matching section exists with a different use-case, fall back to CPU memcpy
skip_if_collision_exists = true;
}
if (!g_cfg.video.use_gpu_texture_scaling && !dst_is_tiled && !src_is_tiled)
{
if (dst.swizzled)
{
// Swizzle operation requested. Use fallback
return false;
}
if (is_trivial_copy && get_location(dst_address) != CELL_GCM_LOCATION_LOCAL)
{
// Trivial copy and the destination is in XDR memory
return false;
}
}
}
}
if (dst_is_render_target)
{
const auto bpp = dst_subres.surface->get_bpp();
const bool typeless = (bpp != dst_bpp || is_format_convert);
if (!typeless) [[likely]]
{
typeless_info.dst_gcm_format = helpers::get_sized_blit_format(dst_is_argb8, dst_subres.is_depth, false);
}
else
{
// Enable type scaling in dst
typeless_info.dst_is_typeless = true;
typeless_info.dst_scaling_hint = static_cast<f32>(bpp) / dst_bpp;
typeless_info.dst_gcm_format = helpers::get_sized_blit_format(dst_is_argb8, false, is_format_convert);
}
}
section_storage_type* cached_dest = nullptr;
section_storage_type* cached_src = nullptr;
bool dst_is_depth_surface = false;
u16 max_dst_width = dst.width;
u16 max_dst_height = dst.height;
areai src_area = { 0, 0, src_w, src_h };
areai dst_area = { 0, 0, dst_w, dst_h };
size2i dst_dimensions = { static_cast<s32>(dst.pitch / dst_bpp), dst.height };
position2i dst_offset = { dst.offset_x, dst.offset_y };
u32 dst_base_address = dst.rsx_address;
const auto src_payload_length = (src.pitch * (src_h - 1) + (src_w * src_bpp));
const auto dst_payload_length = (dst.pitch * (dst_h - 1) + (dst_w * dst_bpp));
const auto dst_range = address_range::start_length(dst_address, dst_payload_length);
if (!use_null_region && !dst_is_render_target)
{
size2u src_dimensions = { 0, 0 };
if (src_is_render_target)
{
src_dimensions.width = src_subres.surface->template get_surface_width<rsx::surface_metrics::samples>();
src_dimensions.height = src_subres.surface->template get_surface_height<rsx::surface_metrics::samples>();
}
const auto props = texture_cache_helpers::get_optimal_blit_target_properties(
src_is_render_target,
dst_range,
dst.pitch,
src_dimensions,
static_cast<size2u>(dst_dimensions)
);
if (props.use_dma_region)
{
// Try to use a dma flush
use_null_region = (is_copy_op && !is_format_convert);
}
else
{
if (props.offset)
{
// Calculate new offsets
dst_base_address = props.offset;
const auto new_offset = (dst_address - dst_base_address);
// Generate new offsets
dst_offset.y = new_offset / dst.pitch;
dst_offset.x = (new_offset % dst.pitch) / dst_bpp;
}
dst_dimensions.width = static_cast<s32>(props.width);
dst_dimensions.height = static_cast<s32>(props.height);
}
}
reader_lock lock(m_cache_mutex);
const auto old_dst_area = dst_area;
if (!dst_is_render_target)
{
// Check for any available region that will fit this one
u32 required_type_mask;
if (use_null_region)
{
required_type_mask = texture_upload_context::dma;
}
else
{
required_type_mask = texture_upload_context::blit_engine_dst;
if (skip_if_collision_exists) required_type_mask |= texture_upload_context::shader_read;
}
auto overlapping_surfaces = find_texture_from_range(dst_range, dst.pitch, required_type_mask);
for (const auto &surface : overlapping_surfaces)
{
if (!surface->is_locked())
{
// Discard
surface->set_dirty(true);
continue;
}
if (cached_dest)
{
// Nothing to do
continue;
}
if (!dst_range.inside(surface->get_section_range()))
{
// Hit test failed
continue;
}
if (use_null_region)
{
// Attach to existing region
cached_dest = surface;
// Technically it is totally possible to just extend a pre-existing section
// Will leave this as a TODO
continue;
}
if (skip_if_collision_exists) [[unlikely]]
{
if (surface->get_context() != texture_upload_context::blit_engine_dst)
{
// This section is likely to be 'flushed' to CPU for reupload soon anyway
return false;
}
}
// Prefer formats which will not trigger a typeless conversion later
// Only color formats are supported as destination as most access from blit engine will be color
switch (surface->get_gcm_format())
{
case CELL_GCM_TEXTURE_A8R8G8B8:
if (!dst_is_argb8) continue;
break;
case CELL_GCM_TEXTURE_R5G6B5:
if (dst_is_argb8) continue;
break;
default:
continue;
}
if (const auto this_address = surface->get_section_base();
const u32 address_offset = dst_address - this_address)
{
const u32 offset_y = address_offset / dst.pitch;
const u32 offset_x = address_offset % dst.pitch;
const u32 offset_x_in_block = offset_x / dst_bpp;
dst_area.x1 += offset_x_in_block;
dst_area.x2 += offset_x_in_block;
dst_area.y1 += offset_y;
dst_area.y2 += offset_y;
}
// Validate clipping region
if (static_cast<uint>(dst_area.x2) <= surface->get_width() &&
static_cast<uint>(dst_area.y2) <= surface->get_height())
{
cached_dest = surface;
dest_texture = cached_dest->get_raw_texture();
typeless_info.dst_context = cached_dest->get_context();
max_dst_width = cached_dest->get_width();
max_dst_height = cached_dest->get_height();
continue;
}
dst_area = old_dst_area;
}
if (cached_dest && cached_dest->get_context() != texture_upload_context::dma)
{
// NOTE: DMA sections are plain memory blocks with no format!
if (cached_dest) [[likely]]
{
typeless_info.dst_gcm_format = cached_dest->get_gcm_format();
dst_is_depth_surface = cached_dest->is_depth_texture();
}
}
}
else
{
// Destination dimensions are relaxed (true)
dst_area = dst_subres.src_area;
dest_texture = dst_subres.surface->get_surface(rsx::surface_access::transfer_write);
typeless_info.dst_context = texture_upload_context::framebuffer_storage;
dst_is_depth_surface = typeless_info.dst_is_typeless ? false : dst_subres.is_depth;
max_dst_width = static_cast<u16>(dst_subres.surface->template get_surface_width<rsx::surface_metrics::samples>() * typeless_info.dst_scaling_hint);
max_dst_height = dst_subres.surface->template get_surface_height<rsx::surface_metrics::samples>();
}
// Create source texture if does not exist
// TODO: This can be greatly improved with DMA optimizations. Most transfer operations here are actually non-graphical (no transforms applied)
if (!src_is_render_target)
{
// NOTE: Src address already takes into account the flipped nature of the overlap!
const u32 lookup_mask = rsx::texture_upload_context::blit_engine_src | rsx::texture_upload_context::blit_engine_dst | rsx::texture_upload_context::shader_read;
auto overlapping_surfaces = find_texture_from_range<false>(address_range::start_length(src_address, src_payload_length), src.pitch, lookup_mask);
auto old_src_area = src_area;
for (const auto &surface : overlapping_surfaces)
{
if (!surface->is_locked())
{
// TODO: Rejecting unlocked blit_engine dst causes stutter in SCV
// Surfaces marked as dirty have already been removed, leaving only flushed blit_dst data
continue;
}
// Force format matching; only accept 16-bit data for 16-bit transfers, 32-bit for 32-bit transfers
switch (surface->get_gcm_format())
{
case CELL_GCM_TEXTURE_X32_FLOAT:
case CELL_GCM_TEXTURE_Y16_X16:
case CELL_GCM_TEXTURE_Y16_X16_FLOAT:
{
// Should be copy compatible but not scaling compatible
if (src_is_argb8 && (is_copy_op || dst_is_render_target)) break;
continue;
}
case CELL_GCM_TEXTURE_DEPTH24_D8:
case CELL_GCM_TEXTURE_DEPTH24_D8_FLOAT:
{
// Should be copy compatible but not scaling compatible
if (src_is_argb8 && (is_copy_op || !dst_is_render_target)) break;
continue;
}
case CELL_GCM_TEXTURE_A8R8G8B8:
case CELL_GCM_TEXTURE_D8R8G8B8:
{
// Perfect match
if (src_is_argb8) break;
continue;
}
case CELL_GCM_TEXTURE_X16:
case CELL_GCM_TEXTURE_G8B8:
case CELL_GCM_TEXTURE_A1R5G5B5:
case CELL_GCM_TEXTURE_A4R4G4B4:
case CELL_GCM_TEXTURE_D1R5G5B5:
case CELL_GCM_TEXTURE_R5G5B5A1:
{
// Copy compatible
if (!src_is_argb8 && (is_copy_op || dst_is_render_target)) break;
continue;
}
case CELL_GCM_TEXTURE_DEPTH16:
case CELL_GCM_TEXTURE_DEPTH16_FLOAT:
{
// Copy compatible
if (!src_is_argb8 && (is_copy_op || !dst_is_render_target)) break;
continue;
}
case CELL_GCM_TEXTURE_R5G6B5:
{
// Perfect match
if (!src_is_argb8) break;
continue;
}
default:
{
continue;
}
}
const auto this_address = surface->get_section_base();
if (this_address > src_address)
{
continue;
}
if (const u32 address_offset = src_address - this_address)
{
const u32 offset_y = address_offset / src.pitch;
const u32 offset_x = address_offset % src.pitch;
const u32 offset_x_in_block = offset_x / src_bpp;
src_area.x1 += offset_x_in_block;
src_area.x2 += offset_x_in_block;
src_area.y1 += offset_y;
src_area.y2 += offset_y;
}
if (src_area.x2 <= surface->get_width() &&
src_area.y2 <= surface->get_height())
{
cached_src = surface;
break;
}
src_area = old_src_area;
}
if (!cached_src)
{
const u16 full_width = src.pitch / src_bpp;
u32 image_base = src.rsx_address;
u16 image_width = full_width;
u16 image_height = src.height;
// Check if memory is valid
const bool use_full_range = validate_memory_range(
image_base,
(src_address + src_payload_length),
image_base + (image_height * src.pitch));
if (use_full_range && dst.scale_x > 0.f && dst.scale_y > 0.f) [[likely]]
{
// Loading full image from the corner address
// Translate src_area into the declared block
src_area.x1 += src.offset_x;
src_area.x2 += src.offset_x;
src_area.y1 += src.offset_y;
src_area.y2 += src.offset_y;
}
else
{
image_base = src_address;
image_height = src_h;
}
std::vector<rsx::subresource_layout> subresource_layout;
rsx::subresource_layout subres = {};
subres.width_in_block = subres.width_in_texel = image_width;
subres.height_in_block = subres.height_in_texel = image_height;
subres.pitch_in_block = full_width;
subres.depth = 1;
subres.data = { vm::_ptr<const std::byte>(image_base), static_cast<std::span<const std::byte>::size_type>(src.pitch * image_height) };
subresource_layout.push_back(subres);
const u32 gcm_format = helpers::get_sized_blit_format(src_is_argb8, dst_is_depth_surface, is_format_convert);
const auto rsx_range = address_range::start_length(image_base, src.pitch * image_height);
lock.upgrade();
invalidate_range_impl_base(cmd, rsx_range, invalidation_cause::read, {}, std::forward<Args>(extras)...);
cached_src = upload_image_from_cpu(cmd, rsx_range, image_width, image_height, 1, 1, src.pitch, gcm_format, texture_upload_context::blit_engine_src,
subresource_layout, rsx::texture_dimension_extended::texture_dimension_2d, dst.swizzled);
typeless_info.src_gcm_format = gcm_format;
}
else
{
typeless_info.src_gcm_format = cached_src->get_gcm_format();
}
cached_src->add_ref();
vram_texture = cached_src->get_raw_texture();
typeless_info.src_context = cached_src->get_context();
}
else
{
src_area = src_subres.src_area;
vram_texture = src_subres.surface->get_surface(rsx::surface_access::transfer_read);
typeless_info.src_context = texture_upload_context::framebuffer_storage;
}
//const auto src_is_depth_format = helpers::is_gcm_depth_format(typeless_info.src_gcm_format);
const auto preferred_dst_format = helpers::get_sized_blit_format(dst_is_argb8, false, is_format_convert);
if (cached_dest && !use_null_region)
{
// Prep surface
auto channel_order = src_is_render_target ? rsx::component_order::native :
dst_is_argb8 ? rsx::component_order::default_ :
rsx::component_order::swapped_native;
set_component_order(*cached_dest, preferred_dst_format, channel_order);
}
// Validate clipping region
if ((dst.offset_x + dst.clip_x + dst.clip_width) > max_dst_width) dst.clip_x = 0;
if ((dst.offset_y + dst.clip_y + dst.clip_height) > max_dst_height) dst.clip_y = 0;
// Reproject clip offsets onto source to simplify blit
if (dst.clip_x || dst.clip_y)
{
const u16 scaled_clip_offset_x = static_cast<u16>(dst.clip_x / (scale_x * typeless_info.src_scaling_hint));
const u16 scaled_clip_offset_y = static_cast<u16>(dst.clip_y / scale_y);
src_area.x1 += scaled_clip_offset_x;
src_area.x2 += scaled_clip_offset_x;
src_area.y1 += scaled_clip_offset_y;
src_area.y2 += scaled_clip_offset_y;
}
if (!cached_dest && !dst_is_render_target)
{
ensure(!dest_texture);
// Need to calculate the minimum required size that will fit the data, anchored on the rsx_address
// If the application starts off with an 'inset' section, the guessed dimensions may not fit!
const u32 write_end = dst_address + dst_payload_length;
u32 block_end = dst_base_address + (dst.pitch * dst_dimensions.height);
// Confirm if the pages actually exist in vm
if (!validate_memory_range(dst_base_address, write_end, block_end))
{
block_end = write_end;
}
const u32 usable_section_length = std::max(write_end, block_end) - dst_base_address;
dst_dimensions.height = align2(usable_section_length, dst.pitch) / dst.pitch;
const u32 full_section_length = ((dst_dimensions.height - 1) * dst.pitch) + (dst_dimensions.width * dst_bpp);
const auto rsx_range = address_range::start_length(dst_base_address, full_section_length);
lock.upgrade();
// NOTE: Write flag set to remove all other overlapping regions (e.g shader_read or blit_src)
// NOTE: This step can potentially invalidate the newly created src image as well.
invalidate_range_impl_base(cmd, rsx_range, invalidation_cause::write, {}, std::forward<Args>(extras)...);
if (use_null_region) [[likely]]
{
bool force_dma_load = false;
if ((dst_w * dst_bpp) != dst.pitch)
{
// Keep Cell from touching the range we need
const auto prot_range = dst_range.to_page_range();
utils::memory_protect(vm::base(prot_range.start), prot_range.length(), utils::protection::no);
force_dma_load = true;
}
const image_section_attributes_t attrs =
{
.pitch = dst.pitch,
.width = static_cast<u16>(dst_dimensions.width),
.height = static_cast<u16>(dst_dimensions.height),
.bpp = dst_bpp
};
cached_dest = create_nul_section(cmd, rsx_range, attrs, dst_tile, force_dma_load);
}
else
{
// render target data is already in correct swizzle layout
auto channel_order = src_is_render_target ? rsx::component_order::native :
dst_is_argb8 ? rsx::component_order::default_ :
rsx::component_order::swapped_native;
// Translate dst_area into the 'full' dst block based on dst.rsx_address as (0, 0)
dst_area.x1 += dst_offset.x;
dst_area.x2 += dst_offset.x;
dst_area.y1 += dst_offset.y;
dst_area.y2 += dst_offset.y;
if (!dst_area.x1 && !dst_area.y1 && dst_area.x2 == dst_dimensions.width && dst_area.y2 == dst_dimensions.height)
{
cached_dest = create_new_texture(cmd, rsx_range, dst_dimensions.width, dst_dimensions.height, 1, 1, dst.pitch,
preferred_dst_format, rsx::texture_upload_context::blit_engine_dst, rsx::texture_dimension_extended::texture_dimension_2d,
dst.swizzled, channel_order, 0);
}
else
{
// HACK: workaround for data race with Cell
// Pre-lock the memory range we'll be touching, then load with super_ptr
const auto prot_range = dst_range.to_page_range();
utils::memory_protect(vm::base(prot_range.start), prot_range.length(), utils::protection::no);
const auto pitch_in_block = dst.pitch / dst_bpp;
std::vector<rsx::subresource_layout> subresource_layout;
rsx::subresource_layout subres = {};
subres.width_in_block = subres.width_in_texel = dst_dimensions.width;
subres.height_in_block = subres.height_in_texel = dst_dimensions.height;
subres.pitch_in_block = pitch_in_block;
subres.depth = 1;
subres.data = { vm::get_super_ptr<const std::byte>(dst_base_address), static_cast<std::span<const std::byte>::size_type>(dst.pitch * dst_dimensions.height) };
subresource_layout.push_back(subres);
cached_dest = upload_image_from_cpu(cmd, rsx_range, dst_dimensions.width, dst_dimensions.height, 1, 1, dst.pitch,
preferred_dst_format, rsx::texture_upload_context::blit_engine_dst, subresource_layout,
rsx::texture_dimension_extended::texture_dimension_2d, dst.swizzled);
set_component_order(*cached_dest, preferred_dst_format, channel_order);
}
dest_texture = cached_dest->get_raw_texture();
typeless_info.dst_context = texture_upload_context::blit_engine_dst;
typeless_info.dst_gcm_format = preferred_dst_format;
}
}
ensure(cached_dest || dst_is_render_target);
// Invalidate any cached subresources in modified range
notify_surface_changed(dst_range);
// What type of data is being moved?
const auto raster_type = src_is_render_target ? src_subres.surface->raster_type : rsx::surface_raster_type::undefined;
if (cached_dest)
{
// Validate modified range
u32 mem_offset = dst_address - cached_dest->get_section_base();
ensure((mem_offset + dst_payload_length) <= cached_dest->get_section_size());
lock.upgrade();
cached_dest->reprotect(utils::protection::no, { mem_offset, dst_payload_length });
cached_dest->touch(m_cache_update_tag);
update_cache_tag();
// Set swizzle flag
cached_dest->set_swizzled(raster_type == rsx::surface_raster_type::swizzle || dst.swizzled);
}
else
{
// NOTE: This doesn't work very well in case of Cell access
// Need to lock the affected memory range and actually attach this subres to a locked_region
dst_subres.surface->on_write_copy(rsx::get_shared_tag(), false, raster_type);
// Reset this object's synchronization status if it is locked
lock.upgrade();
if (const auto found = find_cached_texture(dst_subres.surface->get_memory_range(), { .gcm_format = RSX_GCM_FORMAT_IGNORED }, false, false, false))
{
if (found->is_locked())
{
if (found->get_rsx_pitch() == dst.pitch)
{
// It is possible for other resource types to overlap this fbo if it only covers a small section of its max width.
// Blit engine read and write resources do not allow clipping and would have been recreated at the same address.
// TODO: In cases of clipped data, generate the blit resources in the surface cache instead.
if (found->get_context() == rsx::texture_upload_context::framebuffer_storage)
{
found->touch(m_cache_update_tag);
update_cache_tag();
}
}
else
{
// Unlikely situation, but the only one which would allow re-upload from CPU to overlap this section.
if (found->is_flushable())
{
// Technically this is possible in games that may change surface pitch at random (insomniac engine)
// FIXME: A proper fix includes pitch conversion and surface inheritance chains between surface targets and blit targets (unified cache) which is a very long-term thing.
const auto range = found->get_section_range();
rsx_log.error("[Pitch Mismatch] GPU-resident data at 0x%x->0x%x is discarded due to surface cache data clobbering it.", range.start, range.end);
}
found->discard(true);
}
}
}
}
if (src_is_render_target)
{
const auto surface_width = src_subres.surface->template get_surface_width<rsx::surface_metrics::pixels>();
const auto surface_height = src_subres.surface->template get_surface_height<rsx::surface_metrics::pixels>();
std::tie(src_area.x1, src_area.y1) = rsx::apply_resolution_scale<false>(src_area.x1, src_area.y1, surface_width, surface_height);
std::tie(src_area.x2, src_area.y2) = rsx::apply_resolution_scale<true>(src_area.x2, src_area.y2, surface_width, surface_height);
// The resource is of surface type; possibly disabled AA emulation
src_subres.surface->transform_blit_coordinates(rsx::surface_access::transfer_read, src_area);
}
if (dst_is_render_target)
{
const auto surface_width = dst_subres.surface->template get_surface_width<rsx::surface_metrics::pixels>();
const auto surface_height = dst_subres.surface->template get_surface_height<rsx::surface_metrics::pixels>();
std::tie(dst_area.x1, dst_area.y1) = rsx::apply_resolution_scale<false>(dst_area.x1, dst_area.y1, surface_width, surface_height);
std::tie(dst_area.x2, dst_area.y2) = rsx::apply_resolution_scale<true>(dst_area.x2, dst_area.y2, surface_width, surface_height);
// The resource is of surface type; possibly disabled AA emulation
dst_subres.surface->transform_blit_coordinates(rsx::surface_access::transfer_write, dst_area);
}
if (helpers::is_gcm_depth_format(typeless_info.src_gcm_format) !=
helpers::is_gcm_depth_format(typeless_info.dst_gcm_format))
{
// Make the depth side typeless because the other side is guaranteed to be color
if (helpers::is_gcm_depth_format(typeless_info.src_gcm_format))
{
// SRC is depth, transfer must be done typelessly
if (!typeless_info.src_is_typeless)
{
typeless_info.src_is_typeless = true;
typeless_info.src_gcm_format = helpers::get_sized_blit_format(src_is_argb8, false, false);
}
}
else
{
// DST is depth, transfer must be done typelessly
if (!typeless_info.dst_is_typeless)
{
typeless_info.dst_is_typeless = true;
typeless_info.dst_gcm_format = helpers::get_sized_blit_format(dst_is_argb8, false, false);
}
}
}
if (!use_null_region) [[likely]]
{
// Do preliminary analysis
typeless_info.analyse();
blitter.scale_image(cmd, vram_texture, dest_texture, src_area, dst_area, interpolate, typeless_info);
}
else if (cached_dest)
{
cached_dest->dma_transfer(cmd, vram_texture, src_area, dst_range, dst.pitch);
}
if (cached_src)
{
cached_src->release();
}
blit_op_result result = true;
if (cached_dest)
{
result.real_dst_address = cached_dest->get_section_base();
result.real_dst_size = cached_dest->get_section_size();
}
else
{
result.real_dst_address = dst_base_address;
result.real_dst_size = dst.pitch * dst_dimensions.height;
}
return result;
}
void do_update()
{
if (!m_flush_always_cache.empty())
{
if (m_cache_update_tag.load() != m_flush_always_update_timestamp)
{
std::lock_guard lock(m_cache_mutex);
bool update_tag = false;
for (const auto &It : m_flush_always_cache)
{
auto& section = *(It.second);
if (section.get_protection() != utils::protection::no)
{
ensure(section.exists());
AUDIT(section.get_context() == texture_upload_context::framebuffer_storage);
AUDIT(section.get_memory_read_flags() == memory_read_flags::flush_always);
section.reprotect(utils::protection::no);
update_tag = true;
}
}
if (update_tag) update_cache_tag();
m_flush_always_update_timestamp = m_cache_update_tag.load();
#ifdef TEXTURE_CACHE_DEBUG
// Check that the cache has the correct protections
m_storage.verify_protection();
#endif // TEXTURE_CACHE_DEBUG
}
}
}
predictor_type& get_predictor()
{
return m_predictor;
}
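// Returns true if the section anchored at section_base_address exists, was created by the given
// upload context, is still locked, and its protected range fully covers test_range.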
bool is_protected(u32 section_base_address, const address_range& test_range, rsx::texture_upload_context context)
{
reader_lock lock(m_cache_mutex);
const auto& block = m_storage.block_for(section_base_address);
for (const auto& tex : block)
{
if (tex.get_section_base() == section_base_address)
{
return tex.get_context() == context &&
tex.is_locked() &&
test_range.inside(tex.get_section_range());
}
}
return false;
}
/**
* The read only texture invalidate flag is set if a read only texture is trampled by framebuffer memory
* If set, all cached read only textures are considered invalid and should be re-fetched from the texture cache
*/
void clear_ro_tex_invalidate_intr()
{
read_only_tex_invalidate = false;
}
bool get_ro_tex_invalidate_intr() const
{
return read_only_tex_invalidate;
}
/**
* Per-frame statistics
*/
void reset_frame_statistics()
{
m_flushes_this_frame.store(0u);
m_misses_this_frame.store(0u);
m_speculations_this_frame.store(0u);
m_unavoidable_hard_faults_this_frame.store(0u);
m_texture_upload_calls_this_frame.store(0u);
m_texture_upload_misses_this_frame.store(0u);
m_texture_copies_ellided_this_frame.store(0u);
}
void on_flush()
{
m_flushes_this_frame++;
}
void on_speculative_flush()
{
m_speculations_this_frame++;
}
void on_misprediction()
{
m_predictor.on_misprediction();
}
void on_miss(const section_storage_type& section)
{
m_misses_this_frame++;
if (section.get_memory_read_flags() == memory_read_flags::flush_always)
{
m_unavoidable_hard_faults_this_frame++;
}
}
virtual u32 get_unreleased_textures_count() const
{
return m_storage.m_unreleased_texture_objects;
}
u64 get_texture_memory_in_use() const
{
return m_storage.m_texture_memory_in_use;
}
u32 get_num_flush_requests() const
{
return m_flushes_this_frame;
}
u32 get_num_cache_mispredictions() const
{
return m_predictor.m_mispredictions_this_frame;
}
u32 get_num_cache_speculative_writes() const
{
return m_speculations_this_frame;
}
u32 get_num_cache_misses() const
{
return m_misses_this_frame;
}
u32 get_num_unavoidable_hard_faults() const
{
return m_unavoidable_hard_faults_this_frame;
}
f32 get_cache_miss_ratio() const
{
const auto num_flushes = m_flushes_this_frame.load();
return (num_flushes == 0u) ? 0.f : static_cast<f32>(m_misses_this_frame.load()) / num_flushes;
}
u32 get_texture_upload_calls_this_frame() const
{
return m_texture_upload_calls_this_frame;
}
u32 get_texture_upload_misses_this_frame() const
{
return m_texture_upload_misses_this_frame;
}
u32 get_texture_upload_miss_percentage() const
{
return (m_texture_upload_calls_this_frame)? (m_texture_upload_misses_this_frame * 100 / m_texture_upload_calls_this_frame) : 0;
}
u32 get_texture_copies_ellided_this_frame() const
{
return m_texture_copies_ellided_this_frame;
}
};
}
| size: 122,083 | language: C++ | extension: .h | total_lines: 3,082 | avg_line_length: 34.249838 | max_line_length: 228 | alphanum_fraction: 0.674725 | repo_name: RPCS3/rpcs3 | repo_stars: 15,204 | repo_forks: 1,895 | repo_open_issues: 1,021 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | exact_duplicates_redpajama: false | near_duplicates_redpajama: false | exact_duplicates_githubcode: false | exact_duplicates_stackv2: false | exact_duplicates_stackv1: false | near_duplicates_githubcode: false | near_duplicates_stackv1: false | near_duplicates_stackv2: false |
| id: 6,004 | file_name: texture_cache_helpers.h | file_path: RPCS3_rpcs3/rpcs3/Emu/RSX/Common/texture_cache_helpers.h |
#pragma once
#include "../rsx_utils.h"
#include "TextureUtils.h"
namespace rsx
{
// Defines pixel operation to be performed on a surface before it is ready for use
enum surface_transform : u32
{
identity = 0, // Nothing
coordinate_transform = 1 // Incoming source coordinates may be generated based on the format of the secondary (dest) surface. Recalculate them before use.
};
template<typename image_resource_type>
struct copy_region_descriptor_base
{
image_resource_type src;
flags32_t xform;
u32 base_addr;
u8 level;
u16 src_x;
u16 src_y;
u16 dst_x;
u16 dst_y;
u16 dst_z;
u16 src_w;
u16 src_h;
u16 dst_w;
u16 dst_h;
};
// Deferred texture processing commands
enum class deferred_request_command : u32
{
nop = 0, // Nothing
copy_image_static, // Copy image and cache the results
copy_image_dynamic, // Copy image but do not cache the results
cubemap_gather, // Provided list of sections generates a cubemap
cubemap_unwrap, // One large texture provided to be partitioned into a cubemap
atlas_gather, // Provided list of sections generates a texture atlas
_3d_gather, // Provided list of sections generates a 3D array
_3d_unwrap, // One large texture provided to be partitioned into a 3D array
mipmap_gather, // Provided list of sections to be reassembled as mipmap levels of the same texture
blit_image_static, // Variant of the copy command that does scaling instead of copying
};
struct image_section_attributes_t
{
u32 address;
u32 gcm_format;
u32 pitch;
u16 width;
u16 height;
u16 depth;
u16 mipmaps;
u16 slice_h;
u8 bpp;
bool swizzled;
bool edge_clamped;
};
struct blit_op_result
{
bool succeeded = false;
u32 real_dst_address = 0;
u32 real_dst_size = 0;
blit_op_result(bool success) : succeeded(success)
{}
inline address_range to_address_range() const
{
return address_range::start_length(real_dst_address, real_dst_size);
}
};
struct blit_target_properties
{
bool use_dma_region;
u32 offset;
u32 width;
u32 height;
};
struct texture_cache_search_options
{
u8 lookup_mask = 0xff;
bool is_compressed_format = false;
bool skip_texture_barriers = false;
bool skip_texture_merge = false;
bool prefer_surface_cache = false;
};
namespace texture_cache_helpers
{
static inline bool force_strict_fbo_sampling(u8 samples)
{
if (g_cfg.video.strict_rendering_mode)
{
// Strict mode. All access is strict.
return true;
}
if (g_cfg.video.antialiasing_level == msaa_level::none)
{
// MSAA disabled. All access is fast.
return false;
}
// Strict access if MSAA only.
return samples > 1 && !!g_cfg.video.force_hw_MSAA_resolve;
}
static inline bool is_gcm_depth_format(u32 format)
{
switch (format)
{
case CELL_GCM_TEXTURE_DEPTH16:
case CELL_GCM_TEXTURE_DEPTH16_FLOAT:
case CELL_GCM_TEXTURE_DEPTH24_D8:
case CELL_GCM_TEXTURE_DEPTH24_D8_FLOAT:
return true;
default:
return false;
}
}
static inline u32 get_compatible_depth_format(u32 gcm_format)
{
switch (gcm_format)
{
case CELL_GCM_TEXTURE_DEPTH24_D8:
case CELL_GCM_TEXTURE_DEPTH24_D8_FLOAT:
case CELL_GCM_TEXTURE_DEPTH16:
case CELL_GCM_TEXTURE_DEPTH16_FLOAT:
return gcm_format;
case CELL_GCM_TEXTURE_A8R8G8B8:
return CELL_GCM_TEXTURE_DEPTH24_D8;
case CELL_GCM_TEXTURE_X16:
//case CELL_GCM_TEXTURE_A4R4G4B4:
//case CELL_GCM_TEXTURE_G8B8:
//case CELL_GCM_TEXTURE_A1R5G5B5:
//case CELL_GCM_TEXTURE_R5G5B5A1:
//case CELL_GCM_TEXTURE_R5G6B5:
//case CELL_GCM_TEXTURE_R6G5B5:
return CELL_GCM_TEXTURE_DEPTH16;
}
rsx_log.error("Unsupported depth conversion (0x%X)", gcm_format);
return gcm_format;
}
static inline u32 get_sized_blit_format(bool is_32_bit, bool depth_format, bool /*is_format_convert*/)
{
if (is_32_bit)
{
return (!depth_format) ? CELL_GCM_TEXTURE_A8R8G8B8 : CELL_GCM_TEXTURE_DEPTH24_D8;
}
else
{
return (!depth_format) ? CELL_GCM_TEXTURE_R5G6B5 : CELL_GCM_TEXTURE_DEPTH16;
}
}
static inline bool is_compressed_gcm_format(u32 format)
{
switch (format)
{
default:
return false;
case CELL_GCM_TEXTURE_COMPRESSED_DXT1:
case CELL_GCM_TEXTURE_COMPRESSED_DXT23:
case CELL_GCM_TEXTURE_COMPRESSED_DXT45:
case CELL_GCM_TEXTURE_COMPRESSED_B8R8_G8R8:
case CELL_GCM_TEXTURE_COMPRESSED_R8B8_R8G8:
case CELL_GCM_TEXTURE_COMPRESSED_HILO8:
case CELL_GCM_TEXTURE_COMPRESSED_HILO_S8:
return true;
}
}
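// Chooses the dimensions of the destination section a blit should be written into.
// If the write lands inside a display buffer the buffer's dimensions are reused (or a DMA
// region is requested when the write merely clips it); otherwise the size is guessed from the
// transfer itself with a few heuristics for common render target widths.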
static inline blit_target_properties get_optimal_blit_target_properties(
bool src_is_render_target,
address_range dst_range,
u32 dst_pitch,
const sizeu src_dimensions,
const sizeu dst_dimensions)
{
if (get_location(dst_range.start) == CELL_GCM_LOCATION_LOCAL)
{
// Check if this is a blit to the output buffer
// TODO: This can be used to implement reference tracking to possibly avoid downscaling
const auto renderer = rsx::get_current_renderer();
for (u32 i = 0; i < renderer->display_buffers_count; ++i)
{
const auto& buffer = renderer->display_buffers[i];
if (!buffer.valid())
{
continue;
}
const u32 bpp = g_fxo->get<rsx::avconf>().get_bpp();
const u32 pitch = buffer.pitch ? +buffer.pitch : bpp * buffer.width;
if (pitch != dst_pitch)
{
continue;
}
const auto buffer_range = address_range::start_length(rsx::get_address(buffer.offset, CELL_GCM_LOCATION_LOCAL), pitch * (buffer.height - 1) + (buffer.width * bpp));
if (dst_range.inside(buffer_range))
{
// Match found
return { false, buffer_range.start, buffer.width, buffer.height };
}
if (dst_range.overlaps(buffer_range)) [[unlikely]]
{
// The range clips the destination but does not fit inside it
// Use DMA stream to optimize the flush that is likely to happen when flipping
return { true };
}
}
}
if (src_is_render_target)
{
// Attempt to optimize...
if (dst_dimensions.width == 1280 || dst_dimensions.width == 2560) [[likely]]
{
// Optimizations table based on common width/height pairings. If we guess wrong, the upload resolver will fix it anyway
// TODO: Add more entries based on empirical data
const auto optimal_height = std::max(dst_dimensions.height, 720u);
return { false, 0, dst_dimensions.width, optimal_height };
}
if (dst_dimensions.width == src_dimensions.width)
{
const auto optimal_height = std::max(dst_dimensions.height, src_dimensions.height);
return { false, 0, dst_dimensions.width, optimal_height };
}
}
return { false, 0, dst_dimensions.width, dst_dimensions.height };
}
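// Gathers copy descriptors for every slice of a cubemap/3D/atlas target from two candidate
// sources: overlapping render targets (fbos) and cached blit_engine_dst sections (local).
// When both lists contain entries they are interleaved by last-write timestamp so that newer
// data is copied after (and therefore over) older data.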
template<typename commandbuffer_type, typename section_storage_type, typename copy_region_type, typename surface_store_list_type>
void gather_texture_slices(
commandbuffer_type& cmd,
std::vector<copy_region_type>& out,
const surface_store_list_type& fbos,
const std::vector<section_storage_type*>& local,
const image_section_attributes_t& attr,
u16 count, bool /*is_depth*/)
{
// Need to preserve sorting order
struct sort_helper
{
u64 tag; // Timestamp
u32 list; // List source, 0 = fbo, 1 = local
u32 index; // Index in list
};
std::vector<sort_helper> sort_list;
if (!fbos.empty() && !local.empty())
{
// Generate sorting tree if both resources are available and overlapping
sort_list.reserve(fbos.size() + local.size());
for (u32 index = 0; index < fbos.size(); ++index)
{
sort_list.push_back({ fbos[index].surface->last_use_tag, 0, index });
}
for (u32 index = 0; index < local.size(); ++index)
{
if (local[index]->get_context() != rsx::texture_upload_context::blit_engine_dst)
continue;
sort_list.push_back({ local[index]->last_write_tag, 1, index });
}
std::sort(sort_list.begin(), sort_list.end(), FN(x.tag < y.tag));
}
auto add_rtt_resource = [&](auto& section, u16 slice)
{
const u32 slice_begin = (slice * attr.slice_h);
const u32 slice_end = (slice_begin + attr.height);
const u32 section_end = section.dst_area.y + section.dst_area.height;
if (section.dst_area.y >= slice_end || section_end <= slice_begin)
{
// Belongs to a different slice
return;
}
// How much of this slice to read?
int rebased = int(section.dst_area.y) - slice_begin;
auto src_x = section.src_area.x;
auto dst_x = section.dst_area.x;
auto src_y = section.src_area.y;
auto dst_y = section.dst_area.y;
if (rebased < 0)
{
const u16 delta = u16(-rebased);
src_y += delta;
dst_y += delta;
ensure(dst_y == slice_begin);
}
ensure(dst_y >= slice_begin);
const auto h = std::min(section_end, slice_end) - dst_y;
dst_y = (dst_y - slice_begin);
const auto surface_width = section.surface->template get_surface_width<rsx::surface_metrics::pixels>();
const auto surface_height = section.surface->template get_surface_height<rsx::surface_metrics::pixels>();
const auto [src_width, src_height] = rsx::apply_resolution_scale<true>(section.src_area.width, h, surface_width, surface_height);
const auto [dst_width, dst_height] = rsx::apply_resolution_scale<true>(section.dst_area.width, h, attr.width, attr.height);
std::tie(src_x, src_y) = rsx::apply_resolution_scale<false>(src_x, src_y, surface_width, surface_height);
std::tie(dst_x, dst_y) = rsx::apply_resolution_scale<false>(dst_x, dst_y, attr.width, attr.height);
section.surface->memory_barrier(cmd, rsx::surface_access::transfer_read);
out.push_back
({
.src = section.surface->get_surface(rsx::surface_access::transfer_read),
.xform = surface_transform::identity,
.base_addr = section.base_address,
.level = 0,
.src_x = static_cast<u16>(src_x),
.src_y = static_cast<u16>(src_y),
.dst_x = static_cast<u16>(dst_x),
.dst_y = static_cast<u16>(dst_y),
.dst_z = slice,
.src_w = src_width,
.src_h = src_height,
.dst_w = dst_width,
.dst_h = dst_height
});
};
auto add_local_resource = [&](auto& section, u32 address, u16 slice, bool scaling = true)
{
// Intersect this resource with the original one.
// Note that intersection takes place in a normalized coordinate space (bpp = 1)
const u32 section_bpp = get_format_block_size_in_bytes(section->get_gcm_format());
const u32 normalized_section_width = (section->get_width() * section_bpp);
const u32 normalized_attr_width = (attr.width * attr.bpp);
auto [src_offset, dst_offset, dimensions] = rsx::intersect_region(
section->get_section_base(), normalized_section_width, section->get_height(), /* parent region (extractee) */
address, normalized_attr_width, attr.slice_h, /* child region (extracted) */
attr.pitch);
if (!dimensions.width || !dimensions.height)
{
// Out of bounds, invalid intersection
return;
}
// The intersection takes place in a normalized coordinate space. Now we convert back to domain-specific
src_offset.x /= section_bpp;
dst_offset.x /= attr.bpp;
const size2u dst_size = { dimensions.width / attr.bpp, dimensions.height };
const size2u src_size = { dimensions.width / section_bpp, dimensions.height };
const u32 dst_slice_begin = slice * attr.slice_h; // Output slice low watermark
const u32 dst_slice_end = dst_slice_begin + attr.height; // Output slice high watermark
const auto dst_y = dst_offset.y;
const auto dst_h = dst_size.height;
const auto write_section_end = dst_y + dst_h;
if (dst_y >= dst_slice_end || write_section_end <= dst_slice_begin)
{
// Belongs to a different slice
return;
}
const u16 dst_w = static_cast<u16>(dst_size.width);
const u16 src_w = static_cast<u16>(src_size.width);
const u16 height = std::min(dst_slice_end, write_section_end) - dst_y;
if (scaling)
{
// Since output is upscaled, also upscale on dst
const auto [_dst_x, _dst_y] = rsx::apply_resolution_scale<false>(static_cast<u16>(dst_offset.x), static_cast<u16>(dst_y - dst_slice_begin), attr.width, attr.height);
const auto [_dst_w, _dst_h] = rsx::apply_resolution_scale<true>(dst_w, height, attr.width, attr.height);
out.push_back
({
.src = section->get_raw_texture(),
.xform = surface_transform::identity,
.level = 0,
.src_x = static_cast<u16>(src_offset.x), // src.x
.src_y = static_cast<u16>(src_offset.y), // src.y
.dst_x = _dst_x, // dst.x
.dst_y = _dst_y, // dst.y
.dst_z = slice,
.src_w = src_w,
.src_h = height,
.dst_w = _dst_w,
.dst_h = _dst_h
});
}
else
{
out.push_back
({
.src = section->get_raw_texture(),
.xform = surface_transform::identity,
.level = 0,
.src_x = static_cast<u16>(src_offset.x), // src.x
.src_y = static_cast<u16>(src_offset.y), // src.y
.dst_x = static_cast<u16>(dst_offset.x), // dst.x
.dst_y = static_cast<u16>(dst_y - dst_slice_begin), // dst.y
.dst_z = 0,
.src_w = src_w,
.src_h = height,
.dst_w = dst_w,
.dst_h = height
});
}
};
u32 current_address = attr.address;
//u16 current_src_offset = 0;
//u16 current_dst_offset = 0;
u32 slice_size = (attr.pitch * attr.slice_h);
out.reserve(count);
u16 found_slices = 0;
for (u16 slice = 0; slice < count; ++slice)
{
auto num_surface = out.size();
if (local.empty()) [[likely]]
{
for (auto& section : fbos)
{
add_rtt_resource(section, slice);
}
}
else if (fbos.empty())
{
for (auto& section : local)
{
add_local_resource(section, current_address, slice, false);
}
}
else
{
for (const auto& e : sort_list)
{
if (e.list == 0)
{
add_rtt_resource(fbos[e.index], slice);
}
else
{
add_local_resource(local[e.index], current_address, slice);
}
}
}
current_address += slice_size;
if (out.size() != num_surface)
{
found_slices++;
}
}
if (found_slices < count)
{
if (found_slices > 0)
{
// TODO: Gather remaining sides from the texture cache or upload from cpu (too slow?)
rsx_log.warning("Could not gather all required slices for cubemap/3d generation");
}
else
{
rsx_log.warning("Could not gather textures into an atlas; using CPU fallback...");
}
}
}
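// Worked example (illustrative, not part of the original source): for a cubemap gather
// (count = 6) with attr.pitch = 0x1000 and attr.slice_h = 256, slice_size = 0x100000,
// so slice n is searched for at attr.address + n * 0x100000 in both the fbo and local lists.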
template<typename render_target_type>
bool check_framebuffer_resource(
render_target_type texptr,
const image_section_attributes_t& attr,
texture_dimension_extended extended_dimension)
{
if (!rsx::pitch_compatible(texptr, attr.pitch, attr.height))
{
return false;
}
const auto surface_width = texptr->template get_surface_width<rsx::surface_metrics::samples>();
const auto surface_height = texptr->template get_surface_height<rsx::surface_metrics::samples>();
switch (extended_dimension)
{
case rsx::texture_dimension_extended::texture_dimension_1d:
return (surface_width >= attr.width);
case rsx::texture_dimension_extended::texture_dimension_2d:
return (surface_width >= attr.width && surface_height >= attr.height);
case rsx::texture_dimension_extended::texture_dimension_3d:
return (surface_width >= attr.width && surface_height >= u32{attr.slice_h} * attr.depth);
case rsx::texture_dimension_extended::texture_dimension_cubemap:
return (surface_width == attr.height && surface_width >= attr.width && surface_height >= (u32{attr.slice_h} * 6));
}
return false;
}
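// For example (illustrative): a cubemap request with attr.width = attr.height = 256 and
// attr.slice_h = 256 only matches a surface that is exactly 256 samples wide (square faces
// are assumed) and at least 256 * 6 = 1536 samples tall, per the cubemap case above.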
template <typename sampled_image_descriptor>
void calculate_sample_clip_parameters(
sampled_image_descriptor& desc,
const position2i& offset,
const size2i& desired_dimensions,
const size2i& actual_dimensions)
{
// Back up the transformation before we destructively modify it.
desc.push_texcoord_xform();
desc.texcoord_xform.scale[0] *= f32(desired_dimensions.width) / actual_dimensions.width;
desc.texcoord_xform.scale[1] *= f32(desired_dimensions.height) / actual_dimensions.height;
desc.texcoord_xform.bias[0] += f32(offset.x) / actual_dimensions.width;
desc.texcoord_xform.bias[1] += f32(offset.y) / actual_dimensions.height;
desc.texcoord_xform.clamp_min[0] = (offset.x + 0.49999f) / actual_dimensions.width;
desc.texcoord_xform.clamp_min[1] = (offset.y + 0.49999f) / actual_dimensions.height;
desc.texcoord_xform.clamp_max[0] = (offset.x + desired_dimensions.width - 0.50001f) / actual_dimensions.width;
desc.texcoord_xform.clamp_max[1] = (offset.y + desired_dimensions.height - 0.50001f) / actual_dimensions.height;
desc.texcoord_xform.clamp = true;
}
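// Worked example (illustrative): sampling a 64x64 window at offset (16, 8) out of a
// 128x128 image scales the incoming texcoords by (0.5, 0.5), biases them by (0.125, 0.0625),
// and clamps to roughly [0.1289, 0.6211] x [0.0664, 0.5586] so that filtering never reads
// outside the requested window.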
template <typename sampled_image_descriptor>
void convert_image_copy_to_clip_descriptor(
sampled_image_descriptor& desc,
const position2i& offset,
const size2i& desired_dimensions,
const size2i& actual_dimensions,
const texture_channel_remap_t& decoded_remap,
bool cyclic_reference)
{
desc.image_handle = desc.external_subresource_desc.as_viewable()->get_view(decoded_remap);
desc.ref_address = desc.external_subresource_desc.external_ref_addr;
desc.is_cyclic_reference = cyclic_reference;
desc.samples = desc.external_subresource_desc.external_handle->samples();
desc.external_subresource_desc = {};
calculate_sample_clip_parameters(desc, offset, desired_dimensions, actual_dimensions);
}
template <typename sampled_image_descriptor>
void convert_image_blit_to_clip_descriptor(
sampled_image_descriptor& desc,
const texture_channel_remap_t& decoded_remap,
bool cyclic_reference)
{
// Our "desired" output is the source window, and the "actual" output is the real size
const auto& section = desc.external_subresource_desc.sections_to_copy[0];
// Apply AA correct factor
auto surface_width = section.src->width();
auto surface_height = section.src->height();
switch (section.src->samples())
{
case 1:
break;
case 2:
surface_width *= 2;
break;
case 4:
surface_width *= 2;
surface_height *= 2;
break;
default:
fmt::throw_exception("Unsupported MSAA configuration");
}
// First, we convert this descriptor to a copy descriptor
desc.external_subresource_desc.external_handle = section.src;
desc.external_subresource_desc.external_ref_addr = section.base_addr;
// Now apply conversion
convert_image_copy_to_clip_descriptor(
desc,
position2i(section.src_x, section.src_y),
size2i(section.src_w, section.src_h),
size2i(surface_width, surface_height),
decoded_remap,
cyclic_reference);
}
template <typename sampled_image_descriptor, typename commandbuffer_type, typename render_target_type>
sampled_image_descriptor process_framebuffer_resource_fast(commandbuffer_type& cmd,
render_target_type texptr,
const image_section_attributes_t& attr,
const size3f& scale,
texture_dimension_extended extended_dimension,
const texture_channel_remap_t& decoded_remap,
bool surface_is_rop_target,
bool force_convert)
{
const auto surface_width = texptr->template get_surface_width<rsx::surface_metrics::samples>();
const auto surface_height = texptr->template get_surface_height<rsx::surface_metrics::samples>();
bool is_depth = texptr->is_depth_surface();
auto attr2 = attr;
if (rsx::get_resolution_scale_percent() != 100)
{
const auto [scaled_w, scaled_h] = rsx::apply_resolution_scale<true>(attr.width, attr.height, surface_width, surface_height);
const auto [unused, scaled_slice_h] = rsx::apply_resolution_scale<false>(RSX_SURFACE_DIMENSION_IGNORED, attr.slice_h, surface_width, surface_height);
attr2.width = scaled_w;
attr2.height = scaled_h;
attr2.slice_h = scaled_slice_h;
}
if (const bool gcm_format_is_depth = is_gcm_depth_format(attr2.gcm_format);
gcm_format_is_depth != is_depth)
{
if (force_convert || gcm_format_is_depth)
{
// If force_convert is set, we already know there is no simple workaround. Bitcast will be forced to resolve the issue.
// If the existing texture is a color texture but depth readout is requested, force bitcast
// Note that if only reading the depth value was needed from a depth surface, it would have been sampled as color due to Z comparison.
is_depth = gcm_format_is_depth;
force_convert = true;
}
else
{
// Existing texture is a depth texture, but RSX wants a color texture.
// Change the RSX request to a compatible depth texture to give same results in shader.
ensure(is_depth);
attr2.gcm_format = get_compatible_depth_format(attr2.gcm_format);
}
// Always make sure the conflict is resolved!
ensure(is_gcm_depth_format(attr2.gcm_format) == is_depth);
}
if (extended_dimension == rsx::texture_dimension_extended::texture_dimension_2d ||
extended_dimension == rsx::texture_dimension_extended::texture_dimension_1d) [[likely]]
{
if (extended_dimension == rsx::texture_dimension_extended::texture_dimension_1d)
{
ensure(attr.height == 1);
}
// A GPU operation must be performed on the data before sampling. Implies transfer_read access.
bool requires_processing = force_convert;
// A GPU clip operation may be performed by combining texture coordinate scaling with a clamp.
bool requires_clip = false;
rsx::surface_access access_type = rsx::surface_access::shader_read;
if (attr.width != surface_width || attr.height != surface_height)
{
// If we can get away with clip only, do it
if (attr.edge_clamped)
{
requires_clip = true;
}
else
{
requires_processing = true;
}
}
if (surface_is_rop_target && texture_cache_helpers::force_strict_fbo_sampling(texptr->samples()))
{
// Framebuffer feedback avoidance. For MSAA, we do not need to make copies; just use the resolve target
if (texptr->samples() == 1)
{
requires_processing = true;
}
else if (!requires_processing)
{
// Select resolve target instead of MSAA image
access_type = rsx::surface_access::transfer_read;
}
}
if (requires_processing)
{
const auto format_class = (force_convert) ? classify_format(attr2.gcm_format) : texptr->format_class();
const auto command = surface_is_rop_target ? deferred_request_command::copy_image_dynamic : deferred_request_command::copy_image_static;
texptr->memory_barrier(cmd, rsx::surface_access::transfer_read);
return { texptr->get_surface(rsx::surface_access::transfer_read), command, attr2, {},
texture_upload_context::framebuffer_storage, format_class, scale,
extended_dimension, decoded_remap };
}
texptr->memory_barrier(cmd, access_type);
auto viewed_surface = texptr->get_surface(access_type);
sampled_image_descriptor result = { viewed_surface->get_view(decoded_remap), texture_upload_context::framebuffer_storage,
texptr->format_class(), scale, rsx::texture_dimension_extended::texture_dimension_2d, surface_is_rop_target, viewed_surface->samples() };
if (requires_clip)
{
calculate_sample_clip_parameters(result, position2i(0, 0), size2i(attr.width, attr.height), size2i(surface_width, surface_height));
}
return result;
}
texptr->memory_barrier(cmd, rsx::surface_access::transfer_read);
if (extended_dimension == rsx::texture_dimension_extended::texture_dimension_3d)
{
return{ texptr->get_surface(rsx::surface_access::transfer_read), deferred_request_command::_3d_unwrap,
attr2, {},
texture_upload_context::framebuffer_storage, texptr->format_class(), scale,
rsx::texture_dimension_extended::texture_dimension_3d, decoded_remap };
}
ensure(extended_dimension == rsx::texture_dimension_extended::texture_dimension_cubemap);
return{ texptr->get_surface(rsx::surface_access::transfer_read), deferred_request_command::cubemap_unwrap,
attr2, {},
texture_upload_context::framebuffer_storage, texptr->format_class(), scale,
rsx::texture_dimension_extended::texture_dimension_cubemap, decoded_remap };
}
template <typename sampled_image_descriptor, typename commandbuffer_type, typename surface_store_list_type, typename section_storage_type>
sampled_image_descriptor merge_cache_resources(
commandbuffer_type& cmd,
const surface_store_list_type& fbos, const std::vector<section_storage_type*>& local,
const image_section_attributes_t& attr,
const size3f& scale,
texture_dimension_extended extended_dimension,
const texture_channel_remap_t& decoded_remap,
int select_hint = -1)
{
ensure((select_hint & 0x1) == select_hint);
bool is_depth = (select_hint == 0) ? fbos.back().is_depth : local.back()->is_depth_texture();
bool aspect_mismatch = false;
auto attr2 = attr;
// Check for mixed sources with aspect mismatch
// NOTE: If the last texture is a perfect match, this method would not have been called which means at least one transfer has to occur
if ((fbos.size() + local.size()) > 1) [[unlikely]]
{
for (const auto& tex : local)
{
if (tex->is_depth_texture() != is_depth)
{
aspect_mismatch = true;
break;
}
}
if (!aspect_mismatch) [[likely]]
{
for (const auto& surface : fbos)
{
if (surface.is_depth != is_depth)
{
aspect_mismatch = true;
break;
}
}
}
}
if (aspect_mismatch)
{
// Override with the requested format
is_depth = is_gcm_depth_format(attr.gcm_format);
}
else if (is_depth)
{
// Depth format textures were found. Check if the data can be bitcast without conversion.
if (const auto suggested_format = get_compatible_depth_format(attr.gcm_format);
!is_gcm_depth_format(suggested_format))
{
// Requested format cannot be directly read from a depth texture.
// Typeless conversion will be performed to make data accessible.
is_depth = false;
}
else
{
// Replace request format with one that is compatible with existing data.
attr2.gcm_format = suggested_format;
}
}
// If this method was called, there is no easy solution, likely means atlas gather is needed
const auto [scaled_w, scaled_h] = rsx::apply_resolution_scale(attr2.width, attr2.height);
const auto format_class = classify_format(attr2.gcm_format);
const auto upload_context = (fbos.empty()) ? texture_upload_context::shader_read : texture_upload_context::framebuffer_storage;
if (extended_dimension == rsx::texture_dimension_extended::texture_dimension_cubemap)
{
attr2.width = scaled_w;
attr2.height = scaled_h;
sampled_image_descriptor desc = { nullptr, deferred_request_command::cubemap_gather,
attr2, {},
upload_context, format_class, scale,
rsx::texture_dimension_extended::texture_dimension_cubemap, decoded_remap };
gather_texture_slices(cmd, desc.external_subresource_desc.sections_to_copy, fbos, local, attr, 6, is_depth);
return desc;
}
else if (extended_dimension == rsx::texture_dimension_extended::texture_dimension_3d && attr.depth > 1)
{
attr2.width = scaled_w;
attr2.height = scaled_h;
sampled_image_descriptor desc = { nullptr, deferred_request_command::_3d_gather,
attr2, {},
upload_context, format_class, scale,
rsx::texture_dimension_extended::texture_dimension_3d, decoded_remap };
gather_texture_slices(cmd, desc.external_subresource_desc.sections_to_copy, fbos, local, attr, attr.depth, is_depth);
return desc;
}
if (extended_dimension == rsx::texture_dimension_extended::texture_dimension_1d)
{
ensure(attr.height == 1);
}
if (!fbos.empty())
{
attr2.width = scaled_w;
attr2.height = scaled_h;
}
sampled_image_descriptor result = { nullptr, deferred_request_command::atlas_gather,
attr2, {}, upload_context, format_class,
scale, rsx::texture_dimension_extended::texture_dimension_2d, decoded_remap };
gather_texture_slices(cmd, result.external_subresource_desc.sections_to_copy, fbos, local, attr, 1, is_depth);
result.simplify();
return result;
}
template<typename sampled_image_descriptor, typename copy_region_descriptor_type>
bool append_mipmap_level(
std::vector<copy_region_descriptor_type>& sections, // Destination list
const sampled_image_descriptor& level, // Descriptor for the image level being checked
const image_section_attributes_t& attr, // Attributes of image level
u8 mipmap_level, // Level index
bool apply_upscaling, // Whether to upscale the results or not
const image_section_attributes_t& level0_attr) // Attributes of the first mipmap level
{
if (level.image_handle)
{
copy_region_descriptor_type mip
{
.src = level.image_handle->image(),
.xform = surface_transform::coordinate_transform,
.level = mipmap_level,
.dst_w = attr.width,
.dst_h = attr.height
};
// "Fast" framebuffer results are a perfect match for attr so we do not store transfer sizes
// Calculate transfer dimensions from attr
if (level.upload_context == rsx::texture_upload_context::framebuffer_storage) [[likely]]
{
std::tie(mip.src_w, mip.src_h) = rsx::apply_resolution_scale<true>(attr.width, attr.height);
}
else
{
mip.src_w = attr.width;
mip.src_h = attr.height;
}
sections.push_back(mip);
}
else
{
switch (level.external_subresource_desc.op)
{
case deferred_request_command::copy_image_dynamic:
case deferred_request_command::copy_image_static:
{
copy_region_descriptor_type mip
{
.src = level.external_subresource_desc.external_handle,
.xform = surface_transform::coordinate_transform,
.level = mipmap_level,
// NOTE: gather_texture_slices pre-applies resolution scaling
.src_x = level.external_subresource_desc.x,
.src_y = level.external_subresource_desc.y,
.src_w = level.external_subresource_desc.width,
.src_h = level.external_subresource_desc.height,
.dst_w = attr.width,
.dst_h = attr.height
};
sections.push_back(mip);
break;
}
default:
{
// TODO
return false;
}
}
}
// Check for upscaling if requested
if (apply_upscaling)
{
auto& mip = sections.back();
std::tie(mip.dst_w, mip.dst_h) = rsx::apply_resolution_scale<true>(mip.dst_w, mip.dst_h, level0_attr.width, level0_attr.height);
}
return true;
}
};
}
| 31,089 | C++ | .h | 816 | 33.275735 | 170 | 0.678062 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,005 | expected.hpp | RPCS3_rpcs3/rpcs3/Emu/RSX/Common/expected.hpp |
#pragma once
#include <util/types.hpp>
#include <concepts>
#include <string>
#include <utility>
namespace fmt
{
template <typename CharT, usz N, typename... Args>
static std::string format(const CharT(&)[N], const Args&...);
}
namespace rsx
{
namespace exception_utils
{
enum soft_exception_error_code
{
none = 0,
range_exception = 1,
invalid_enum = 2
};
struct soft_exception_t
{
soft_exception_error_code error = soft_exception_error_code::none;
soft_exception_t() = default;
soft_exception_t(soft_exception_error_code code)
: error(code) {}
bool empty() const
{
return error == soft_exception_error_code::none;
}
std::string to_string() const
{
switch (error)
{
case soft_exception_error_code::none:
return "No error";
case soft_exception_error_code::range_exception:
return "Bad Range";
case soft_exception_error_code::invalid_enum:
return "Invalid enum";
default:
return "Unknown Error";
}
}
};
}
template <typename E>
concept ErrorType = requires (E & e)
{
{ e.empty() } -> std::same_as<bool>;
};
template <typename T, ErrorType E = exception_utils::soft_exception_t>
class expected
{
T value;
E error{};
public:
[[ nodiscard ]] expected(const T& value_)
: value(value_)
{}
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 26495) // disable warning for uninitialized value member (performance reasons)
#endif
[[ nodiscard ]] expected(const E& error_)
: error(error_)
{
ensure(!error.empty());
}
#ifdef _MSC_VER
#pragma warning(pop)
#endif
operator T() const
{
ensure(error.empty());
return value;
}
T operator *() const
{
ensure(error.empty());
return value;
}
template<typename = std::enable_if<!std::is_same_v<T, bool>>>
operator bool() const
{
return error.empty();
}
operator std::pair<T&, E&>() const
{
return { value, error };
}
bool operator == (const T& other) const
{
return error.empty() && value == other;
}
std::string to_string() const
{
if (error.empty())
{
return fmt::format("%s", value);
}
if constexpr (std::is_same_v<E, exception_utils::soft_exception_t>)
{
return error.to_string();
}
return fmt::format("%s", error);
}
};
}
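// Illustrative usage sketch (not part of the original header). parse_frame_mode_sketch and
// its threshold are hypothetical; only the rsx::expected API declared above is used.
static inline rsx::expected<u32> parse_frame_mode_sketch(u32 raw)
{
if (raw > 2)
{
// Error path: construct from the error type (must be non-empty)
return rsx::exception_utils::soft_exception_t(rsx::exception_utils::invalid_enum);
}
// Success path: construct from the value type
return raw;
}
// A caller can test validity via the bool conversion and unwrap with operator*():
//   if (auto mode = parse_frame_mode_sketch(x)) { /* use *mode */ } else { /* log mode.to_string() */ }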
| 2,559 | C++ | .h | 109 | 17.954128 | 104 | 0.592349 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,006 | texture_cache_types.h | RPCS3_rpcs3/rpcs3/Emu/RSX/Common/texture_cache_types.h |
#pragma once
#include "Emu/system_config.h"
namespace rsx
{
/**
* Helper enums/structs
*/
enum invalidation_chain_policy
{
invalidation_chain_none, // No chaining: Only sections that overlap the faulting page get invalidated.
invalidation_chain_full, // Full chaining: Sections overlapping the faulting page get invalidated, as well as any sections overlapping invalidated sections.
invalidation_chain_nearby // Invalidations chain if they are near to the fault (<X pages away)
};
enum invalidation_chain_direction
{
chain_direction_both,
chain_direction_forward, // Only higher-base-address sections chain (unless they overlap the fault)
chain_direction_backward, // Only lower-base-address pages chain (unless they overlap the fault)
};
enum class component_order
{
default_ = 0,
native = 1,
swapped_native = 2,
};
enum memory_read_flags
{
flush_always = 0,
flush_once = 1
};
struct invalidation_cause
{
enum enum_type
{
invalid = 0,
read,
deferred_read,
write,
deferred_write,
unmap, // fault range is being unmapped
reprotect, // we are going to reprotect the fault range
superseded_by_fbo, // used by texture_cache::locked_memory_region
committed_as_fbo // same as superseded_by_fbo but without locking or preserving page flags
} cause;
constexpr bool valid() const
{
return cause != invalid;
}
constexpr bool is_read() const
{
AUDIT(valid());
return (cause == read || cause == deferred_read);
}
constexpr bool deferred_flush() const
{
AUDIT(valid());
return (cause == deferred_read || cause == deferred_write);
}
constexpr bool destroy_fault_range() const
{
AUDIT(valid());
return (cause == unmap);
}
constexpr bool keep_fault_range_protection() const
{
AUDIT(valid());
return (cause == unmap || cause == reprotect || cause == superseded_by_fbo);
}
constexpr bool skip_fbos() const
{
AUDIT(valid());
return (cause == superseded_by_fbo || cause == committed_as_fbo);
}
constexpr bool skip_flush() const
{
AUDIT(valid());
return (cause == unmap) || (!g_cfg.video.strict_texture_flushing && cause == superseded_by_fbo);
}
constexpr invalidation_cause undefer() const
{
AUDIT(deferred_flush());
if (cause == deferred_read)
return read;
if (cause == deferred_write)
return write;
fmt::throw_exception("Unreachable");
}
constexpr invalidation_cause defer() const
{
AUDIT(!deferred_flush());
if (cause == read)
return deferred_read;
if (cause == write)
return deferred_write;
fmt::throw_exception("Unreachable");
}
constexpr invalidation_cause() : cause(invalid) {}
constexpr invalidation_cause(enum_type _cause) : cause(_cause) {}
operator enum_type&() { return cause; }
constexpr operator enum_type() const { return cause; }
};
}
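// Illustrative sketch (not part of the original header): given a valid cause, a deferred
// read/write is resolved back to its immediate counterpart before the flush is processed.
static inline rsx::invalidation_cause resolve_deferred_sketch(rsx::invalidation_cause cause)
{
return cause.deferred_flush() ? cause.undefer() : cause;
}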
| 2,889 | C++ | .h | 102 | 25.04902 | 166 | 0.69686 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,007 | BufferUtils.h | RPCS3_rpcs3/rpcs3/Emu/RSX/Common/BufferUtils.h |
#pragma once
#include "../gcm_enums.h"
#include <span>
/*
* Returns false if the primitive mode is not supported natively and needs to be emulated (using an index buffer).
*/
bool is_primitive_native(rsx::primitive_type m_draw_mode);
/*
* Returns true if adjacency information does not matter for this type. Allows optimizations, e.g. removal of the primitive restart index.
*/
bool is_primitive_disjointed(rsx::primitive_type draw_mode);
/**
* Returns a fixed index count for emulated primitive, otherwise returns initial_index_count
*/
u32 get_index_count(rsx::primitive_type m_draw_mode, u32 initial_index_count);
/**
* Returns index type size in byte
*/
u32 get_index_type_size(rsx::index_array_type type);
/**
* Write count indices using (first, first + count) ranges.
* Returns the min/max index found during the process and the number of valid indices written to the buffer.
* The function expands the index buffer for non-native primitive types if expands(draw_mode) returns true.
*/
std::tuple<u32, u32, u32> write_index_array_data_to_buffer(std::span<std::byte> dst, std::span<const std::byte> src,
rsx::index_array_type, rsx::primitive_type draw_mode, bool restart_index_enabled, u32 restart_index,
const std::function<bool(rsx::primitive_type)>& expands);
/**
* Write index data needed to emulate non indexed non native primitive mode.
*/
void write_index_array_for_non_indexed_non_native_primitive_to_buffer(char* dst, rsx::primitive_type draw_mode, unsigned count);
// Copy and swap data in 32-bit units
extern void(*const copy_data_swap_u32)(u32* dst, const u32* src, u32 count);
// Copy and swap data in 32-bit units, return true if changed
extern bool(*const copy_data_swap_u32_cmp)(u32* dst, const u32* src, u32 count);
| 1,733 | C++ | .h | 35 | 47.771429 | 131 | 0.759479 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,008 | TextureUtils.h | RPCS3_rpcs3/rpcs3/Emu/RSX/Common/TextureUtils.h |
#pragma once
#include "io_buffer.h"
#include "../color_utils.h"
#include "../RSXTexture.h"
#include <span>
#include <stack>
#include <vector>
namespace rsx
{
enum texture_upload_context : u32
{
shader_read = 1,
blit_engine_src = 2,
blit_engine_dst = 4,
framebuffer_storage = 8,
dma = 16
};
enum texture_colorspace : u32
{
rgb_linear = 0,
srgb_nonlinear = 1
};
enum surface_usage_flags : u32
{
unknown = 0,
attachment = 1,
storage = 2,
};
enum surface_metrics : u32
{
pixels = 0,
samples = 1,
bytes = 2
};
class surface_access // This is simply a modified enum class
{
public:
// Publicly visible enumerators
enum
{
shader_read = (1 << 0),
shader_write = (1 << 1),
transfer_read = (1 << 2),
transfer_write = (1 << 3),
// Arbitrary r/w flags, use with caution.
memory_write = (1 << 4),
memory_read = (1 << 5),
// Not r/w but signifies a GPU reference to this object.
gpu_reference = (1 << 6),
};
private:
// Meta
enum
{
all_writes = (shader_write | transfer_write | memory_write),
all_reads = (shader_read | transfer_read | memory_read),
all_transfer = (transfer_read | transfer_write)
};
u32 value_;
public:
// Ctor
surface_access(u32 value) : value_(value)
{}
// Quick helpers
inline bool is_read() const
{
return !(value_ & ~all_reads);
}
inline bool is_write() const
{
return !(value_ & ~all_writes);
}
inline bool is_transfer() const
{
return !(value_ & ~all_transfer);
}
inline bool is_transfer_or_read() const // Special; reads and transfers generate MSAA load operations
{
return !(value_ & ~(all_transfer | all_reads));
}
bool operator == (const surface_access& other) const
{
return value_ == other.value_;
}
bool operator == (u32 other) const
{
return value_ == other;
}
};
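// For example (illustrative, not part of the original header):
//   surface_access(surface_access::transfer_read).is_transfer()                               -> true
//   surface_access(surface_access::shader_read | surface_access::memory_write).is_read()      -> false (a write bit is set)
//   surface_access(surface_access::shader_read).is_transfer_or_read()                         -> true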
// Defines how the underlying PS3-visible memory backed by a texture is accessed
namespace format_class_
{
// TODO: Remove when enum import is supported by clang
enum format_class : u8
{
RSX_FORMAT_CLASS_UNDEFINED = 0,
RSX_FORMAT_CLASS_COLOR = 1,
RSX_FORMAT_CLASS_DEPTH16_UNORM = 2,
RSX_FORMAT_CLASS_DEPTH16_FLOAT = 4,
RSX_FORMAT_CLASS_DEPTH24_UNORM_X8_PACK32 = 8,
RSX_FORMAT_CLASS_DEPTH24_FLOAT_X8_PACK32 = 16,
RSX_FORMAT_CLASS_DEPTH_FLOAT_MASK = (RSX_FORMAT_CLASS_DEPTH16_FLOAT | RSX_FORMAT_CLASS_DEPTH24_FLOAT_X8_PACK32)
};
}
using namespace format_class_;
// Sampled image descriptor
class sampled_image_descriptor_base
{
#pragma pack(push, 1)
struct texcoord_xform_t
{
f32 scale[3];
f32 bias[3];
f32 clamp_min[2];
f32 clamp_max[2];
bool clamp = false;
};
#pragma pack(pop)
// Texture matrix stack
std::stack<texcoord_xform_t> m_texcoord_xform_stack;
public:
virtual ~sampled_image_descriptor_base() = default;
virtual u32 encoded_component_map() const = 0;
void push_texcoord_xform()
{
m_texcoord_xform_stack.push(texcoord_xform);
}
void pop_texcoord_xform()
{
ensure(!m_texcoord_xform_stack.empty());
std::memcpy(&texcoord_xform, &m_texcoord_xform_stack.top(), sizeof(texcoord_xform_t));
m_texcoord_xform_stack.pop();
}
texture_upload_context upload_context = texture_upload_context::shader_read;
rsx::texture_dimension_extended image_type = texture_dimension_extended::texture_dimension_2d;
rsx::format_class format_class = RSX_FORMAT_CLASS_UNDEFINED;
bool is_cyclic_reference = false;
u8 samples = 1;
u32 ref_address = 0;
u64 surface_cache_tag = 0;
texcoord_xform_t texcoord_xform;
};
struct typeless_xfer
{
bool src_is_typeless = false;
bool dst_is_typeless = false;
bool flip_vertical = false;
bool flip_horizontal = false;
u32 src_gcm_format = 0;
u32 dst_gcm_format = 0;
u32 src_native_format_override = 0;
u32 dst_native_format_override = 0;
f32 src_scaling_hint = 1.f;
f32 dst_scaling_hint = 1.f;
texture_upload_context src_context = texture_upload_context::blit_engine_src;
texture_upload_context dst_context = texture_upload_context::blit_engine_dst;
void analyse();
};
struct subresource_layout
{
rsx::io_buffer data;
u16 width_in_texel;
u16 height_in_texel;
u16 width_in_block;
u16 height_in_block;
u16 depth;
u16 level;
u16 layer;
u8 border;
u8 reserved;
u32 pitch_in_block;
};
struct memory_transfer_cmd
{
const void* dst;
const void* src;
u32 length;
};
struct texture_memory_info
{
int element_size;
int block_length;
bool require_swap;
bool require_deswizzle;
bool require_upload;
std::vector<memory_transfer_cmd> deferred_cmds;
};
struct texture_uploader_capabilities
{
bool supports_byteswap;
bool supports_vtc_decoding;
bool supports_hw_deswizzle;
bool supports_zero_copy;
usz alignment;
};
/**
* Get size to store texture in a linear fashion.
* Storage is assumed to use a rowPitchAlignment boundary for every row of texture.
*/
usz get_placed_texture_storage_size(u16 width, u16 height, u32 depth, u8 format, u16 mipmap, bool cubemap, usz row_pitch_alignment, usz mipmap_alignment);
usz get_placed_texture_storage_size(const rsx::fragment_texture &texture, usz row_pitch_alignment, usz mipmap_alignment = 0x200);
usz get_placed_texture_storage_size(const rsx::vertex_texture &texture, usz row_pitch_alignment, usz mipmap_alignment = 0x200);
/**
* Get all rsx::subresource_layout entries for a texture.
* The subresources are ordered per layer then per mipmap level (as in rsx memory).
*/
std::vector<subresource_layout> get_subresources_layout(const rsx::fragment_texture &texture);
std::vector<subresource_layout> get_subresources_layout(const rsx::vertex_texture &texture);
texture_memory_info upload_texture_subresource(rsx::io_buffer& dst_buffer, const subresource_layout &src_layout, int format, bool is_swizzled, texture_uploader_capabilities& caps);
u8 get_format_block_size_in_bytes(int format);
u8 get_format_block_size_in_texel(int format);
u8 get_format_block_size_in_bytes(rsx::surface_color_format format);
u8 get_format_block_size_in_bytes(rsx::surface_depth_format2 format);
bool is_compressed_host_format(u32 format); // Returns true for host-compressed formats (DXT)
u8 get_format_sample_count(rsx::surface_antialiasing antialias);
u32 get_max_depth_value(rsx::surface_depth_format2 format);
bool is_depth_stencil_format(rsx::surface_depth_format2 format);
bool is_int8_remapped_format(u32 format); // Returns true if the format is treated as INT8 by the RSX remapper.
/**
* Returns number of texel rows encoded in one pitch-length line of bytes
*/
u8 get_format_texel_rows_per_line(u32 format);
/**
* Get number of bytes occupied by texture in RSX mem
*/
usz get_texture_size(const rsx::fragment_texture &texture);
usz get_texture_size(const rsx::vertex_texture &texture);
/**
* Get packed pitch
*/
u32 get_format_packed_pitch(u32 format, u16 width, bool border = false, bool swizzled = false);
/**
* Reverse encoding
*/
u32 get_remap_encoding(const texture_channel_remap_t& remap);
/**
* Get gcm texel layout. Returns <format, byteswapped>
*/
std::pair<u32, bool> get_compatible_gcm_format(rsx::surface_color_format format);
std::pair<u32, bool> get_compatible_gcm_format(rsx::surface_depth_format2 format);
format_class classify_format(rsx::surface_depth_format2 format);
format_class classify_format(u32 gcm_format);
bool is_texcoord_wrapping_mode(rsx::texture_wrap_mode mode);
bool is_border_clamped_texture(rsx::texture_wrap_mode wrap_s, rsx::texture_wrap_mode wrap_t, rsx::texture_wrap_mode wrap_r, rsx::texture_dimension dimension);
template <typename TextureType>
bool is_border_clamped_texture(const TextureType& tex)
{
return is_border_clamped_texture(tex.wrap_s(), tex.wrap_t(), tex.wrap_r(), tex.dimension());
}
}
| 7,827 | C++ | .h | 250 | 28.428 | 181 | 0.722635 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,009 | simple_array.hpp | RPCS3_rpcs3/rpcs3/Emu/RSX/Common/simple_array.hpp |
#pragma once
#include <util/types.hpp>
#include <functional>
#include <algorithm>
namespace rsx
{
template <typename Ty>
requires std::is_trivially_destructible_v<Ty>
struct simple_array
{
public:
using iterator = Ty*;
using const_iterator = const Ty*;
using value_type = Ty;
private:
static constexpr u32 _local_capacity = std::max<u32>(64u / sizeof(Ty), 1u);
char _local_storage[_local_capacity * sizeof(Ty)];
u32 _capacity = _local_capacity;
Ty* _data = _local_capacity ? reinterpret_cast<Ty*>(_local_storage) : nullptr;
u32 _size = 0;
inline u64 offset(const_iterator pos)
{
return (_data) ? u64(pos - _data) : 0ull;
}
bool is_local_storage() const
{
return _data == reinterpret_cast<const Ty*>(_local_storage);
}
public:
simple_array() = default;
simple_array(u32 initial_size)
{
reserve(initial_size);
_size = initial_size;
}
simple_array(u32 initial_size, const Ty val)
{
reserve(initial_size);
_size = initial_size;
for (u32 n = 0; n < initial_size; ++n)
{
_data[n] = val;
}
}
simple_array(const std::initializer_list<Ty>& args)
{
reserve(::size32(args));
for (const auto& arg : args)
{
push_back(arg);
}
}
simple_array(const simple_array& other)
{
resize(other._size);
if (_size)
{
std::memcpy(_data, other._data, size_bytes());
}
}
simple_array(simple_array&& other) noexcept
{
swap(other);
}
simple_array& operator=(const simple_array& other)
{
if (&other != this)
{
resize(other._size);
if (_size)
{
std::memcpy(_data, other._data, size_bytes());
}
}
return *this;
}
simple_array& operator=(simple_array&& other) noexcept
{
swap(other);
return *this;
}
~simple_array()
{
if (_data)
{
if (!is_local_storage())
{
free(_data);
}
_data = nullptr;
_size = _capacity = 0;
}
}
void swap(simple_array<Ty>& that) noexcept
{
if (!_size && !that._size)
{
// NOP. Surprisingly common
return;
}
const auto _this_is_local = is_local_storage();
const auto _that_is_local = that.is_local_storage();
if (!_this_is_local && !_that_is_local)
{
std::swap(_capacity, that._capacity);
std::swap(_size, that._size);
std::swap(_data, that._data);
return;
}
if (!_size)
{
*this = that;
that.clear();
return;
}
if (!that._size)
{
that = *this;
clear();
return;
}
if (_this_is_local != _that_is_local)
{
// Mismatched usage of the stack storage.
rsx::simple_array<Ty> tmp{ *this };
*this = that;
that = tmp;
return;
}
// Use memcpy to allow compiler optimizations
Ty _stack_alloc[_local_capacity];
std::memcpy(_stack_alloc, that._data, that.size_bytes());
std::memcpy(that._data, _data, size_bytes());
std::memcpy(_data, _stack_alloc, that.size_bytes());
std::swap(_size, that._size);
}
void reserve(u32 size)
{
if (_capacity >= size)
{
return;
}
if (is_local_storage())
{
// Switch to heap storage
_data = static_cast<Ty*>(std::malloc(sizeof(Ty) * size));
std::memcpy(_data, _local_storage, size_bytes());
}
else
{
// Extend heap storage
ensure(_data = static_cast<Ty*>(std::realloc(_data, sizeof(Ty) * size))); // "realloc() failed!"
}
_capacity = size;
}
template <typename T> requires UnsignedInt<T>
void resize(T size)
{
const auto new_size = static_cast<u32>(size);
reserve(new_size);
_size = new_size;
}
void push_back(const Ty& val)
{
if (_size >= _capacity)
{
reserve(_capacity + 16);
}
_data[_size++] = val;
}
void push_back(Ty&& val)
{
if (_size >= _capacity)
{
reserve(_capacity + 16);
}
_data[_size++] = val;
}
template <typename... Args>
void emplace_back(Args&&... args)
{
if (_size >= _capacity)
{
reserve(_capacity + 16);
}
std::construct_at(&_data[_size++], std::forward<Args&&>(args)...);
}
Ty pop_back()
{
return _data[--_size];
}
iterator insert(iterator pos, const Ty& val)
{
ensure(pos >= _data);
const auto _loc = offset(pos);
if (_size >= _capacity)
{
reserve(_capacity + 16);
pos = _data + _loc;
}
if (_loc >= _size)
{
_data[_size++] = val;
return pos;
}
ensure(_loc < _size);
const auto remaining = (_size - _loc);
memmove(pos + 1, pos, remaining * sizeof(Ty));
*pos = val;
_size++;
return pos;
}
iterator insert(iterator pos, Ty&& val)
{
ensure(pos >= _data);
const auto _loc = offset(pos);
if (_size >= _capacity)
{
reserve(_capacity + 16);
pos = _data + _loc;
}
if (_loc >= _size)
{
_data[_size++] = val;
return pos;
}
ensure(_loc < _size);
const u32 remaining = (_size - _loc);
memmove(pos + 1, pos, remaining * sizeof(Ty));
*pos = val;
_size++;
return pos;
}
void clear()
{
_size = 0;
}
bool empty() const
{
return _size == 0;
}
u32 size() const
{
return _size;
}
u64 size_bytes() const
{
return _size * sizeof(Ty);
}
u32 capacity() const
{
return _capacity;
}
Ty& operator[] (u32 index)
{
return _data[index];
}
const Ty& operator[] (u32 index) const
{
return _data[index];
}
Ty* data()
{
return _data;
}
const Ty* data() const
{
return _data;
}
Ty& back()
{
return _data[_size - 1];
}
const Ty& back() const
{
return _data[_size - 1];
}
Ty& front()
{
return _data[0];
}
const Ty& front() const
{
return _data[0];
}
iterator begin()
{
return _data;
}
iterator end()
{
return _data ? _data + _size : nullptr;
}
const_iterator begin() const
{
return _data;
}
const_iterator end() const
{
return _data ? _data + _size : nullptr;
}
bool any(std::predicate<const Ty&> auto predicate) const
{
for (auto it = begin(); it != end(); ++it)
{
if (std::invoke(predicate, *it))
{
return true;
}
}
return false;
}
void filter(std::predicate<const Ty&> auto predicate)
{
if (!_size)
{
return;
}
// Swap-and-pop removal of elements that fail the predicate.
// 'last' is kept one past the final live element; the element swapped into 'ptr'
// is re-tested before advancing so no entry is skipped or duplicated.
for (auto ptr = _data, last = _data + _size; ptr < last;)
{
if (!predicate(*ptr))
{
--last;
--_size;
if (ptr != last)
{
std::memcpy(ptr, last, sizeof(Ty));
}
}
else
{
++ptr;
}
}
}
void sort(std::predicate<const Ty&, const Ty&> auto predicate)
{
if (_size < 2)
{
return;
}
std::sort(begin(), end(), predicate);
}
template <typename F, typename U = std::invoke_result_t<F, const Ty&>>
requires std::is_invocable_v<F, const Ty&>
simple_array<U> map(F&& xform) const
{
simple_array<U> result;
result.reserve(size());
for (auto it = begin(); it != end(); ++it)
{
result.push_back(xform(*it));
}
return result;
}
};
}
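// Illustrative usage sketch (not part of the original header); the values are arbitrary
// and only the simple_array API defined above is used.
static inline u32 sum_of_squares_sketch()
{
rsx::simple_array<u32> values{ 1, 2, 3 }; // small arrays start in the 64-byte local storage
const auto squares = values.map([](const u32& v) { return v * v; });
u32 sum = 0;
for (const auto& v : squares)
{
sum += v; // 1 + 4 + 9 = 14
}
return sum;
}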
| 6,970 | C++ | .h | 350 | 15.982857 | 100 | 0.586275 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,010 | buffer_stream.hpp | RPCS3_rpcs3/rpcs3/Emu/RSX/Common/buffer_stream.hpp |
#pragma once
#include "util/types.hpp"
#include "util/asm.hpp"
#if defined(ARCH_X64)
#include "emmintrin.h"
#include "immintrin.h"
#endif
#ifdef ARCH_ARM64
#ifndef _MSC_VER
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wold-style-cast"
#endif
#include "Emu/CPU/sse2neon.h"
#ifndef _MSC_VER
#pragma GCC diagnostic pop
#endif
#endif
namespace utils
{
/**
* Stream a 128 bits vector to dst.
*/
static inline
void stream_vector(void* dst, u32 x, u32 y, u32 z, u32 w)
{
const __m128i vector = _mm_set_epi32(w, z, y, x);
_mm_stream_si128(reinterpret_cast<__m128i*>(dst), vector);
}
static inline
void stream_vector(void* dst, f32 x, f32 y, f32 z, f32 w)
{
stream_vector(dst, std::bit_cast<u32>(x), std::bit_cast<u32>(y), std::bit_cast<u32>(z), std::bit_cast<u32>(w));
}
/**
* Stream a 128 bits vector from src to dst.
*/
static inline
void stream_vector_from_memory(void* dst, void* src)
{
const __m128i vector = _mm_loadu_si128(reinterpret_cast<__m128i*>(src));
_mm_stream_si128(reinterpret_cast<__m128i*>(dst), vector);
}
}
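// Illustrative usage sketch (not part of the original header): the destination pointer is
// hypothetical and must be 16-byte aligned, since _mm_stream_si128 performs an aligned
// non-temporal store.
static inline void write_position_sketch(void* dst /* 16-byte aligned */)
{
// Writes x/y/z/w as a single 128-bit streaming store, bypassing the cache
utils::stream_vector(dst, 1.0f, 2.0f, 3.0f, 1.0f);
}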
| 1,177 | C++ | .h | 44 | 23.681818 | 114 | 0.67975 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,011 | ranged_map.hpp | RPCS3_rpcs3/rpcs3/Emu/RSX/Common/ranged_map.hpp |
#pragma once
#include <util/types.hpp>
#include "Utilities/address_range.h"
#include <unordered_map>
namespace rsx
{
template <typename T, int BlockSize>
class ranged_map
{
protected:
struct block_metadata_t
{
u32 id = umax; // ID of the metadata blob
u32 head_block = umax; // Earliest block that may have an object that intersects with the data at the block with ID 'id'
};
public:
using inner_type = typename std::unordered_map<u32, T>;
using outer_type = typename std::array<inner_type, 0x100000000ull / BlockSize>;
using metadata_array = typename std::array<block_metadata_t, 0x100000000ull / BlockSize>;
protected:
outer_type m_data;
metadata_array m_metadata;
static inline u32 block_for(u32 address)
{
return address / BlockSize;
}
static inline u32 block_address(u32 block_id)
{
return block_id * BlockSize;
}
void broadcast_insert(const utils::address_range& range)
{
const auto head_block = block_for(range.start);
for (auto meta = &m_metadata[head_block]; meta <= &m_metadata[block_for(range.end)]; ++meta)
{
meta->head_block = std::min(head_block, meta->head_block);
}
}
public:
class iterator
{
using super = typename rsx::ranged_map<T, BlockSize>;
using inner_iterator = typename inner_type::iterator;
friend super;
protected:
inner_type* m_current = nullptr;
inner_type* m_end = nullptr;
inner_type* m_data_ptr = nullptr;
block_metadata_t* m_metadata_ptr = nullptr;
inner_iterator m_it{};
void forward_scan()
{
while (m_current < m_end)
{
m_it = (++m_current)->begin();
if (m_it != m_current->end()) [[ likely ]]
{
return;
}
}
// end pointer
m_current = nullptr;
m_it = {};
}
void next()
{
if (!m_current)
{
return;
}
if (++m_it != m_current->end()) [[ likely ]]
{
return;
}
forward_scan();
}
void begin_range(u32 address, inner_iterator& where)
{
m_current = &m_data_ptr[address / BlockSize];
m_end = m_current;
m_it = where;
}
void begin_range(const utils::address_range& range)
{
const auto start_block_id = range.start / BlockSize;
const auto& metadata = m_metadata_ptr[start_block_id];
m_current = &m_data_ptr[std::min(start_block_id, metadata.head_block)];
m_end = &m_data_ptr[range.end / BlockSize];
--m_current;
forward_scan();
}
void erase()
{
m_it = m_current->erase(m_it);
if (m_it != m_current->end())
{
return;
}
forward_scan();
}
iterator(super* parent):
m_data_ptr(parent->m_data.data()),
m_metadata_ptr(parent->m_metadata.data())
{}
public:
bool operator == (const iterator& other) const
{
return m_current == other.m_current && m_it == other.m_it;
}
auto* operator -> ()
{
ensure(m_current);
return m_it.operator->();
}
auto& operator * ()
{
ensure(m_current);
return m_it.operator*();
}
auto* operator -> () const
{
ensure(m_current);
return m_it.operator->();
}
auto& operator * () const
{
ensure(m_current);
return m_it.operator*();
}
iterator& operator ++ ()
{
ensure(m_current);
next();
return *this;
}
T& operator ++ (int)
{
ensure(m_current);
auto old = *this;
next();
return old;
}
};
public:
ranged_map()
{
std::for_each(m_metadata.begin(), m_metadata.end(), [&](auto& meta) { meta.id = static_cast<u32>(&meta - m_metadata.data()); });
}
void emplace(const utils::address_range& range, T&& value)
{
broadcast_insert(range);
m_data[block_for(range.start)].insert_or_assign(range.start, std::forward<T>(value));
}
usz count(const u32 key) const
{
const auto& block = m_data[block_for(key)];
if (const auto found = block.find(key);
found != block.end())
{
return 1;
}
return 0;
}
iterator find(const u32 key)
{
auto& block = m_data[block_for(key)];
iterator ret = { this };
if (auto found = block.find(key);
found != block.end())
{
ret.begin_range(key, found);
}
return ret;
}
iterator erase(iterator& where)
{
where.erase();
return where;
}
void erase(u32 address)
{
m_data[block_for(address)].erase(address);
}
iterator begin_range(const utils::address_range& range)
{
iterator ret = { this };
ret.begin_range(range);
return ret;
}
iterator end()
{
iterator ret = { this };
return ret;
}
void clear()
{
for (auto& e : m_data)
{
e.clear();
}
}
};
}
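// Illustrative usage sketch (not part of the original header). The block size, addresses and
// payload type are arbitrary; address_range::start_length is the same helper used elsewhere
// in this codebase.
static inline usz count_entries_sketch()
{
rsx::ranged_map<int, 0x1000000> storage;
storage.emplace(utils::address_range::start_length(0x10000, 0x1000), 1);
storage.emplace(utils::address_range::start_length(0x30000, 0x1000), 2);
usz found = 0;
for (auto it = storage.begin_range(utils::address_range::start_length(0x0, 0x40000)); it != storage.end(); ++it)
{
found++; // visits every entry stored in the blocks covered by the query range
}
return found; // 2
}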
| 4,879 | C++ | .h | 203 | 18.729064 | 132 | 0.592512 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,012 | texture_cache_utils.h | RPCS3_rpcs3/rpcs3/Emu/RSX/Common/texture_cache_utils.h |
#pragma once
#include "texture_cache_types.h"
#include "texture_cache_predictor.h"
#include "TextureUtils.h"
#include "Emu/Memory/vm.h"
#include "util/vm.hpp"
#include <list>
#include <unordered_set>
namespace rsx
{
enum section_bounds
{
full_range,
locked_range,
confirmed_range
};
enum section_protection_strategy
{
lock,
hash
};
static inline void memory_protect(const address_range& range, utils::protection prot)
{
ensure(range.is_page_range());
//rsx_log.error("memory_protect(0x%x, 0x%x, %x)", static_cast<u32>(range.start), static_cast<u32>(range.length()), static_cast<u32>(prot));
utils::memory_protect(vm::base(range.start), range.length(), prot);
#ifdef TEXTURE_CACHE_DEBUG
tex_cache_checker.set_protection(range, prot);
#endif
}
/**
* List structure used in Ranged Storage Blocks
* List of Arrays
* (avoids reallocation on growth while sidestepping the slow iteration of a plain list)
*/
template <typename section_storage_type, usz array_size>
class ranged_storage_block_list
{
static_assert(array_size > 0, "array_size must be positive non-zero");
public:
using value_type = section_storage_type;
using array_type = std::array<value_type, array_size>;
using list_type = std::list<array_type>;
using size_type = u32;
// Iterator
template <typename T, typename block_list, typename list_iterator>
class iterator_tmpl
{
public:
// Traits
using value_type = T;
using pointer = T * ;
using difference_type = int;
using reference = T & ;
using iterator_category = std::forward_iterator_tag;
// Constructors
iterator_tmpl() = default;
iterator_tmpl(block_list *_block) :
block(_block),
list_it(_block->m_data.begin()),
idx(0)
{
if (_block->empty())
idx = u32{umax};
}
private:
// Members
block_list *block;
list_iterator list_it = {};
size_type idx = u32{umax};
size_type array_idx = 0;
inline void next()
{
++idx;
if (idx >= block->size())
{
idx = u32{umax};
return;
}
++array_idx;
if (array_idx >= array_size)
{
array_idx = 0;
list_it++;
}
}
public:
inline reference operator*() const { return (*list_it)[array_idx]; }
inline pointer operator->() const { return &((*list_it)[array_idx]); }
inline reference operator++() { next(); return **this; }
inline reference operator++(int) { auto &res = **this; next(); return res; }
inline bool operator==(const iterator_tmpl &rhs) const { return idx == rhs.idx; }
};
using iterator = iterator_tmpl<value_type, ranged_storage_block_list, typename list_type::iterator>;
using const_iterator = iterator_tmpl<const value_type, const ranged_storage_block_list, typename list_type::const_iterator>;
// Members
size_type m_size = 0;
list_type m_data;
typename list_type::iterator m_data_it;
size_type m_array_idx;
size_type m_capacity;
// Helpers
inline void next_array()
{
if (m_data_it == m_data.end() || ++m_data_it == m_data.end())
{
m_data_it = m_data.emplace(m_data_it);
m_capacity += array_size;
}
m_array_idx = 0;
}
public:
// Constructor, Destructor
ranged_storage_block_list() :
m_data_it(m_data.end()),
m_array_idx(-1),
m_capacity(0)
{}
// Iterator
inline iterator begin() noexcept { return { this }; }
inline const_iterator begin() const noexcept { return { this }; }
constexpr iterator end() noexcept { return {}; }
constexpr const_iterator end() const noexcept { return {}; }
// Operators
inline value_type& front()
{
AUDIT(!empty());
return m_data.front()[0];
}
inline value_type& back()
{
AUDIT(m_data_it != m_data.end() && m_array_idx < array_size);
return (*m_data_it)[m_array_idx];
}
// Other operations on data
inline size_type size() const { return m_size; }
inline size_type capacity() const { return m_capacity; }
inline bool empty() const { return m_size == 0; }
inline void clear()
{
m_size = 0;
m_array_idx = 0;
m_data_it = m_data.begin();
}
inline void free()
{
m_size = 0;
m_array_idx = 0;
m_capacity = 0;
m_data.clear();
m_data_it = m_data.end();
}
inline void reserve(size_type new_size)
{
if (new_size <= m_capacity) return;
size_type new_num_arrays = ((new_size - 1) / array_size) + 1;
m_data.reserve(new_num_arrays);
m_capacity = new_num_arrays * array_size;
}
template <typename ...Args>
inline value_type& emplace_back(Args&&... args)
{
if (m_array_idx >= array_size)
{
next_array();
}
ensure(m_capacity > 0 && m_array_idx < array_size && m_data_it != m_data.end());
value_type *dest = &((*m_data_it)[m_array_idx++]);
new (dest) value_type(std::forward<Args>(args)...);
++m_size;
return *dest;
}
};
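// Note (illustrative): because the fixed-size arrays live in a std::list, emplace_back only
// appends a new array_size-element chunk when needed; existing chunks are never reallocated,
// so pointers and references to stored sections remain stable as the container grows.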
/**
* Ranged storage
*/
template <typename _ranged_storage_type>
class ranged_storage_block
{
public:
using ranged_storage_type = _ranged_storage_type;
using section_storage_type = typename ranged_storage_type::section_storage_type;
using texture_cache_type = typename ranged_storage_type::texture_cache_type;
//using block_container_type = std::list<section_storage_type>;
using block_container_type = ranged_storage_block_list<section_storage_type, 64>;
using iterator = typename block_container_type::iterator;
using const_iterator = typename block_container_type::const_iterator;
using size_type = typename block_container_type::size_type;
static constexpr u32 num_blocks = ranged_storage_type::num_blocks;
static constexpr u32 block_size = ranged_storage_type::block_size;
using unowned_container_type = std::unordered_set<section_storage_type*>;
using unowned_iterator = typename unowned_container_type::iterator;
using unowned_const_iterator = typename unowned_container_type::const_iterator;
private:
u32 index = 0;
address_range range = {};
block_container_type sections = {};
unowned_container_type unowned; // pointers to sections from other blocks that overlap this block
atomic_t<u32> exists_count = 0;
atomic_t<u32> locked_count = 0;
atomic_t<u32> unreleased_count = 0;
ranged_storage_type *m_storage = nullptr;
inline void add_owned_section_overlaps(section_storage_type &section)
{
u32 end = section.get_section_range().end;
for (auto *block = next_block(); block != nullptr && end >= block->get_start(); block = block->next_block())
{
block->add_unowned_section(section);
}
}
inline void remove_owned_section_overlaps(section_storage_type &section)
{
u32 end = section.get_section_range().end;
for (auto *block = next_block(); block != nullptr && end >= block->get_start(); block = block->next_block())
{
block->remove_unowned_section(section);
}
}
public:
// Construction
ranged_storage_block() = default;
void initialize(u32 _index, ranged_storage_type *storage)
{
ensure(m_storage == nullptr && storage != nullptr);
AUDIT(index < num_blocks);
m_storage = storage;
index = _index;
range = address_range::start_length(index * block_size, block_size);
AUDIT(range.is_page_range() && get_start() / block_size == index);
}
/**
* Wrappers
*/
constexpr iterator begin() noexcept { return sections.begin(); }
constexpr const_iterator begin() const noexcept { return sections.begin(); }
inline iterator end() noexcept { return sections.end(); }
inline const_iterator end() const noexcept { return sections.end(); }
inline bool empty() const { return sections.empty(); }
inline size_type size() const { return sections.size(); }
inline u32 get_exists_count() const { return exists_count; }
inline u32 get_locked_count() const { return locked_count; }
inline u32 get_unreleased_count() const { return unreleased_count; }
/**
* Utilities
*/
ranged_storage_type& get_storage() const
{
AUDIT(m_storage != nullptr);
return *m_storage;
}
texture_cache_type& get_texture_cache() const
{
return get_storage().get_texture_cache();
}
inline section_storage_type& create_section()
{
auto &res = sections.emplace_back(this);
return res;
}
inline void clear()
{
for (auto &section : *this)
{
if (section.is_locked())
section.unprotect();
section.destroy();
}
AUDIT(exists_count == 0);
AUDIT(unreleased_count == 0);
AUDIT(locked_count == 0);
sections.clear();
}
inline bool is_first_block() const
{
return index == 0;
}
inline bool is_last_block() const
{
return index == num_blocks - 1;
}
inline ranged_storage_block* prev_block() const
{
if (is_first_block()) return nullptr;
return &get_storage()[index - 1];
}
inline ranged_storage_block* next_block() const
{
if (is_last_block()) return nullptr;
return &get_storage()[index + 1];
}
// Address range
inline const address_range& get_range() const { return range; }
inline u32 get_start() const { return range.start; }
inline u32 get_end() const { return range.end; }
inline u32 get_index() const { return index; }
inline bool overlaps(const section_storage_type& section, section_bounds bounds = full_range) const { return section.overlaps(range, bounds); }
inline bool overlaps(const address_range& _range) const { return range.overlaps(_range); }
/**
* Section callbacks
*/
inline void on_section_protected(const section_storage_type &section)
{
(void)section; // silence unused warning without _AUDIT
AUDIT(section.is_locked());
locked_count++;
}
inline void on_section_unprotected(const section_storage_type &section)
{
(void)section; // silence unused warning without _AUDIT
AUDIT(!section.is_locked());
u32 prev_locked = locked_count--;
ensure(prev_locked > 0);
}
inline void on_section_range_valid(section_storage_type &section)
{
AUDIT(section.valid_range());
AUDIT(range.overlaps(section.get_section_base()));
add_owned_section_overlaps(section);
}
inline void on_section_range_invalid(section_storage_type &section)
{
AUDIT(section.valid_range());
AUDIT(range.overlaps(section.get_section_base()));
remove_owned_section_overlaps(section);
}
inline void on_section_resources_created(const section_storage_type &section)
{
(void)section; // silence unused warning without _AUDIT
AUDIT(section.exists());
u32 prev_exists = exists_count++;
if (prev_exists == 0)
{
m_storage->on_ranged_block_first_section_created(*this);
}
}
inline void on_section_resources_destroyed(const section_storage_type &section)
{
(void)section; // silence unused warning without _AUDIT
AUDIT(!section.exists());
u32 prev_exists = exists_count--;
ensure(prev_exists > 0);
if (prev_exists == 1)
{
m_storage->on_ranged_block_last_section_destroyed(*this);
}
}
void on_section_released(const section_storage_type &/*section*/)
{
u32 prev_unreleased = unreleased_count--;
ensure(prev_unreleased > 0);
}
void on_section_unreleased(const section_storage_type &/*section*/)
{
unreleased_count++;
}
/**
* Overlapping sections
*/
inline bool contains_unowned(section_storage_type &section) const
{
return (unowned.find(&section) != unowned.end());
}
inline void add_unowned_section(section_storage_type &section)
{
AUDIT(overlaps(section));
AUDIT(section.get_section_base() < range.start);
AUDIT(!contains_unowned(section));
unowned.insert(&section);
}
inline void remove_unowned_section(section_storage_type &section)
{
AUDIT(overlaps(section));
AUDIT(section.get_section_base() < range.start);
AUDIT(contains_unowned(section));
unowned.erase(&section);
}
inline unowned_iterator unowned_begin() { return unowned.begin(); }
inline unowned_const_iterator unowned_begin() const { return unowned.begin(); }
inline unowned_iterator unowned_end() { return unowned.end(); }
inline unowned_const_iterator unowned_end() const { return unowned.end(); }
inline bool unowned_empty() const { return unowned.empty(); }
};
template <typename traits>
class ranged_storage
{
public:
static constexpr u32 block_size = 0x100000;
static_assert(block_size % 4096u == 0, "block_size must be a multiple of the page size");
static constexpr u32 num_blocks = u32{0x100000000ull / block_size};
static_assert((num_blocks > 0) && (u64{num_blocks} *block_size == 0x100000000ull), "Invalid block_size/num_blocks");
using section_storage_type = typename traits::section_storage_type;
using texture_cache_type = typename traits::texture_cache_base_type;
using block_type = ranged_storage_block<ranged_storage>;
private:
block_type blocks[num_blocks];
texture_cache_type *m_tex_cache;
std::unordered_set<block_type*> m_in_use;
public:
atomic_t<u32> m_unreleased_texture_objects = { 0 }; //Number of invalidated objects not yet freed from memory
atomic_t<u64> m_texture_memory_in_use = { 0 };
// Constructor
ranged_storage(texture_cache_type *tex_cache) :
m_tex_cache(tex_cache)
{
// Initialize blocks
for (u32 i = 0; i < num_blocks; i++)
{
blocks[i].initialize(i, this);
}
}
/**
* Iterators
*/
constexpr auto begin() { return std::begin(blocks); }
constexpr auto begin() const { return std::begin(blocks); }
constexpr auto end() { return std::end(blocks); }
constexpr auto end() const { return std::end(blocks); }
/**
* Utilities
*/
inline block_type& block_for(u32 address)
{
return blocks[address / block_size];
}
inline const block_type& block_for(u32 address) const
{
return blocks[address / block_size];
}
inline block_type& block_for(const address_range &range)
{
AUDIT(range.valid());
return block_for(range.start);
}
inline block_type& block_for(const section_storage_type &section)
{
return block_for(section.get_section_base());
}
inline block_type& operator[](usz pos)
{
AUDIT(pos < num_blocks);
return blocks[pos];
}
inline texture_cache_type& get_texture_cache() const
{
AUDIT(m_tex_cache != nullptr);
return *m_tex_cache;
}
/**
* Blocks
*/
void clear()
{
for (auto &block : *this)
{
block.clear();
}
m_in_use.clear();
AUDIT(m_unreleased_texture_objects == 0);
AUDIT(m_texture_memory_in_use == 0);
}
void purge_unreleased_sections()
{
std::vector<section_storage_type*> textures_to_remove;
// Reclaims all graphics memory consumed by dirty textures
// Do not destroy anything while iterating or you will end up with stale iterators
for (auto& block : m_in_use)
{
if (block->get_unreleased_count() > 0)
{
for (auto& tex : *block)
{
if (!tex.is_unreleased())
continue;
ensure(!tex.is_locked());
textures_to_remove.push_back(&tex);
}
}
}
for (auto& tex : textures_to_remove)
{
tex->destroy();
}
AUDIT(m_unreleased_texture_objects == 0);
}
bool purge_unlocked_sections()
{
// Reclaims all graphics memory consumed by unlocked textures
// Do not destroy anything while iterating or you will end up with stale iterators
std::vector<section_storage_type*> textures_to_remove;
for (auto& block : m_in_use)
{
if (block->get_exists_count() > block->get_locked_count())
{
for (auto& tex : *block)
{
if (tex.get_context() == rsx::texture_upload_context::framebuffer_storage ||
tex.is_locked() ||
!tex.exists())
{
continue;
}
ensure(!tex.is_locked() && tex.exists());
textures_to_remove.push_back(&tex);
}
}
}
for (auto& tex : textures_to_remove)
{
tex->destroy();
}
return !textures_to_remove.empty();
}
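// Re-validates protection bookkeeping for blocks tracking an unusually large number of locked
// sections: any section whose logical lock state no longer matches the actual page flags is
// resynchronized (and discarded) via sync_protection().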
void trim_sections()
{
for (auto it = m_in_use.begin(); it != m_in_use.end(); it++)
{
auto* block = *it;
if (block->get_locked_count() > 256)
{
for (auto& tex : *block)
{
if (tex.is_locked() && !tex.is_locked(true))
{
tex.sync_protection();
}
}
}
}
}
/**
* Callbacks
*/
void on_section_released(const section_storage_type &/*section*/)
{
u32 prev_unreleased = m_unreleased_texture_objects--;
ensure(prev_unreleased > 0);
}
void on_section_unreleased(const section_storage_type &/*section*/)
{
m_unreleased_texture_objects++;
}
void on_section_resources_created(const section_storage_type &section)
{
m_texture_memory_in_use += section.get_section_size();
}
void on_section_resources_destroyed(const section_storage_type &section)
{
u64 size = section.get_section_size();
u64 prev_size = m_texture_memory_in_use.fetch_sub(size);
ensure(prev_size >= size);
}
void on_ranged_block_first_section_created(block_type& block)
{
AUDIT(m_in_use.find(&block) == m_in_use.end());
m_in_use.insert(&block);
}
void on_ranged_block_last_section_destroyed(block_type& block)
{
AUDIT(m_in_use.find(&block) != m_in_use.end());
m_in_use.erase(&block);
}
/**
* Ranged Iterator
*/
// Iterator
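// Visits every section overlapping a given address range in two phases: first the "unowned"
// sections registered with the starting block (sections based in an earlier block that reach
// into the range), then the sections owned by each block the range touches.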
template <typename T, typename unowned_iterator, typename section_iterator, typename block_type, typename parent_type>
class range_iterator_tmpl
{
public:
// Traits
using value_type = T;
using pointer = T * ;
using difference_type = int;
using reference = T & ;
using iterator_category = std::forward_iterator_tag;
// Constructors
range_iterator_tmpl() = default; // end iterator
explicit range_iterator_tmpl(parent_type &storage, const address_range &_range, section_bounds _bounds, bool _locked_only)
: range(_range)
, bounds(_bounds)
, block(&storage.block_for(range.start))
, unowned_remaining(true)
, unowned_it(block->unowned_begin())
, cur_block_it(block->begin())
, locked_only(_locked_only)
{
// do a "fake" iteration to ensure the internal state is consistent
next(false);
}
private:
// Members
address_range range;
section_bounds bounds;
block_type *block = nullptr;
bool needs_overlap_check = true;
bool unowned_remaining = false;
unowned_iterator unowned_it = {};
section_iterator cur_block_it = {};
pointer obj = nullptr;
bool locked_only = false;
inline void next(bool iterate = true)
{
AUDIT(block != nullptr);
if (unowned_remaining)
{
do
{
// Still have "unowned" sections from blocks before the range to loop through
auto blk_end = block->unowned_end();
if (iterate && unowned_it != blk_end)
{
++unowned_it;
}
if (unowned_it != blk_end)
{
obj = *unowned_it;
if (obj->valid_range() && (!locked_only || obj->is_locked()) && obj->overlaps(range, bounds))
return;
iterate = true;
continue;
}
// No more unowned sections remaining
unowned_remaining = false;
iterate = false;
break;
} while (true);
}
// Go to next block
do
{
// Iterate current block
do
{
auto blk_end = block->end();
if (iterate && cur_block_it != blk_end)
{
++cur_block_it;
}
if (cur_block_it != blk_end)
{
obj = &(*cur_block_it);
if (obj->valid_range() && (!locked_only || obj->is_locked()) && (!needs_overlap_check || obj->overlaps(range, bounds)))
return;
iterate = true;
continue;
}
break;
} while (true);
// Move to next block(s)
do
{
block = block->next_block();
if (block == nullptr || block->get_start() > range.end) // Reached end
{
block = nullptr;
obj = nullptr;
return;
}
needs_overlap_check = (block->get_end() > range.end);
cur_block_it = block->begin();
iterate = false;
} while (locked_only && block->get_locked_count() == 0); // find a block with locked sections
} while (true);
}
public:
inline reference operator*() const { return *obj; }
inline pointer operator->() const { return obj; }
inline reference operator++() { next(); return *obj; }
inline reference operator++(int) { auto *ptr = obj; next(); return *ptr; }
inline bool operator==(const range_iterator_tmpl &rhs) const { return obj == rhs.obj && unowned_remaining == rhs.unowned_remaining; }
inline void set_end(u32 new_end)
{
range.end = new_end;
// If we've exceeded the new end, invalidate iterator
if (block->get_start() > range.end)
{
block = nullptr;
}
}
inline block_type& get_block() const
{
AUDIT(block != nullptr);
return *block;
}
inline section_bounds get_bounds() const
{
return bounds;
}
};
using range_iterator = range_iterator_tmpl<section_storage_type, typename block_type::unowned_iterator, typename block_type::iterator, block_type, ranged_storage>;
using range_const_iterator = range_iterator_tmpl<const section_storage_type, typename block_type::unowned_const_iterator, typename block_type::const_iterator, const block_type, const ranged_storage>;
inline range_iterator range_begin(const address_range &range, section_bounds bounds, bool locked_only = false) {
return range_iterator(*this, range, bounds, locked_only);
}
inline range_const_iterator range_begin(const address_range &range, section_bounds bounds, bool locked_only = false) const {
return range_const_iterator(*this, range, bounds, locked_only);
}
inline range_const_iterator range_begin(u32 address, section_bounds bounds, bool locked_only = false) const {
return range_const_iterator(*this, address_range::start_length(address, 1), bounds, locked_only);
}
constexpr range_iterator range_end()
{
return range_iterator();
}
constexpr range_const_iterator range_end() const
{
return range_const_iterator();
}
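// Illustrative usage sketch (hypothetical `storage` and `range` variables): count the locked
// sections overlapping an address range.
//   u32 locked_sections = 0;
//   for (auto it = storage.range_begin(range, rsx::section_bounds::locked_range, true /* locked_only */);
//        it != storage.range_end(); ++it)
//   {
//       locked_sections++;
//   }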
/**
* Debug
*/
#ifdef TEXTURE_CACHE_DEBUG
void verify_protection(bool recount = false)
{
if (recount)
{
// Reset calculated part of the page_info struct
tex_cache_checker.reset_refcount();
// Go through all blocks and update calculated values
for (auto &block : *this)
{
for (auto &tex : block)
{
if (tex.is_locked())
{
tex_cache_checker.add(tex.get_locked_range(), tex.get_protection());
}
}
}
}
// Verify
tex_cache_checker.verify();
}
#endif //TEXTURE_CACHE_DEBUG
};
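// Base helper for a cached memory section: tracks the section's CPU address range, the
// page-aligned range actually locked for access protection, and an optional "confirmed"
// sub-range of data known to be valid.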
class buffered_section
{
private:
address_range locked_range;
address_range cpu_range = {};
address_range confirmed_range;
utils::protection protection = utils::protection::rw;
section_protection_strategy protection_strat = section_protection_strategy::lock;
u64 mem_hash = 0;
bool locked = false;
void init_lockable_range(const address_range& range);
u64 fast_hash_internal() const;
public:
buffered_section() = default;
~buffered_section() = default;
void reset(const address_range& memory_range);
protected:
void invalidate_range();
public:
void protect(utils::protection new_prot, bool force = false);
void protect(utils::protection prot, const std::pair<u32, u32>& new_confirm);
void unprotect();
bool sync() const;
void discard();
const address_range& get_bounds(section_bounds bounds) const;
bool is_locked(bool actual_page_flags = false) const;
/**
* Overlapping checks
*/
inline bool overlaps(const u32 address, section_bounds bounds) const
{
return get_bounds(bounds).overlaps(address);
}
inline bool overlaps(const address_range& other, section_bounds bounds) const
{
return get_bounds(bounds).overlaps(other);
}
inline bool overlaps(const address_range_vector& other, section_bounds bounds) const
{
return get_bounds(bounds).overlaps(other);
}
inline bool overlaps(const buffered_section& other, section_bounds bounds) const
{
return get_bounds(bounds).overlaps(other.get_bounds(bounds));
}
inline bool inside(const address_range& other, section_bounds bounds) const
{
return get_bounds(bounds).inside(other);
}
inline bool inside(const address_range_vector& other, section_bounds bounds) const
{
return get_bounds(bounds).inside(other);
}
inline bool inside(const buffered_section& other, section_bounds bounds) const
{
return get_bounds(bounds).inside(other.get_bounds(bounds));
}
inline s32 signed_distance(const address_range& other, section_bounds bounds) const
{
return get_bounds(bounds).signed_distance(other);
}
inline u32 distance(const address_range& other, section_bounds bounds) const
{
return get_bounds(bounds).distance(other);
}
/**
* Utilities
*/
inline bool valid_range() const
{
return cpu_range.valid();
}
inline u32 get_section_base() const
{
return cpu_range.start;
}
inline u32 get_section_size() const
{
return cpu_range.valid() ? cpu_range.length() : 0;
}
inline const address_range& get_locked_range() const
{
AUDIT(locked);
return locked_range;
}
inline const address_range& get_section_range() const
{
return cpu_range;
}
const address_range& get_confirmed_range() const
{
return confirmed_range.valid() ? confirmed_range : cpu_range;
}
const std::pair<u32, u32> get_confirmed_range_delta() const
{
if (!confirmed_range.valid())
return { 0, cpu_range.length() };
return { confirmed_range.start - cpu_range.start, confirmed_range.length() };
}
inline bool matches(const address_range& range) const
{
return cpu_range.valid() && cpu_range == range;
}
inline utils::protection get_protection() const
{
return protection;
}
inline address_range get_min_max(const address_range& current_min_max, section_bounds bounds) const
{
return get_bounds(bounds).get_min_max(current_min_max);
}
/**
* Super Pointer
*/
template <typename T = void>
inline T* get_ptr(u32 address) const
{
return reinterpret_cast<T*>(vm::g_sudo_addr + address);
}
};
/**
* Cached Texture Section
*/
template <typename derived_type, typename traits>
class cached_texture_section : public rsx::buffered_section, public rsx::ref_counted
{
public:
using ranged_storage_type = ranged_storage<traits>;
using ranged_storage_block_type = ranged_storage_block<ranged_storage_type>;
using texture_cache_type = typename traits::texture_cache_base_type;
using predictor_type = texture_cache_predictor<traits>;
using predictor_key_type = typename predictor_type::key_type;
using predictor_entry_type = typename predictor_type::mapped_type;
protected:
ranged_storage_type *m_storage = nullptr;
ranged_storage_block_type *m_block = nullptr;
texture_cache_type *m_tex_cache = nullptr;
private:
constexpr derived_type* derived()
{
return static_cast<derived_type*>(this);
}
constexpr const derived_type* derived() const
{
return static_cast<const derived_type*>(this);
}
bool dirty = true;
bool triggered_exists_callbacks = false;
bool triggered_unreleased_callbacks = false;
protected:
u16 width;
u16 height;
u16 depth;
u16 mipmaps;
u32 real_pitch;
u32 rsx_pitch;
u32 gcm_format = 0;
bool pack_unpack_swap_bytes = false;
bool swizzled = false;
u64 sync_timestamp = 0;
bool synchronized = false;
bool flushed = false;
bool speculatively_flushed = false;
rsx::memory_read_flags readback_behaviour = rsx::memory_read_flags::flush_once;
rsx::component_order view_flags = rsx::component_order::default_;
rsx::texture_upload_context context = rsx::texture_upload_context::shader_read;
rsx::texture_dimension_extended image_type = rsx::texture_dimension_extended::texture_dimension_2d;
address_range_vector flush_exclusions; // Address ranges that will be skipped during flush
predictor_type *m_predictor = nullptr;
usz m_predictor_key_hash = 0;
predictor_entry_type *m_predictor_entry = nullptr;
public:
u64 cache_tag = 0;
u64 last_write_tag = 0;
~cached_texture_section()
{
AUDIT(!exists());
}
cached_texture_section() = default;
cached_texture_section(ranged_storage_block_type *block)
{
initialize(block);
}
void initialize(ranged_storage_block_type *block)
{
ensure(m_block == nullptr && m_tex_cache == nullptr && m_storage == nullptr);
m_block = block;
m_storage = &block->get_storage();
m_tex_cache = &block->get_texture_cache();
m_predictor = &m_tex_cache->get_predictor();
update_unreleased();
}
/**
* Reset
*/
void reset(const address_range &memory_range)
{
AUDIT(memory_range.valid());
AUDIT(!is_locked());
// Destroy if necessary
destroy();
// Superclass
rsx::buffered_section::reset(memory_range);
// Reset member variables to the default
width = 0;
height = 0;
depth = 0;
mipmaps = 0;
real_pitch = 0;
rsx_pitch = 0;
gcm_format = 0;
pack_unpack_swap_bytes = false;
swizzled = false;
sync_timestamp = 0ull;
synchronized = false;
flushed = false;
speculatively_flushed = false;
cache_tag = 0ull;
last_write_tag = 0ull;
m_predictor_entry = nullptr;
readback_behaviour = rsx::memory_read_flags::flush_once;
view_flags = rsx::component_order::default_;
context = rsx::texture_upload_context::shader_read;
image_type = rsx::texture_dimension_extended::texture_dimension_2d;
flush_exclusions.clear();
// Set to dirty
set_dirty(true);
// Notify that our CPU range is now valid
notify_range_valid();
}
void create_dma_only(u16 width, u16 height, u32 pitch)
{
this->width = width;
this->height = height;
this->rsx_pitch = pitch;
set_context(rsx::texture_upload_context::dma);
}
/**
* Destroyed Flag
*/
inline bool is_destroyed() const { return !exists(); } // this section is currently destroyed
protected:
void on_section_resources_created()
{
AUDIT(exists());
AUDIT(valid_range());
if (triggered_exists_callbacks) return;
triggered_exists_callbacks = true;
// Callbacks
m_block->on_section_resources_created(*derived());
m_storage->on_section_resources_created(*derived());
}
void on_section_resources_destroyed()
{
if (!triggered_exists_callbacks) return;
triggered_exists_callbacks = false;
AUDIT(valid_range());
ensure(!is_locked());
ensure(is_managed());
// Set dirty
set_dirty(true);
// Trigger callbacks
m_block->on_section_resources_destroyed(*derived());
m_storage->on_section_resources_destroyed(*derived());
// Invalidate range
invalidate_range();
}
virtual void dma_abort()
{}
public:
/**
* Dirty/Unreleased Flag
*/
inline bool is_dirty() const { return dirty; } // this section is dirty and will need to be reuploaded
void set_dirty(bool new_dirty)
{
if (new_dirty == false && !is_locked() && context == texture_upload_context::shader_read)
return;
dirty = new_dirty;
AUDIT(dirty || exists());
update_unreleased();
}
private:
void update_unreleased()
{
bool unreleased = is_unreleased();
if (unreleased && !triggered_unreleased_callbacks)
{
triggered_unreleased_callbacks = true;
m_block->on_section_unreleased(*derived());
m_storage->on_section_unreleased(*derived());
}
else if (!unreleased && triggered_unreleased_callbacks)
{
triggered_unreleased_callbacks = false;
m_block->on_section_released(*derived());
m_storage->on_section_released(*derived());
}
}
/**
* Valid Range
*/
void notify_range_valid()
{
AUDIT(valid_range());
// Callbacks
m_block->on_section_range_valid(*derived());
//m_storage->on_section_range_valid(*derived());
// Reset texture_cache m_flush_always_cache
if (readback_behaviour == memory_read_flags::flush_always)
{
m_tex_cache->on_memory_read_flags_changed(*derived(), memory_read_flags::flush_always);
}
}
void invalidate_range()
{
if (!valid_range())
return;
// Reset texture_cache m_flush_always_cache
if (readback_behaviour == memory_read_flags::flush_always)
{
m_tex_cache->on_memory_read_flags_changed(*derived(), memory_read_flags::flush_once);
}
// Notify the storage block that we are now invalid
m_block->on_section_range_invalid(*derived());
//m_storage->on_section_range_invalid(*derived());
m_predictor_entry = nullptr;
speculatively_flushed = false;
buffered_section::invalidate_range();
}
public:
/**
* Misc.
*/
bool is_unreleased() const
{
return exists() && is_dirty() && !is_locked();
}
bool can_be_reused() const
{
if (has_refs()) [[unlikely]]
{
return false;
}
return !exists() || (is_dirty() && !is_locked());
}
bool is_flushable() const
{
//This section is active and can be flushed to cpu
return (get_protection() == utils::protection::no);
}
bool is_synchronized() const
{
return synchronized;
}
bool is_flushed() const
{
return flushed;
}
bool should_flush() const
{
if (context == rsx::texture_upload_context::framebuffer_storage)
{
const auto surface = derived()->get_render_target();
return surface->has_flushable_data();
}
return true;
}
private:
/**
* Protection
*/
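// Runs after every protection change: updates the dirty flag, notifies the owning block of
// protect/unprotect transitions, and forwards lock/unlock events to the render target when
// this section wraps framebuffer storage.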
void post_protect(utils::protection old_prot, utils::protection prot)
{
if (old_prot != utils::protection::rw && prot == utils::protection::rw)
{
AUDIT(!is_locked());
m_block->on_section_unprotected(*derived());
// Blit and framebuffers may be unprotected and clean
if (context == texture_upload_context::shader_read)
{
set_dirty(true);
}
}
else if (old_prot == utils::protection::rw && prot != utils::protection::rw)
{
AUDIT(is_locked());
m_block->on_section_protected(*derived());
set_dirty(false);
}
if (context == rsx::texture_upload_context::framebuffer_storage && !Emu.IsStopped())
{
// Lock, unlock
auto surface = derived()->get_render_target();
if (prot == utils::protection::no && old_prot != utils::protection::no)
{
// Locked memory. We have to take ownership of the object in the surface cache as well
surface->on_lock();
}
else if (old_prot == utils::protection::no && prot != utils::protection::no)
{
// Release the surface, the cache can remove it if needed
ensure(prot == utils::protection::rw);
surface->on_unlock();
}
}
}
public:
inline void protect(utils::protection prot)
{
utils::protection old_prot = get_protection();
rsx::buffered_section::protect(prot);
post_protect(old_prot, prot);
}
inline void protect(utils::protection prot, const std::pair<u32, u32>& range_confirm)
{
utils::protection old_prot = get_protection();
rsx::buffered_section::protect(prot, range_confirm);
post_protect(old_prot, prot);
}
inline void unprotect()
{
utils::protection old_prot = get_protection();
rsx::buffered_section::unprotect();
post_protect(old_prot, utils::protection::rw);
}
inline void discard(bool set_dirty = true)
{
utils::protection old_prot = get_protection();
rsx::buffered_section::discard();
post_protect(old_prot, utils::protection::rw);
if (set_dirty)
{
this->set_dirty(true);
}
}
void reprotect(const utils::protection prot)
{
if (synchronized && !flushed)
{
// Abort enqueued transfer
dma_abort();
}
//Reset properties and protect again
flushed = false;
synchronized = false;
sync_timestamp = 0ull;
protect(prot);
}
void reprotect(const utils::protection prot, const std::pair<u32, u32>& range)
{
if (synchronized && !flushed)
{
// Abort enqueued transfer
dma_abort();
}
//Reset properties and protect again
flushed = false;
synchronized = false;
sync_timestamp = 0ull;
protect(prot, range);
}
/**
* Prediction
*/
bool tracked_by_predictor() const
{
// We do not update the predictor statistics for flush_always sections
return get_context() != texture_upload_context::shader_read && get_memory_read_flags() != memory_read_flags::flush_always;
}
void on_flush()
{
speculatively_flushed = false;
m_tex_cache->on_flush();
if (tracked_by_predictor())
{
get_predictor_entry().on_flush();
}
flush_exclusions.clear();
if (context == rsx::texture_upload_context::framebuffer_storage)
{
derived()->get_render_target()->sync_tag();
}
}
void on_speculative_flush()
{
speculatively_flushed = true;
m_tex_cache->on_speculative_flush();
}
void on_miss()
{
rsx_log.warning("Cache miss at address 0x%X. This is gonna hurt...", get_section_base());
m_tex_cache->on_miss(*derived());
}
void touch(u64 tag)
{
last_write_tag = tag;
if (tracked_by_predictor())
{
get_predictor_entry().on_write(speculatively_flushed);
}
if (speculatively_flushed)
{
m_tex_cache->on_misprediction();
}
flush_exclusions.clear();
}
bool sync_protection()
{
if (!buffered_section::sync())
{
discard(true);
ensure(is_dirty());
return false;
}
return true;
}
/**
* Flush
*/
private:
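// Copies data into guest memory while honouring flush_exclusions: ranges overlapping an
// exclusion are split so that only the non-excluded parts are written.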
void imp_flush_memcpy(u32 vm_dst, u8* src, u32 len) const
{
u8 *dst = get_ptr<u8>(vm_dst);
address_range copy_range = address_range::start_length(vm_dst, len);
if (flush_exclusions.empty() || !copy_range.overlaps(flush_exclusions))
{
// Normal case = no flush exclusions, or no overlap
memcpy(dst, src, len);
return;
}
else if (copy_range.inside(flush_exclusions))
{
// Nothing to copy
return;
}
// Otherwise, we need to filter the memcpy with our flush exclusions
// Should be relatively rare
address_range_vector vec;
vec.merge(copy_range);
vec.exclude(flush_exclusions);
for (const auto& rng : vec)
{
if (!rng.valid())
continue;
AUDIT(rng.inside(copy_range));
u32 offset = rng.start - vm_dst;
memcpy(dst + offset, src + offset, rng.length());
}
}
virtual void imp_flush()
{
AUDIT(synchronized);
ensure(real_pitch > 0);
// Calculate valid range
const auto valid_range = get_confirmed_range();
AUDIT(valid_range.valid());
const auto valid_length = valid_range.length();
const auto valid_offset = valid_range.start - get_section_base();
AUDIT(valid_length > 0);
// In case of pitch mismatch, remap the offset from the guest (rsx_pitch) layout to the packed (real_pitch) layout
u32 mapped_offset, mapped_length;
if (real_pitch != rsx_pitch)
{
if (!valid_offset) [[likely]]
{
mapped_offset = 0;
}
else
{
const u32 offset_in_x = valid_offset % rsx_pitch;
const u32 offset_in_y = valid_offset / rsx_pitch;
mapped_offset = (offset_in_y * real_pitch) + offset_in_x;
}
const u32 available_vmem = (get_section_size() / rsx_pitch) * real_pitch + std::min<u32>(get_section_size() % rsx_pitch, real_pitch);
mapped_length = std::min(available_vmem - mapped_offset, valid_length);
}
else
{
mapped_offset = valid_offset;
mapped_length = valid_length;
}
// Obtain pointers to the source and destination memory regions
u8 *src = static_cast<u8*>(derived()->map_synchronized(mapped_offset, mapped_length));
u32 dst = valid_range.start;
ensure(src != nullptr);
// Copy from src to dst
if (real_pitch >= rsx_pitch || valid_length <= rsx_pitch)
{
imp_flush_memcpy(dst, src, valid_length);
}
else
{
u8 *_src = src;
u32 _dst = dst;
const auto num_exclusions = flush_exclusions.size();
if (num_exclusions > 0)
{
rsx_log.warning("Slow imp_flush path triggered with non-empty flush_exclusions (%d exclusions, %d bytes), performance might suffer", num_exclusions, valid_length);
}
for (s32 remaining = s32(valid_length); remaining > 0; remaining -= rsx_pitch)
{
imp_flush_memcpy(_dst, _src, real_pitch);
_src += real_pitch;
_dst += rsx_pitch;
}
}
}
public:
// Flushes the section contents back to guest memory
void flush()
{
if (flushed) return;
// Sanity checks
ensure(exists());
AUDIT(is_locked());
auto cleanup_flush = [&]()
{
flushed = true;
flush_exclusions.clear();
on_flush();
};
// If we are fully inside the flush exclusions regions, we just mark ourselves as flushed and return
// We apply the same skip if there is nothing new in the surface data
if (!should_flush() || get_confirmed_range().inside(flush_exclusions))
{
cleanup_flush();
return;
}
// NOTE: Hard faults should have been pre-processed beforehand
ensure(synchronized);
// Copy flush result to guest memory
imp_flush();
// Finish up
// It's highly likely that this surface will be reused, so we just leave resources in place
derived()->finish_flush();
cleanup_flush();
}
void add_flush_exclusion(const address_range& rng)
{
AUDIT(is_locked() && is_flushable());
const auto _rng = rng.get_intersect(get_section_range());
flush_exclusions.merge(_rng);
}
/**
* Misc
*/
public:
predictor_entry_type& get_predictor_entry()
{
// If we don't have a predictor entry, or the key has changed
if (m_predictor_entry == nullptr || !m_predictor_entry->key_matches(*derived()))
{
m_predictor_entry = &((*m_predictor)[*derived()]);
}
return *m_predictor_entry;
}
void set_view_flags(rsx::component_order flags)
{
view_flags = flags;
}
void set_context(rsx::texture_upload_context upload_context)
{
AUDIT(!exists() || !is_locked() || context == upload_context);
context = upload_context;
}
void set_image_type(rsx::texture_dimension_extended type)
{
image_type = type;
}
void set_gcm_format(u32 format)
{
gcm_format = format;
}
void set_swizzled(bool is_swizzled)
{
swizzled = is_swizzled;
}
void set_memory_read_flags(memory_read_flags flags, bool notify_texture_cache = true)
{
const bool changed = (flags != readback_behaviour);
readback_behaviour = flags;
if (notify_texture_cache && changed && valid_range())
{
m_tex_cache->on_memory_read_flags_changed(*derived(), flags);
}
}
u16 get_width() const
{
return width;
}
u16 get_height() const
{
return height;
}
u16 get_depth() const
{
return depth;
}
u16 get_mipmaps() const
{
return mipmaps;
}
u32 get_rsx_pitch() const
{
return rsx_pitch;
}
rsx::component_order get_view_flags() const
{
return view_flags;
}
rsx::texture_upload_context get_context() const
{
return context;
}
rsx::section_bounds get_overlap_test_bounds() const
{
return rsx::section_bounds::locked_range;
}
rsx::texture_dimension_extended get_image_type() const
{
return image_type;
}
u32 get_gcm_format() const
{
return gcm_format;
}
bool is_swizzled() const
{
return swizzled;
}
memory_read_flags get_memory_read_flags() const
{
return readback_behaviour;
}
u64 get_sync_timestamp() const
{
return sync_timestamp;
}
rsx::format_class get_format_class() const
{
return classify_format(gcm_format);
}
/**
* Comparison
*/
inline bool matches(const address_range &memory_range) const
{
return valid_range() && rsx::buffered_section::matches(memory_range);
}
bool matches(u32 format, u32 width, u32 height, u32 depth, u32 mipmaps) const
{
if (!valid_range())
return false;
if (format && gcm_format != format)
return false;
if (!width && !height && !depth && !mipmaps)
return true;
if (width && width != this->width)
return false;
if (height && height != this->height)
return false;
if (depth && depth != this->depth)
return false;
if (mipmaps && mipmaps > this->mipmaps)
return false;
return true;
}
bool matches(u32 rsx_address, u32 format, u32 width, u32 height, u32 depth, u32 mipmaps) const
{
if (!valid_range())
return false;
if (rsx_address != get_section_base())
return false;
return matches(format, width, height, depth, mipmaps);
}
bool matches(const address_range& memory_range, u32 format, u32 width, u32 height, u32 depth, u32 mipmaps) const
{
if (!valid_range())
return false;
if (!rsx::buffered_section::matches(memory_range))
return false;
return matches(format, width, height, depth, mipmaps);
}
/**
* Derived wrappers
*/
void destroy()
{
derived()->destroy();
}
bool is_managed() const
{
return derived()->is_managed();
}
bool exists() const
{
if (derived()->exists())
{
return true;
}
else
{
return (context == rsx::texture_upload_context::dma && is_locked());
}
}
};
} // namespace rsx
| 44,878 | C++ | .h | 1,543 | 25.068697 | 201 | 0.67252 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 6,013 | bitfield.hpp | RPCS3_rpcs3/rpcs3/Emu/RSX/Common/bitfield.hpp |
#pragma once
#include <util/types.hpp>
#include <bitset>
namespace rsx
{
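// Expands a std::bitset<N> into an array of u64 words, least-significant word first.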
template <int N>
void unpack_bitset(const std::bitset<N>& block, u64* values)
{
for (int bit = 0, n = -1, shift = 0; bit < N; ++bit, ++shift)
{
if ((bit % 64) == 0)
{
values[++n] = 0;
shift = 0;
}
if (block[bit])
{
values[n] |= (1ull << shift);
}
}
}
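// Packs an array of u64 words back into a std::bitset<N>; the inverse of unpack_bitset.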
template <int N>
void pack_bitset(std::bitset<N>& block, u64* values)
{
for (int n = 0, shift = 0; shift < N; ++n, shift += 64)
{
std::bitset<N> tmp = values[n];
tmp <<= shift;
block |= tmp;
}
}
template <typename T, typename bitmask_type = u32>
requires std::is_integral_v<bitmask_type>&& std::is_enum_v<T>
class atomic_bitmask_t
{
private:
atomic_t<bitmask_type> m_data{ 0 };
public:
atomic_bitmask_t() = default;
T load() const
{
return static_cast<T>(m_data.load());
}
void store(T value)
{
m_data.store(static_cast<bitmask_type>(value));
}
bool operator & (T mask) const
{
return ((m_data.load() & static_cast<bitmask_type>(mask)) != 0);
}
T operator | (T mask) const
{
return static_cast<T>(m_data.load() | static_cast<bitmask_type>(mask));
}
void operator &= (T mask)
{
m_data.fetch_and(static_cast<bitmask_type>(mask));
}
void operator |= (T mask)
{
m_data.fetch_or(static_cast<bitmask_type>(mask));
}
bool test_and_set(T mask)
{
const auto old = m_data.fetch_or(static_cast<bitmask_type>(mask));
return (old & static_cast<bitmask_type>(mask)) != 0;
}
auto clear(T mask)
{
bitmask_type clear_mask = ~(static_cast<bitmask_type>(mask));
return m_data.and_fetch(clear_mask);
}
void clear()
{
m_data.release(0);
}
operator bool () const
{
return m_data.observe() != 0;
}
};
template <typename T, typename bitmask_type = u32>
requires std::is_integral_v<bitmask_type> && std::is_enum_v<T>
class bitmask_t
{
private:
bitmask_type m_data = 0;
public:
bitmask_t() = default;
bitmask_type load() const
{
return m_data;
}
bool operator & (bitmask_type mask) const
{
return !!(m_data & mask);
}
bitmask_type operator | (bitmask_type mask) const
{
return m_data | mask;
}
void operator &= (bitmask_type mask)
{
m_data &= mask;
}
void operator |= (bitmask_type mask)
{
m_data |= mask;
}
bool test(T mask)
{
return !!(m_data & static_cast<bitmask_type>(mask));
}
void set(T mask)
{
m_data |= static_cast<bitmask_type>(mask);
}
bool test_and_set(T mask)
{
const auto old = m_data;
m_data |= static_cast<bitmask_type>(mask);
return !!(old & mask);
}
template <typename U>
requires std::is_same_v<T, U> || std::is_same_v<bitmask_type, U>
void clear(U mask)
{
const bitmask_type clear_mask = ~(static_cast<bitmask_type>(mask));
m_data &= clear_mask;
}
void clear()
{
m_data = 0;
}
operator bool() const
{
return !!m_data;
}
};
}
| 2,964 | C++ | .h | 141 | 17.744681 | 74 | 0.620974 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | true | false | false |
| 6,014 | io_buffer.h | RPCS3_rpcs3/rpcs3/Emu/RSX/Common/io_buffer.h |
#pragma once
#include <util/types.hpp>
#include <util/bless.hpp>
#include <span>
#include <vector>
#include <functional>
namespace rsx
{
template <typename T>
concept SpanLike = requires(T t)
{
{ t.data() } -> std::convertible_to<void*>;
{ t.size_bytes() } -> std::convertible_to<usz>;
};
template <typename T>
concept Integral = std::is_integral_v<T> || std::is_same_v<T, u128>;
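// Non-owning view over a span of memory that can optionally defer allocation to a callback:
// when constructed with an allocator, data() performs the allocation on first access using the
// size passed to reserve().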
class io_buffer
{
mutable void* m_ptr = nullptr;
mutable usz m_size = 0;
std::function<std::tuple<void*, usz>(usz)> m_allocator{};
mutable usz m_allocation_size = 0u;
public:
io_buffer() = default;
template <SpanLike T>
io_buffer(const T& container)
{
m_ptr = const_cast<void*>(reinterpret_cast<const void*>(container.data()));
m_size = container.size_bytes();
}
io_buffer(std::function<std::tuple<void*, usz>(usz)> allocator)
{
ensure(allocator);
m_allocator = allocator;
}
template <Integral T>
io_buffer(void* ptr, T size)
: m_ptr(ptr), m_size(size)
{}
template <Integral T>
io_buffer(const void* ptr, T size)
: m_ptr(const_cast<void*>(ptr)), m_size(size)
{}
void reserve(usz size) const
{
m_allocation_size = size;
}
std::pair<void*, usz> raw() const
{
return { m_ptr, m_size };
}
template <Integral T = u8>
T* data() const
{
if (!m_ptr && m_allocator)
{
std::tie(m_ptr, m_size) = m_allocator(m_allocation_size);
}
return static_cast<T*>(m_ptr);
}
usz size() const
{
return m_size;
}
template<typename T>
std::span<T> as_span() const
{
auto bytes = data();
return { utils::bless<T>(bytes), m_size / sizeof(T) };
}
bool empty() const
{
return m_size == 0;
}
};
}
| 1,796 | C++ | .h | 76 | 19.342105 | 79 | 0.613152 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 6,015 | surface_cache_dma.hpp | RPCS3_rpcs3/rpcs3/Emu/RSX/Common/surface_cache_dma.hpp |
#pragma once
#include <util/types.hpp>
#include "Utilities/address_range.h"
namespace rsx
{
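// Tracks per-block GPU buffer objects backing surface DMA transfers; with_range() merges
// neighbouring buffer objects so that a requested address range ends up covered by a single
// contiguous allocation.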
template <typename Traits, int BlockSize>
class surface_cache_dma
{
protected:
static inline u32 block_for(u32 address)
{
return address / BlockSize;
}
static inline u32 block_address(u32 block_id)
{
return block_id * BlockSize;
}
using buffer_object_storage_type = typename Traits::buffer_object_storage_type;
using buffer_object_type = typename Traits::buffer_object_type;
using command_list_type = typename Traits::command_list_type;
struct memory_buffer_entry_t
{
u32 id;
buffer_object_storage_type bo;
u64 memory_tag = 0;
u32 base_address = 0;
inline buffer_object_type get() { return Traits::get(bo); }
inline operator bool () const { return base_address != 0; }
inline void release() { bo.release(); }
inline void acquire(buffer_object_type b) { bo = b; }
};
using buffer_block_array = typename std::array<memory_buffer_entry_t, 0x100000000ull / BlockSize>;
buffer_block_array m_buffer_list;
public:
surface_cache_dma()
{
for (u32 i = 0; i < ::size32(m_buffer_list); ++i)
{
m_buffer_list[i].id = i;
}
}
surface_cache_dma& with_range(command_list_type cmd, const utils::address_range& range)
{
// Prepare underlying memory so that the range specified is provisioned and contiguous
// 1. Check if we have a pre-existing bo layer
const auto& this_entry = m_buffer_list[block_for(range.start)];
if (this_entry)
{
const auto bo = this_entry.get();
const auto buffer_range = utils::address_range::start_length(bo.base_address, ::size32(*bo));
if (range.inside(buffer_range))
{
// All is well
return *this;
}
}
// Data does not exist or is not contiguous. Merge the layer
std::vector<buffer_object_type> bo_list;
const auto start_address = this_entry ? this_entry.base_address : block_address(this_entry.id);
for (u32 address = start_address; address <= range.end;)
{
auto& bo_storage = m_buffer_list[block_for(address)];
bo_storage.base_address = start_address;
if (auto bo = bo_storage.get())
{
bo_list.push_back(bo);
bo_storage.release();
address += ::size32(*bo);
continue;
}
bo_list.push_back(nullptr);
address += BlockSize;
}
auto unified = Traits::template merge_bo_list<BlockSize>(cmd, bo_list);
ensure(unified);
m_buffer_list[block_for(start_address)].acquire(unified);
return *this;
}
utils::address_range to_block_range(const utils::address_range& range)
{
u32 start = block_address(block_for(range.start));
u32 end = block_address(block_for(range.end + BlockSize - 1));
return utils::address_range::start_end(start, end - 1);
}
std::tuple<buffer_object_type, u32, u64> get(u32 address)
{
const auto& block = m_buffer_list[block_for(address)];
return { block.get(), block.base_address - address };
}
void touch(const utils::address_range& range)
{
const u64 stamp = rsx::get_shared_tag();
for (usz i = block_for(range.start); i <= block_for(range.end); i++)
{
m_buffer_list[i].memory_tag = stamp;
}
}
};
}
| 3,193 | C++ | .h | 99 | 28.383838 | 100 | 0.683474 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 6,016 | unordered_map.hpp | RPCS3_rpcs3/rpcs3/Emu/RSX/Common/unordered_map.hpp |
#pragma once
#ifdef RSX_USE_STD_MAP
#include <unordered_map>
namespace rsx
{
template<typename T, typename U>
using unordered_map = std::unordered_map<T, U>;
}
#else
#include "3rdparty/robin_hood/include/robin_hood.h"
namespace rsx
{
template<typename T, typename U>
using unordered_map = ::robin_hood::unordered_map<T, U>;
}
#endif
| 359 | C++ | .h | 16 | 19.8125 | 58 | 0.72997 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 6,017 | texture_cache_checker.h | RPCS3_rpcs3/rpcs3/Emu/RSX/Common/texture_cache_checker.h |
#pragma once
#include "../rsx_utils.h"
#ifdef TEXTURE_CACHE_DEBUG
namespace rsx {
class tex_cache_checker_t {
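// Per 4 KiB page bookkeeping: the protection the cache believes the page has, plus reference
// counts of the NO- and RO-protected sections overlapping it.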
struct per_page_info_t {
u8 prot = 0;
u8 no = 0;
u8 ro = 0;
FORCE_INLINE utils::protection get_protection() const
{
return static_cast<utils::protection>(prot);
}
FORCE_INLINE void set_protection(utils::protection prot)
{
this->prot = static_cast<u8>(prot);
}
FORCE_INLINE void reset_refcount()
{
no = 0;
ro = 0;
}
FORCE_INLINE u16 sum() const
{
return u16{ no } + ro;
}
FORCE_INLINE bool verify() const
{
const utils::protection prot = get_protection();
switch (prot)
{
case utils::protection::no: return no > 0;
case utils::protection::ro: return no == 0 && ro > 0;
case utils::protection::rw: return no == 0 && ro == 0;
default: fmt::throw_exception("Unreachable");
}
}
FORCE_INLINE void add(utils::protection prot)
{
switch (prot)
{
case utils::protection::no: if (no++ == umax) fmt::throw_exception("add(protection::no) overflow"); return;
case utils::protection::ro: if (ro++ == umax) fmt::throw_exception("add(protection::ro) overflow"); return;
default: fmt::throw_exception("Unreachable");
}
}
FORCE_INLINE void remove(utils::protection prot)
{
switch (prot)
{
case utils::protection::no: if (no-- == 0) fmt::throw_exception("remove(protection::no) overflow with NO==0"); return;
case utils::protection::ro: if (ro-- == 0) fmt::throw_exception("remove(protection::ro) overflow with RO==0"); return;
default: fmt::throw_exception("Unreachable");
}
}
};
static_assert(sizeof(per_page_info_t) <= 4, "per_page_info_t must not exceed 4 bytes in size");
// 4GB memory space / 4096 bytes per page = 1048576 pages
// Initialized to utils::protection::rw
static constexpr usz num_pages = 0x1'0000'0000 / 4096;
per_page_info_t _info[num_pages]{0};
static_assert(static_cast<u32>(utils::protection::rw) == 0, "utils::protection::rw must have value 0 for the zero-initialization above to be correct");
static constexpr usz rsx_address_to_index(u32 address)
{
return (address / 4096);
}
static constexpr u32 index_to_rsx_address(usz idx)
{
return static_cast<u32>(idx * 4096);
}
constexpr per_page_info_t* rsx_address_to_info_pointer(u32 address)
{
return &(_info[rsx_address_to_index(address)]);
}
constexpr const per_page_info_t* rsx_address_to_info_pointer(u32 address) const
{
return &(_info[rsx_address_to_index(address)]);
}
constexpr u32 info_pointer_to_address(const per_page_info_t* ptr) const
{
return index_to_rsx_address(static_cast<usz>(ptr - _info));
}
std::string prot_to_str(utils::protection prot) const
{
switch (prot)
{
case utils::protection::no: return "NA";
case utils::protection::ro: return "RO";
case utils::protection::rw: return "RW";
default: fmt::throw_exception("Unreachable");
}
}
public:
void set_protection(const address_range& range, utils::protection prot)
{
AUDIT(range.is_page_range());
AUDIT(prot == utils::protection::no || prot == utils::protection::ro || prot == utils::protection::rw);
for (per_page_info_t* ptr = rsx_address_to_info_pointer(range.start); ptr <= rsx_address_to_info_pointer(range.end); ptr++)
{
ptr->set_protection(prot);
}
}
void discard(const address_range& range)
{
set_protection(range, utils::protection::rw);
}
void reset_refcount()
{
for (per_page_info_t* ptr = rsx_address_to_info_pointer(0); ptr <= rsx_address_to_info_pointer(0xFFFFFFFF); ptr++)
{
ptr->reset_refcount();
}
}
void add(const address_range& range, utils::protection prot)
{
AUDIT(range.is_page_range());
AUDIT(prot == utils::protection::no || prot == utils::protection::ro);
for (per_page_info_t* ptr = rsx_address_to_info_pointer(range.start); ptr <= rsx_address_to_info_pointer(range.end); ptr++)
{
ptr->add(prot);
}
}
void remove(const address_range& range, utils::protection prot)
{
AUDIT(range.is_page_range());
AUDIT(prot == utils::protection::no || prot == utils::protection::ro);
for (per_page_info_t* ptr = rsx_address_to_info_pointer(range.start); ptr <= rsx_address_to_info_pointer(range.end); ptr++)
{
ptr->remove(prot);
}
}
// Returns a lower bound on how many locked sections are known to be within the given range for each protection {NA, RO}
// The assumption here is that the page in the given range with the largest number of refcounted sections represents the lower bound to how many there must be
std::pair<u8,u8> get_minimum_number_of_sections(const address_range& range) const
{
AUDIT(range.is_page_range());
u8 no = 0;
u8 ro = 0;
for (const per_page_info_t* ptr = rsx_address_to_info_pointer(range.start); ptr <= rsx_address_to_info_pointer(range.end); ptr++)
{
no = std::max(no, ptr->no);
ro = std::max(ro, ptr->ro);
}
return { no,ro };
}
void check_unprotected(const address_range& range, bool allow_ro = false, bool must_be_empty = true) const
{
AUDIT(range.is_page_range());
for (const per_page_info_t* ptr = rsx_address_to_info_pointer(range.start); ptr <= rsx_address_to_info_pointer(range.end); ptr++)
{
const auto prot = ptr->get_protection();
if (prot != utils::protection::rw && (!allow_ro || prot != utils::protection::ro))
{
const u32 addr = info_pointer_to_address(ptr);
fmt::throw_exception("Page at addr=0x%8x should be RW%s: Prot=%s, RO=%d, NA=%d", addr, allow_ro ? " or RO" : "", prot_to_str(prot), ptr->ro, ptr->no);
}
if (must_be_empty && (
ptr->no > 0 ||
(!allow_ro && ptr->ro > 0)
))
{
const u32 addr = info_pointer_to_address(ptr);
fmt::throw_exception("Page at addr=0x%8x should not have any NA%s sections: Prot=%s, RO=%d, NA=%d", addr, allow_ro ? " or RO" : "", prot_to_str(prot), ptr->ro, ptr->no);
}
}
}
void verify() const
{
for (usz idx = 0; idx < num_pages; idx++)
{
auto &elmnt = _info[idx];
if (!elmnt.verify())
{
const u32 addr = index_to_rsx_address(idx);
const utils::protection prot = elmnt.get_protection();
fmt::throw_exception("Protection verification failed at addr=0x%x: Prot=%s, RO=%d, NA=%d", addr, prot_to_str(prot), elmnt.ro, elmnt.no);
}
}
}
};
extern tex_cache_checker_t tex_cache_checker;
}; // namespace rsx
#endif //TEXTURE_CACHE_DEBUG
| 6,510 | C++ | .h | 183 | 31.469945 | 174 | 0.657134 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 6,018 | tiled_dma_copy.hpp | RPCS3_rpcs3/rpcs3/Emu/RSX/Common/tiled_dma_copy.hpp |
#pragma once
#include <util/types.hpp>
#include <cstdint>
// Set this to 1 to force all decoding to be done on the CPU.
#define DEBUG_DMA_TILING 0
// This is a 1:1 port of the GPU code for my own sanity when debugging misplaced bits
// For a high-level explanation, read https://envytools.readthedocs.io/en/latest/hw/memory/vram.html
namespace rsx
{
struct detiler_config
{
uint32_t prime;
uint32_t factor;
uint32_t num_tiles_per_row;
uint32_t tile_base_address;
uint32_t tile_size;
uint32_t tile_address_offset;
uint32_t tile_rw_offset;
uint32_t tile_pitch;
uint32_t tile_bank;
uint32_t image_width;
uint32_t image_height;
uint32_t image_pitch;
uint32_t image_bpp;
};
#define RSX_TILE_WIDTH 256
#define RSX_TILE_HEIGHT 64
#define RSX_DMA_OP_ENCODE_TILE 0
#define RSX_DMA_OP_DECODE_TILE 1
static inline void tiled_dma_copy(const uint32_t row, const uint32_t col, const detiler_config& conf, char* tiled_data, char* linear_data, int direction)
{
const uint32_t row_offset = (row * conf.tile_pitch) + conf.tile_base_address + conf.tile_address_offset;
const uint32_t this_address = row_offset + (col * conf.image_bpp);
// 1. Calculate row_addr
const uint32_t texel_offset = (this_address - conf.tile_base_address) / RSX_TILE_WIDTH;
// Calculate coordinate of the tile grid we're supposed to be in
const uint32_t tile_x = texel_offset % conf.num_tiles_per_row;
const uint32_t tile_y = (texel_offset / conf.num_tiles_per_row) / RSX_TILE_HEIGHT;
// Calculate the grid offset for the tile selected and add the base offset. It's supposed to affect the bank stuff in the next step
const uint32_t tile_id = tile_y * conf.num_tiles_per_row + tile_x;
const uint32_t tile_selector = (tile_id + (conf.tile_base_address >> 14)) & 0x3ffff;
// Calculate row address
const uint32_t row_address = (tile_selector >> 2) & 0xffff;
// 2. Calculate bank selector
// There's a lot of weird math here, but it's just a variant of (tile_selector % 4) to pick a value between [0..3]
uint32_t bank_selector = 0;
const uint32_t bank_distribution_lookup[16] = { 0, 1, 2, 3, 2, 3, 0, 1, 1, 2, 3, 0, 3, 0, 1, 2 };
if (conf.factor == 1)
{
bank_selector = (tile_selector & 3);
}
else if (conf.factor == 2)
{
const uint32_t idx = ((tile_selector + ((tile_y & 1) << 1)) & 3) * 4 + (tile_y & 3);
bank_selector = bank_distribution_lookup[idx];
}
else if (conf.factor >= 4)
{
const uint32_t idx = (tile_selector & 3) * 4 + (tile_y & 3);
bank_selector = bank_distribution_lookup[idx];
}
bank_selector = (bank_selector + conf.tile_bank) & 3;
// 3. Calculate column selector
uint32_t column_selector = 0;
const uint32_t line_offset_in_tile = (texel_offset / conf.num_tiles_per_row) % RSX_TILE_HEIGHT;
// Calculate column_selector by bit-twiddling line offset and the other calculated parameter bits:
// column_selector[9:7] = line_offset_in_tile[5:3]
// column_selector[6:4] = this_address[7:5]
// column_selector[3:2] = line_offset_in_tile[1:0]
// column_selector[1:0] = 0
column_selector |= ((line_offset_in_tile >> 3) & 0x7) << 7;
column_selector |= ((this_address >> 5) & 0x7) << 4;
column_selector |= ((line_offset_in_tile >> 0) & 0x3) << 2;
// 4. Calculate partition selector (0 or 1)
const uint32_t partition_selector = (((line_offset_in_tile >> 2) & 1) + ((this_address >> 6) & 1)) & 1;
// 5. Build tiled address
uint32_t tile_address = 0;
// tile_address[31:16] = row_adr[15:0]
// tile_address[15:14] = bank_sel[1:0]
// tile_address[13:8] = column_sel[9:4]
// tile_address[7:7] = partition_sel[0:0]
// tile_address[6:5] = column_sel[3:2]
// tile_address[4:0] = this_address[4:0]
tile_address |= ((row_address >> 0) & 0xFFFF) << 16;
tile_address |= ((bank_selector >> 0) & 0x3) << 14;
tile_address |= ((column_selector >> 4) & 0x3F) << 8;
tile_address |= ((partition_selector >> 0) & 0x1) << 7;
tile_address |= ((column_selector >> 2) & 0x3) << 5;
tile_address |= ((this_address >> 0) & 0x1F) << 0;
// Twiddle bits 9 and 10
tile_address ^= (((tile_address >> 12) ^ ((bank_selector ^ tile_selector) & 1) ^ (tile_address >> 14)) & 1) << 9;
tile_address ^= ((tile_address >> 11) & 1) << 10;
// Calculate relative addresses and sample
const uint32_t linear_image_offset = (row * conf.image_pitch) + (col * conf.image_bpp);
const uint32_t tile_base_offset = tile_address - conf.tile_base_address; // Distance from tile base address
const uint32_t tile_data_offset = tile_base_offset - conf.tile_rw_offset; // Distance from data base address
if (tile_base_offset >= conf.tile_size)
{
// Do not touch anything out of bounds
return;
}
if (direction == RSX_DMA_OP_ENCODE_TILE)
{
std::memcpy(tiled_data + tile_data_offset, linear_data + linear_image_offset, conf.image_bpp);
}
else
{
std::memcpy(linear_data + linear_image_offset, tiled_data + tile_data_offset, conf.image_bpp);
}
}
// Entry point. In GPU code this is handled by dispatch + main
template <typename T, bool Decode = false>
void tile_texel_data(void* dst, const void* src, uint32_t base_address, uint32_t base_offset, uint32_t tile_size, uint8_t bank_sense, uint16_t row_pitch_in_bytes, uint16_t image_width, uint16_t image_height)
{
// Some constants
auto get_prime_factor = [](uint32_t pitch) -> std::pair<uint32_t, uint32_t>
{
const uint32_t base = (pitch >> 8);
if ((pitch & (pitch - 1)) == 0)
{
return { 1u, base };
}
for (const auto prime : { 3, 5, 7, 11, 13 })
{
if ((base % prime) == 0)
{
return { prime, base / prime };
}
}
// rsx_log.error("Unexpected pitch value 0x%x", pitch);
return {};
};
const auto [prime, factor] = get_prime_factor(row_pitch_in_bytes);
const uint32_t tiles_per_row = prime * factor;
constexpr int op = Decode ? RSX_DMA_OP_DECODE_TILE : RSX_DMA_OP_ENCODE_TILE;
auto src2 = static_cast<char*>(const_cast<void*>(src));
auto dst2 = static_cast<char*>(dst);
const detiler_config dconf = {
.prime = prime,
.factor = factor,
.num_tiles_per_row = tiles_per_row,
.tile_base_address = base_address,
.tile_size = tile_size,
.tile_address_offset = base_offset,
.tile_rw_offset = base_offset,
.tile_pitch = row_pitch_in_bytes,
.tile_bank = bank_sense,
.image_width = image_width,
.image_height = image_height,
.image_pitch = row_pitch_in_bytes,
.image_bpp = sizeof(T)
};
for (u16 row = 0; row < image_height; ++row)
{
for (u16 col = 0; col < image_width; ++col)
{
if constexpr (op == RSX_DMA_OP_DECODE_TILE)
{
tiled_dma_copy(row, col, dconf, src2, dst2, op);
}
else
{
tiled_dma_copy(row, col, dconf, dst2, src2, op);
}
}
}
}
[[maybe_unused]] static auto tile_texel_data16 = tile_texel_data<u16, false>;
[[maybe_unused]] static auto tile_texel_data32 = tile_texel_data<u32, false>;
[[maybe_unused]] static auto detile_texel_data16 = tile_texel_data<u16, true>;
[[maybe_unused]] static auto detile_texel_data32 = tile_texel_data<u32, true>;
#undef RSX_TILE_WIDTH
#undef RSX_TILE_HEIGHT
#undef RSX_DMA_OP_ENCODE_TILE
#undef RSX_DMA_OP_DECODE_TILE
}
| 7,374 | C++ | .h | 176 | 37.568182 | 209 | 0.651286 | RPCS3/rpcs3 | 15,204 | 1,895 | 1,021 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |